38 fail, 112 skipped, 3 951 pass in 12h 51m 31s
27 files 27 suites 12h 51m 31s ⏱️
4 101 tests 3 951 ✅ 112 💤 38 ❌
51 413 runs 48 802 ✅ 2 308 💤 303 ❌
Results for commit e72e219.
Annotations
Check warning on line 0 in distributed.dashboard.tests.test_scheduler_bokeh
github-actions / Unit Test Results
All 10 runs failed: test_TaskGraph (distributed.dashboard.tests.test_scheduler_bokeh)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
assert {5} == {6}
Extra items in the left set:
5
Extra items in the right set:
6
Full diff:
  {
- 6,
? ^
+ 5,
? ^
  }
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:54730', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:54731', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:54734', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_TaskGraph(c, s, a, b):
gp = TaskGraph(s)
futures = c.map(inc, range(5))
total = c.submit(sum, futures)
await total
gp.update()
> assert set(map(len, gp.node_source.data.values())) == {6}
E assert {5} == {6}
E
E Extra items in the left set:
E 5
E Extra items in the right set:
E 6
E
E Full diff:
E   {
E - 6,
E ? ^
E + 5,
E ? ^
E   }
distributed\dashboard\tests\test_scheduler_bokeh.py:830: AssertionError
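Note: the assertion above checks that every column of the TaskGraph node data source has one entry per task (five inc calls plus one sum, hence {6}), but the failing runs only ever see five entries. A minimal plain-dict sketch of that invariant, with no Bokeh dependency; the column names and values below are invented for illustration, not the dashboard's actual schema.

# Stand-in for gp.node_source.data: every column must have one entry per task.
node_data = {
    "x": [0.0, 1.0, 2.0, 3.0, 4.0],
    "y": [0.0, 0.0, 0.0, 0.0, 0.0],
    "state": ["memory"] * 5,
}
column_lengths = set(map(len, node_data.values()))
print(column_lengths)          # {5} in the failing runs
assert column_lengths == {5}   # the test expects {6}: five inc tasks plus the sum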
Check warning on line 0 in distributed.dashboard.tests.test_scheduler_bokeh
github-actions / Unit Test Results
All 10 runs failed: test_TaskGraph_complex (distributed.dashboard.tests.test_scheduler_bokeh)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
AssertionError: assert 1 == 14
+ where 1 = len({('random_sample-77af884053aaf4828200ff34d2866cf9', 0, 0): 0})
+ where {('random_sample-77af884053aaf4828200ff34d2866cf9', 0, 0): 0} = <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFE67278C0>.index
+ where <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFE67278C0> = <distributed.dashboard.components.scheduler.TaskGraph object at 0x000001BFE67240E0>.layout
+ and 14 = len({('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 0): <TaskState ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 0) released>, ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 1): <TaskState ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 1) released>, ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 0): <TaskState ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 0) released>, ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 1): <TaskState ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 1) released>, ...})
+ where {('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 0): <TaskState ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 0) released>, ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 1): <TaskState ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 1) released>, ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 0): <TaskState ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 0) released>, ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 1): <TaskState ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 1) released>, ...} = <Scheduler 'tcp://127.0.0.1:54770', workers: 2, cores: 3, tasks: 14>.tasks
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:54770', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:54771', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:54774', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_TaskGraph_complex(c, s, a, b):
da = pytest.importorskip("dask.array")
gp = TaskGraph(s)
x = da.random.random((2000, 2000), chunks=(1000, 1000))
y = c.persist((x + x.T) - x.mean(axis=0))
await wait(y)
gp.update()
assert len(gp.layout.index) == len(gp.node_source.data["x"])
> assert len(gp.layout.index) == len(s.tasks)
E AssertionError: assert 1 == 14
E + where 1 = len({('random_sample-77af884053aaf4828200ff34d2866cf9', 0, 0): 0})
E + where {('random_sample-77af884053aaf4828200ff34d2866cf9', 0, 0): 0} = <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFE67278C0>.index
E + where <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFE67278C0> = <distributed.dashboard.components.scheduler.TaskGraph object at 0x000001BFE67240E0>.layout
E + and 14 = len({('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 0): <TaskState ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 0) released>, ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 1): <TaskState ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 1) released>, ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 0): <TaskState ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 0) released>, ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 1): <TaskState ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 1) released>, ...})
E + where {('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 0): <TaskState ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 0) released>, ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 1): <TaskState ('mean_agg-aggregate-bf62e42aa1a246a796d9c78111fcd0d9', 1) released>, ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 0): <TaskState ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 0) released>, ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 1): <TaskState ('mean_chunk-ffdb3f2c012fbc1348650c59f7b570df', 0, 1) released>, ...} = <Scheduler 'tcp://127.0.0.1:54770', workers: 2, cores: 3, tasks: 14>.tasks
distributed\dashboard\tests\test_scheduler_bokeh.py:919: AssertionError
Check warning on line 0 in distributed.diagnostics.tests.test_graph_layout
github-actions / Unit Test Results
All 13 runs failed: test_basic (distributed.diagnostics.tests.test_graph_layout)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
AssertionError: assert 5 == 6
+ where 5 = len({'inc-13d8bf3b290c64da13f1e89208d17279': 4, 'inc-1969fe6fbe5b2a68496f2aacd7f70f03': 2, 'inc-1c5dc105bdf7123f726968dc07c76801': 1, 'inc-b8e973c523148214e26b7bad903ad3a1': 3, ...})
+ where {'inc-13d8bf3b290c64da13f1e89208d17279': 4, 'inc-1969fe6fbe5b2a68496f2aacd7f70f03': 2, 'inc-1c5dc105bdf7123f726968dc07c76801': 1, 'inc-b8e973c523148214e26b7bad903ad3a1': 3, ...} = <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFEEE21C10>.y
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:57483', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:57484', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:57487', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_basic(c, s, a, b):
gl = GraphLayout(s)
s.add_plugin(gl)
futures = c.map(inc, range(5))
total = c.submit(sum, futures)
await total
> assert len(gl.x) == len(gl.y) == 6
E AssertionError: assert 5 == 6
E + where 5 = len({'inc-13d8bf3b290c64da13f1e89208d17279': 4, 'inc-1969fe6fbe5b2a68496f2aacd7f70f03': 2, 'inc-1c5dc105bdf7123f726968dc07c76801': 1, 'inc-b8e973c523148214e26b7bad903ad3a1': 3, ...})
E + where {'inc-13d8bf3b290c64da13f1e89208d17279': 4, 'inc-1969fe6fbe5b2a68496f2aacd7f70f03': 2, 'inc-1c5dc105bdf7123f726968dc07c76801': 1, 'inc-b8e973c523148214e26b7bad903ad3a1': 3, ...} = <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFEEE21C10>.y
distributed\diagnostics\tests\test_graph_layout.py:20: AssertionError
Check warning on line 0 in distributed.diagnostics.tests.test_graph_layout
github-actions / Unit Test Results
All 13 runs failed: test_construct_after_call (distributed.diagnostics.tests.test_graph_layout)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
AttributeError: 'str' object has no attribute 'key'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:57497', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:57498', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:57501', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_construct_after_call(c, s, a, b):
futures = c.map(inc, range(5))
total = c.submit(sum, futures)
await total
> gl = GraphLayout(s)
distributed\diagnostics\tests\test_graph_layout.py:33:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFEEE09520>
scheduler = <Scheduler 'tcp://127.0.0.1:57497', workers: 0, cores: 0, tasks: 0>
def __init__(self, scheduler):
self.name = f"graph-layout-{uuid.uuid4()}"
self.x = {}
self.y = {}
self.collision = {}
self.scheduler = scheduler
self.index = {}
self.index_edge = {}
self.next_y = 0
self.next_index = 0
self.next_edge_index = 0
self.new = []
self.new_edges = []
self.state_updates = []
self.visible_updates = []
self.visible_edge_updates = []
if self.scheduler.tasks:
dependencies = {
> k: [ds.key for ds in ts.dependencies]
for k, ts in scheduler.tasks.items()
}
E AttributeError: 'str' object has no attribute 'key'
distributed\diagnostics\graph_layout.py:38: AttributeError
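Note: the traceback above shows GraphLayout.__init__ calling ds.key on each element of ts.dependencies, which fails because the elements are plain key strings. The sketch below uses invented stand-in types and a hypothetical dependency_keys helper (none of this is distributed's code) to illustrate the failure mode and a defensive way to collect dependency keys that tolerates both strings and TaskState-like objects.

from dataclasses import dataclass, field

@dataclass
class FakeTaskState:
    key: str
    dependencies: set = field(default_factory=set)

def dependency_keys(tasks: dict) -> dict:
    # Tolerate raw key strings as well as TaskState-like objects with a .key attribute.
    return {
        k: [d if isinstance(d, str) else d.key for d in ts.dependencies]
        for k, ts in tasks.items()
    }

tasks = {"sum-abc": FakeTaskState("sum-abc", dependencies={"inc-1", "inc-2"})}
assert sorted(dependency_keys(tasks)["sum-abc"]) == ["inc-1", "inc-2"]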
Check warning on line 0 in distributed.diagnostics.tests.test_graph_layout
github-actions / Unit Test Results
All 13 runs failed: test_release_tasks (distributed.diagnostics.tests.test_graph_layout)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
assert 0 == 1
+ where 0 = len([])
+ where [] = <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFEEDF76B0>.visible_updates
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:57525', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:57526', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:57529', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_release_tasks(c, s, a, b):
gl = GraphLayout(s)
s.add_plugin(gl)
futures = c.map(inc, range(5))
total = c.submit(sum, futures)
await total
key = total.key
del total
while key in s.tasks:
await asyncio.sleep(0.01)
> assert len(gl.visible_updates) == 1
E assert 0 == 1
E + where 0 = len([])
E + where [] = <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFEEDF76B0>.visible_updates
distributed\diagnostics\tests\test_graph_layout.py:68: AssertionError
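Note: test_release_tasks expects one visibility update to be recorded once the released task's key has left the scheduler, yet gl.visible_updates stays empty. A stand-in tracker sketching that expectation; VisibilityTracker is invented for illustration and is not the GraphLayout plugin itself.

class VisibilityTracker:
    def __init__(self):
        self.visible_updates = []

    def transition(self, key, start, finish, **kwargs):
        # Record a (key, visible) update when a task is released or forgotten.
        if finish in ("released", "forgotten"):
            self.visible_updates.append((key, False))

tracker = VisibilityTracker()
tracker.transition("sum-abc", "memory", "forgotten")
assert len(tracker.visible_updates) == 1  # what the failing assertion expected of gl.visible_updates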
Check warning on line 0 in distributed.diagnostics.tests.test_graph_layout
github-actions / Unit Test Results
All 13 runs failed: test_layout_scatter (distributed.diagnostics.tests.test_graph_layout)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
assert 0 > 0
+ where 0 = len([])
+ where [] = <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFEF43F7D0>.state_updates
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:57563', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:57564', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:57567', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_layout_scatter(c, s, a, b):
gl = GraphLayout(s)
s.add_plugin(gl)
data = await c.scatter([1, 2, 3], broadcast=True)
futures = [c.submit(sum, data) for _ in range(5)]
await wait(futures)
> assert len(gl.state_updates) > 0
E assert 0 > 0
E + where 0 = len([])
E + where [] = <distributed.diagnostics.graph_layout.GraphLayout object at 0x000001BFEF43F7D0>.state_updates
distributed\diagnostics\tests\test_graph_layout.py:112: AssertionError
Check warning on line 0 in distributed.diagnostics.tests.test_progress
github-actions / Unit Test Results
All 13 runs failed: test_multiprogress (distributed.diagnostics.tests.test_progress)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
AttributeError: 'str' object has no attribute 'key'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:57979', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:57980', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:57983', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_multiprogress(c, s, a, b):
x1 = c.submit(f, 1)
x2 = c.submit(f, x1)
x3 = c.submit(f, x2)
y1 = c.submit(g, x3)
y2 = c.submit(g, y1)
p = MultiProgress([y2], scheduler=s, complete=True)
> await p.setup()
distributed\diagnostics\tests\test_progress.py:67:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
distributed\diagnostics\progress.py:210: in setup
self.all_keys, errors = dependent_keys(tasks, complete=self.complete)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tasks = [<TaskState 'g-74f028212b9515639cf77921e2299214' forgotten>]
complete = True
def dependent_keys(tasks, complete=False):
"""
All keys that need to compute for these keys to finish.
If *complete* is false, omit tasks that are busy processing or
have finished executing.
"""
out = set()
errors = set()
stack = list(tasks)
while stack:
ts = stack.pop()
> key = ts.key
E AttributeError: 'str' object has no attribute 'key'
distributed\diagnostics\progress.py:33: AttributeError
Check warning on line 0 in distributed.diagnostics.tests.test_progress_widgets
github-actions / Unit Test Results
All 10 runs failed: test_progressbar_widget (distributed.diagnostics.tests.test_progress_widgets)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: ProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:58181', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:58182', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:58185', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_progressbar_widget(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait(z)
progress = ProgressWidget([z.key], scheduler=s.address, complete=True)
> await progress.listen()
distributed\diagnostics\tests\test_progress_widgets.py:31:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <distributed.diagnostics.progressbar.ProgressWidget object at 0x000001BFEF8F6ED0>
async def listen(self):
complete = self.complete
keys = self.keys
async def setup(scheduler):
p = Progress(keys, scheduler, complete=complete)
await p.setup()
return p
def function(scheduler, p):
result = {
"all": len(p.all_keys),
"remaining": len(p.keys),
"status": p.status,
}
if p.status == "error":
result.update(p.extra)
return result
self.comm = await connect(
self.scheduler, **(self.client().connection_args if self.client else {})
)
logger.debug("Progressbar Connected to scheduler")
await self.comm.write(
{
"op": "feed",
"setup": dumps(setup),
"function": dumps(function),
"interval": self.interval,
},
serializers=self.client()._serializers if self.client else None,
)
while True:
try:
response = await self.comm.read(
deserializers=self.client()._deserializers if self.client else None
)
except CommClosedError:
break
self._last_response = response
self.status = response["status"]
> self._draw_bar(**response)
E TypeError: ProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
distributed\diagnostics\progressbar.py:100: TypeError
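Note: listen() unpacks the feed response straight into _draw_bar(**response), so the TypeError above means the payload no longer carries 'remaining' and 'all'. Below is a minimal reproduction of that pattern with an invented stand-in widget (not distributed's ProgressWidget), plus one illustrative defensive option of merging neutral defaults before unpacking.

class FakeProgressWidget:
    def _draw_bar(self, remaining, all, status, **kwargs):
        print(f"{all - remaining}/{all} done ({status})")

response = {"status": "finished"}  # payload missing 'remaining' and 'all'
widget = FakeProgressWidget()
try:
    widget._draw_bar(**response)
except TypeError as exc:
    print(exc)  # ... missing 2 required positional arguments: 'remaining' and 'all'

# Illustrative workaround only: supply neutral defaults before unpacking.
widget._draw_bar(**{"remaining": 0, "all": 0, **response})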
Check warning on line 0 in distributed.diagnostics.tests.test_progress_widgets
github-actions / Unit Test Results
All 10 runs failed: test_multi_progressbar_widget (distributed.diagnostics.tests.test_progress_widgets)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: MultiProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:58194', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:58195', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:58198', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_multi_progressbar_widget(c, s, a, b):
x1 = c.submit(inc, 1)
x2 = c.submit(inc, x1)
x3 = c.submit(inc, x2)
y1 = c.submit(dec, x3)
y2 = c.submit(dec, y1)
e = c.submit(throws, y2)
other = c.submit(inc, 123)
await wait([other, e])
p = MultiProgressWidget([e.key], scheduler=s.address, complete=True)
> await p.listen()
distributed\diagnostics\tests\test_progress_widgets.py:52:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <distributed.diagnostics.progressbar.MultiProgressWidget object at 0x000001BFEED25FD0>
async def listen(self):
complete = self.complete
keys = self.keys
group_by = self.group_by
async def setup(scheduler):
p = MultiProgress(
keys,
scheduler,
complete=complete,
group_by=group_by,
)
await p.setup()
return p
def function(scheduler, p):
result = {
"all": valmap(len, p.all_keys),
"remaining": valmap(len, p.keys),
"status": p.status,
}
if p.status == "error":
result.update(p.extra)
return result
self.comm = await connect(
self.scheduler, **(self.client().connection_args if self.client else {})
)
logger.debug("Progressbar Connected to scheduler")
await self.comm.write(
{
"op": "feed",
"setup": dumps(setup),
"function": dumps(function),
"interval": self.interval,
}
)
while True:
response = await self.comm.read(
deserializers=self.client()._deserializers if self.client else None
)
self._last_response = response
self.status = response["status"]
> self._draw_bar(**response)
E TypeError: MultiProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
distributed\diagnostics\progressbar.py:326: TypeError
Check warning on line 0 in distributed.diagnostics.tests.test_progress_widgets
github-actions / Unit Test Results
All 10 runs failed: test_multibar_complete (distributed.diagnostics.tests.test_progress_widgets)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: MultiProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:58297', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:58298', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:58301', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_multibar_complete(c, s, a, b):
x1 = c.submit(inc, 1, key="x-1")
x2 = c.submit(inc, x1, key="x-2")
x3 = c.submit(inc, x2, key="x-3")
y1 = c.submit(dec, x3, key="y-1")
y2 = c.submit(dec, y1, key="y-2")
e = c.submit(throws, y2, key="e")
other = c.submit(inc, 123, key="other")
await other.cancel()
p = MultiProgressWidget([e.key], scheduler=s.address, complete=True)
> await p.listen()
distributed\diagnostics\tests\test_progress_widgets.py:147:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <distributed.diagnostics.progressbar.MultiProgressWidget object at 0x000001BFEF68DD60>
async def listen(self):
complete = self.complete
keys = self.keys
group_by = self.group_by
async def setup(scheduler):
p = MultiProgress(
keys,
scheduler,
complete=complete,
group_by=group_by,
)
await p.setup()
return p
def function(scheduler, p):
result = {
"all": valmap(len, p.all_keys),
"remaining": valmap(len, p.keys),
"status": p.status,
}
if p.status == "error":
result.update(p.extra)
return result
self.comm = await connect(
self.scheduler, **(self.client().connection_args if self.client else {})
)
logger.debug("Progressbar Connected to scheduler")
await self.comm.write(
{
"op": "feed",
"setup": dumps(setup),
"function": dumps(function),
"interval": self.interval,
}
)
while True:
response = await self.comm.read(
deserializers=self.client()._deserializers if self.client else None
)
self._last_response = response
self.status = response["status"]
> self._draw_bar(**response)
E TypeError: MultiProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
distributed\diagnostics\progressbar.py:326: TypeError
Check warning on line 0 in distributed.diagnostics.tests.test_progress_widgets
github-actions / Unit Test Results
All 10 runs failed: test_fast (distributed.diagnostics.tests.test_progress_widgets)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 3s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 4s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 2s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 4s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 4s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 6s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 4s]
Raw output
TypeError: MultiProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
client = <Client: 'tcp://127.0.0.1:58318' processes=2 threads=2, memory=32.00 GiB>
def test_fast(client):
L = client.map(inc, range(100))
L2 = client.map(dec, L)
L3 = client.map(add, L, L2)
p = progress(L3, multi=True, complete=True, notebook=True)
> client.sync(p.listen)
distributed\diagnostics\tests\test_progress_widgets.py:160:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
distributed\utils.py:363: in sync
return sync(
distributed\utils.py:439: in sync
raise error
distributed\utils.py:413: in f
result = yield future
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\site-packages\tornado\gen.py:766: in run
value = future.result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <distributed.diagnostics.progressbar.MultiProgressWidget object at 0x000001BFEF68FC50>
async def listen(self):
complete = self.complete
keys = self.keys
group_by = self.group_by
async def setup(scheduler):
p = MultiProgress(
keys,
scheduler,
complete=complete,
group_by=group_by,
)
await p.setup()
return p
def function(scheduler, p):
result = {
"all": valmap(len, p.all_keys),
"remaining": valmap(len, p.keys),
"status": p.status,
}
if p.status == "error":
result.update(p.extra)
return result
self.comm = await connect(
self.scheduler, **(self.client().connection_args if self.client else {})
)
logger.debug("Progressbar Connected to scheduler")
await self.comm.write(
{
"op": "feed",
"setup": dumps(setup),
"function": dumps(function),
"interval": self.interval,
}
)
while True:
response = await self.comm.read(
deserializers=self.client()._deserializers if self.client else None
)
self._last_response = response
self.status = response["status"]
> self._draw_bar(**response)
E TypeError: MultiProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
distributed\diagnostics\progressbar.py:326: TypeError
Check warning on line 0 in distributed.diagnostics.tests.test_progress_widgets
github-actions / Unit Test Results
All 10 runs failed: test_multibar_with_spans (distributed.diagnostics.tests.test_progress_widgets)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 4s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 2s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 4s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 4s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 6s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 4s]
Raw output
TypeError: MultiProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
client = <Client: 'tcp://127.0.0.1:58350' processes=2 threads=2, memory=32.00 GiB>
def test_multibar_with_spans(client):
"""Test progress(group_by='spans'"""
with span("span 1"):
L = client.map(inc, range(100))
with span("span 2"):
L2 = client.map(dec, L)
with span("span 3"):
L3 = client.map(add, L, L2)
with span("other span"):
_ = client.submit(inc, 123)
e = client.submit(throws, L3)
p = progress(e, complete=True, multi=True, notebook=True, group_by="spans")
> client.sync(p.listen)
distributed\diagnostics\tests\test_progress_widgets.py:177:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
distributed\utils.py:363: in sync
return sync(
distributed\utils.py:439: in sync
raise error
distributed\utils.py:413: in f
result = yield future
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\site-packages\tornado\gen.py:766: in run
value = future.result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <distributed.diagnostics.progressbar.MultiProgressWidget object at 0x000001BFEFB0E2A0>
async def listen(self):
complete = self.complete
keys = self.keys
group_by = self.group_by
async def setup(scheduler):
p = MultiProgress(
keys,
scheduler,
complete=complete,
group_by=group_by,
)
await p.setup()
return p
def function(scheduler, p):
result = {
"all": valmap(len, p.all_keys),
"remaining": valmap(len, p.keys),
"status": p.status,
}
if p.status == "error":
result.update(p.extra)
return result
self.comm = await connect(
self.scheduler, **(self.client().connection_args if self.client else {})
)
logger.debug("Progressbar Connected to scheduler")
await self.comm.write(
{
"op": "feed",
"setup": dumps(setup),
"function": dumps(function),
"interval": self.interval,
}
)
while True:
response = await self.comm.read(
deserializers=self.client()._deserializers if self.client else None
)
self._last_response = response
self.status = response["status"]
> self._draw_bar(**response)
E TypeError: MultiProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
distributed\diagnostics\progressbar.py:326: TypeError
Check warning on line 0 in distributed.diagnostics.tests.test_progress_widgets
github-actions / Unit Test Results
All 10 runs failed: test_serializers (distributed.diagnostics.tests.test_progress_widgets)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: Data serialized with dask but only able to deserialize data with ['msgpack']
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:58398', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:58399', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:58402', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True, client_kwargs={"serializers": ["msgpack"]})
async def test_serializers(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait(z)
progress = ProgressWidget([z], scheduler=s.address, complete=True)
> await progress.listen()
distributed\diagnostics\tests\test_progress_widgets.py:215:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
distributed\diagnostics\progressbar.py:93: in listen
response = await self.comm.read(
distributed\comm\tcp.py:248: in read
msg = await from_frames(
distributed\comm\utils.py:78: in from_frames
res = _from_frames()
distributed\comm\utils.py:61: in _from_frames
return protocol.loads(
distributed\protocol\core.py:175: in loads
return msgpack.loads(
msgpack/_unpacker.pyx:194: in msgpack._cmsgpack.unpackb
???
distributed\protocol\core.py:159: in _decode_default
return merge_and_deserialize(
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\contextlib.py:81: in inner
return func(*args, **kwds)
distributed\protocol\serialize.py:525: in merge_and_deserialize
return deserialize(header, merged_frames, deserializers=deserializers)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
header = {'compression': (None,), 'num-sub-frames': 1, 'serializer': 'dask', 'split-num-sub-frames': (1,), ...}
frames = [<memory at 0x000001BFEF59AE00>], deserializers = ['msgpack']
def deserialize(header, frames, deserializers=None):
"""
Convert serialized header and list of bytestrings back to a Python object
Parameters
----------
header : dict
frames : list of bytes
deserializers : dict[str, tuple[Callable, Callable, bool]] | None
An optional dict mapping a name to a (de)serializer.
See `dask_serialize` and `dask_deserialize` for more.
See Also
--------
serialize
"""
if "is-collection" in header:
headers = header["sub-headers"]
lengths = header["frame-lengths"]
cls = {"tuple": tuple, "list": list, "set": set, "dict": dict}[
header["type-serialized"]
]
start = 0
if cls is dict:
d = {}
for _header, _length in zip(headers, lengths):
k = _header.pop("key")
d[k] = deserialize(
_header,
frames[start : start + _length],
deserializers=deserializers,
)
start += _length
return d
else:
lst = []
for _header, _length in zip(headers, lengths):
lst.append(
deserialize(
_header,
frames[start : start + _length],
deserializers=deserializers,
)
)
start += _length
return cls(lst)
name = header.get("serializer")
if deserializers is not None and name not in deserializers:
> raise TypeError(
"Data serialized with %s but only able to deserialize "
"data with %s" % (name, str(list(deserializers)))
)
E TypeError: Data serialized with dask but only able to deserialize data with ['msgpack']
distributed\protocol\serialize.py:447: TypeError
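Note: the deserialize() code quoted above rejects any frame whose header names a serializer that is not in the client's allow-list; this client was created with serializers=["msgpack"], while the feed response was serialized with "dask". A self-contained sketch of that allow-list check; check_deserializer is invented for illustration and is not distributed's implementation.

def check_deserializer(header: dict, deserializers) -> None:
    name = header.get("serializer")
    if deserializers is not None and name not in deserializers:
        raise TypeError(
            "Data serialized with %s but only able to deserialize "
            "data with %s" % (name, str(list(deserializers)))
        )

check_deserializer({"serializer": "msgpack"}, ["msgpack"])  # accepted
try:
    check_deserializer({"serializer": "dask"}, ["msgpack"])
except TypeError as exc:
    print(exc)  # the same error raised in distributed/protocol/serialize.py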
Check warning on line 0 in distributed.diagnostics.tests.test_progress_widgets
github-actions / Unit Test Results
All 10 runs failed: test_tls (distributed.diagnostics.tests.test_progress_widgets)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: ProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
c = <Client: No scheduler connected>
s = <Scheduler 'tls://127.0.0.1:58411', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tls://127.0.0.1:58412', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tls://127.0.0.1:58415', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_tls_cluster(client=True)
async def test_tls(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait(z)
progress = ProgressWidget([z], scheduler=s.address, complete=True)
> await progress.listen()
distributed\diagnostics\tests\test_progress_widgets.py:229:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <distributed.diagnostics.progressbar.ProgressWidget object at 0x000001BFEF810320>
async def listen(self):
complete = self.complete
keys = self.keys
async def setup(scheduler):
p = Progress(keys, scheduler, complete=complete)
await p.setup()
return p
def function(scheduler, p):
result = {
"all": len(p.all_keys),
"remaining": len(p.keys),
"status": p.status,
}
if p.status == "error":
result.update(p.extra)
return result
self.comm = await connect(
self.scheduler, **(self.client().connection_args if self.client else {})
)
logger.debug("Progressbar Connected to scheduler")
await self.comm.write(
{
"op": "feed",
"setup": dumps(setup),
"function": dumps(function),
"interval": self.interval,
},
serializers=self.client()._serializers if self.client else None,
)
while True:
try:
response = await self.comm.read(
deserializers=self.client()._deserializers if self.client else None
)
except CommClosedError:
break
self._last_response = response
self.status = response["status"]
> self._draw_bar(**response)
E TypeError: ProgressWidget._draw_bar() missing 2 required positional arguments: 'remaining' and 'all'
distributed\diagnostics\progressbar.py:100: TypeError
Check warning on line 0 in distributed.shuffle.tests.test_shuffle
github-actions / Unit Test Results
All 11 runs failed: test_restarting_during_transfer_raises_killed_worker (distributed.shuffle.tests.test_shuffle)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 2s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 1s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 1s]
Raw output
assert 0 == 1
+ where 0 = sum(<generator object test_restarting_during_transfer_raises_killed_worker.<locals>.<genexpr> at 0x000001BF920172A0>)
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:65456', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:65457', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:65460', name: 1, status: closed, stored: 2, running: 1/1, ready: 1, comm: 0, waiting: 0>
@gen_cluster(
client=True,
nthreads=[("", 1)] * 2,
config={"distributed.scheduler.allowed-failures": 0},
)
async def test_restarting_during_transfer_raises_killed_worker(c, s, a, b):
await c.register_plugin(BlockedShuffleReceiveShuffleWorkerPlugin(), name="shuffle")
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-02-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
shuffle_extA = a.plugins["shuffle"]
shuffle_extB = b.plugins["shuffle"]
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", force=True)
out = c.compute(out.x.size)
await asyncio.gather(
shuffle_extA.in_shuffle_receive.wait(), shuffle_extB.in_shuffle_receive.wait()
)
shuffle_extA.block_shuffle_receive.set()
shuffle_extB.block_shuffle_receive.set()
await assert_worker_cleanup(b, close=True)
with pytest.raises(KilledWorker):
await out
> assert sum(event["action"] == "p2p-failed" for _, event in s.get_events("p2p")) == 1
E assert 0 == 1
E + where 0 = sum(<generator object test_restarting_during_transfer_raises_killed_worker.<locals>.<genexpr> at 0x000001BF920172A0>)
distributed\shuffle\tests\test_shuffle.py:436: AssertionError
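Note: the failing assertion tallies "p2p-failed" actions in the scheduler's "p2p" event log, which the test reads via s.get_events("p2p") as (timestamp, msg) pairs, and finds none. A small hedged helper for counting actions while debugging; count_actions and the sample data are invented for this sketch.

from collections import Counter

def count_actions(events):
    # Tally the 'action' field across (timestamp, msg) event pairs.
    return Counter(msg.get("action") for _, msg in events)

# With a live scheduler this would be: count_actions(s.get_events("p2p"))
sample = [(0.0, {"action": "p2p-started"}), (1.0, {"action": "p2p-completed"})]
assert count_actions(sample)["p2p-failed"] == 0  # the count the failing test observed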
Check warning on line 0 in distributed.shuffle.tests.test_shuffle
github-actions / Unit Test Results
1 out of 11 runs failed: test_closed_input_only_worker_during_transfer (distributed.shuffle.tests.test_shuffle)
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 32s]
Raw output
asyncio.exceptions.TimeoutError: Test timeout (30) hit after 32.10364149999987s.
========== Test stack trace starts here ==========
Stack for <Task pending name='Task-178952' coro=<test_closed_input_only_worker_during_transfer() running at D:\a\distributed\distributed\distributed\shuffle\tests\test_shuffle.py:653> wait_for=<Future pending cb=[Task.task_wakeup()]>> (most recent call last):
File "D:\a\distributed\distributed\distributed\shuffle\tests\test_shuffle.py", line 653, in test_closed_input_only_worker_during_transfer
await c.close()
args = (), kwds = {}
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
> return func(*args, **kwds)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\contextlib.py:79:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\contextlib.py:79: in inner
return func(*args, **kwds)
distributed\utils_test.py:1090: in test_func
return _run_and_close_tornado(async_fn_outer)
distributed\utils_test.py:380: in _run_and_close_tornado
return asyncio_run(inner_fn(), loop_factory=get_loop_factory())
distributed\compatibility.py:236: in asyncio_run
return loop.run_until_complete(main)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\asyncio\base_events.py:649: in run_until_complete
return future.result()
distributed\utils_test.py:377: in inner_fn
return await async_fn(*args, **kwargs)
distributed\utils_test.py:1087: in async_fn_outer
return await utils_wait_for(async_fn(), timeout=timeout * 2)
distributed\utils.py:1915: in wait_for
return await asyncio.wait_for(fut, timeout)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\asyncio\tasks.py:445: in wait_for
return fut.result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
async def async_fn():
result = None
with dask.config.set(config):
async with (
_cluster_factory() as (s, workers),
_client_factory(s) as c,
):
args = [s] + workers
if c is not None:
args = [c] + args
try:
coro = func(*args, *outer_args, **kwargs)
task = asyncio.create_task(coro)
coro2 = utils_wait_for(
asyncio.shield(task), timeout=deadline.remaining
)
result = await coro2
validate_state(s, *workers)
except asyncio.TimeoutError:
assert task
elapsed = deadline.elapsed
buffer = io.StringIO()
# This stack indicates where the coro/test is suspended
task.print_stack(file=buffer)
task.cancel()
while not task.cancelled():
await asyncio.sleep(0.01)
# Hopefully, the hang has been caused by inconsistent
# state, which should be much more meaningful than the
# timeout
validate_state(s, *workers)
# Remove as much of the traceback as possible; it's
# uninteresting boilerplate from utils_test and asyncio
# and not from the code being tested.
> raise asyncio.TimeoutError(
f"Test timeout ({timeout}) hit after {elapsed}s.\n"
"========== Test stack trace starts here ==========\n"
f"{buffer.getvalue()}"
) from None
E asyncio.exceptions.TimeoutError: Test timeout (30) hit after 32.10364149999987s.
E ========== Test stack trace starts here ==========
E Stack for <Task pending name='Task-178952' coro=<test_closed_input_only_worker_during_transfer() running at D:\a\distributed\distributed\distributed\shuffle\tests\test_shuffle.py:653> wait_for=<Future pending cb=[Task.task_wakeup()]>> (most recent call last):
E File "D:\a\distributed\distributed\distributed\shuffle\tests\test_shuffle.py", line 653, in test_closed_input_only_worker_during_transfer
E await c.close()
distributed\utils_test.py:1041: TimeoutError
Check warning on line 0 in distributed.shuffle.tests.test_shuffle
github-actions / Unit Test Results
All 11 runs failed: test_restarting_during_unpack_raises_killed_worker (distributed.shuffle.tests.test_shuffle)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 4s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 5s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 5s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 5s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 5s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 5s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 5s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 11s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 9s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 8s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 9s]
Raw output
assert 0 == 1
+ where 0 = sum(<generator object test_restarting_during_unpack_raises_killed_worker.<locals>.<genexpr> at 0x000001BF99D68BA0>)
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:49535', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:49536', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:49539', name: 1, status: closed, stored: 2, running: 1/1, ready: 29, comm: 0, waiting: 0>
@gen_cluster(
client=True,
nthreads=[("", 1)] * 2,
config={"distributed.scheduler.allowed-failures": 0},
)
async def test_restarting_during_unpack_raises_killed_worker(c, s, a, b):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-03-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", force=True)
out = c.compute(out.x.size)
await wait_for_tasks_in_state(UNPACK_PREFIX, "memory", 1, b)
await assert_worker_cleanup(b, close=True)
with pytest.raises(KilledWorker):
await out
> assert sum(event["action"] == "p2p-failed" for _, event in s.get_events("p2p")) == 1
E assert 0 == 1
E + where 0 = sum(<generator object test_restarting_during_unpack_raises_killed_worker.<locals>.<genexpr> at 0x000001BF99D68BA0>)
distributed\shuffle\tests\test_shuffle.py:1023: AssertionError
Check warning on line 0 in distributed.shuffle.tests.test_shuffle
github-actions / Unit Test Results
All 11 runs failed: test_workers_do_not_spam_get_requests (distributed.shuffle.tests.test_shuffle)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 30s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 31s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 31s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 31s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 31s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 32s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 31s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 32s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 32s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 31s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 33s]
Raw output
AttributeError: 'tuple' object has no attribute 'state'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:51181', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:51182', name: 0, status: closed, stored: 0, running: 2/2, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:51185', name: 1, status: closed, stored: 0, running: 2/2, ready: 0, comm: 0, waiting: 0>
@mock.patch(
"distributed.shuffle.ShuffleSchedulerPlugin",
RequestCountingSchedulerPlugin,
)
@mock.patch(
"distributed.shuffle._worker_plugin._ShuffleRunManager",
PostFetchBlockingManager,
)
@gen_cluster(
client=True,
nthreads=[("", 2)] * 2,
config={
"distributed.scheduler.allowed-failures": 0,
"distributed.p2p.comm.message-size-limit": "10 B",
},
)
async def test_workers_do_not_spam_get_requests(c, s, a, b):
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-02-01",
dtypes={"x": float, "y": float},
freq="10 s",
)
s.remove_plugin("shuffle")
shuffle_extS = RequestCountingSchedulerPlugin(s)
shuffle_extA = a.plugins["shuffle"]
shuffle_extB = b.plugins["shuffle"]
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", npartitions=100, force=True)
out = c.compute(out.x.size)
shuffle_id = await wait_until_new_shuffle_is_initialized(s)
key = barrier_key(shuffle_id)
await shuffle_extA.shuffle_runs.in_fetch.wait()
await shuffle_extB.shuffle_runs.in_fetch.wait()
shuffle_extA.shuffle_runs.block_fetch.set()
barrier_task = s.tasks[key]
> while any(
ts.state not in ("processing", "memory") for ts in barrier_task.dependencies
):
distributed\shuffle\tests\test_shuffle.py:3000:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.0 = <set_iterator object at 0x000001BF9F48A300>
while any(
> ts.state not in ("processing", "memory") for ts in barrier_task.dependencies
):
E AttributeError: 'tuple' object has no attribute 'state'
distributed\shuffle\tests\test_shuffle.py:3001: AttributeError
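Note: here barrier_task.dependencies yields plain key tuples rather than objects with a .state attribute. The sketch below, using invented stand-in types and a hypothetical dependency_states helper (not distributed's code), shows one way to read dependency states while tolerating raw keys by resolving them through a tasks mapping.

from dataclasses import dataclass

@dataclass
class FakeTask:
    state: str

def dependency_states(dependencies, tasks: dict):
    # Resolve raw keys through the tasks mapping; pass TaskState-like objects straight through.
    return [(d if hasattr(d, "state") else tasks[d]).state for d in dependencies]

tasks = {("shuffle-transfer", 0): FakeTask("memory")}
assert dependency_states({("shuffle-transfer", 0)}, tasks) == ["memory"]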
Check warning on line 0 in distributed.tests.test_active_memory_manager
github-actions / Unit Test Results
3 out of 13 runs failed: test_noamm_stress (distributed.tests.test_active_memory_manager)
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 35s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 35s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 36s]
Raw output
TimeoutError: Test timeout (30) hit after 30.009321799999725s.
========== Test stack trace starts here ==========
Stack for <Task pending name='Task-232002' coro=<test_noamm_stress() running at D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py:1314> wait_for=<Future pending cb=[Task.task_wakeup()]>> (most recent call last):
File "D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py", line 1314, in test_noamm_stress
await tensordot_stress(c, s)
args = (), kwds = {}
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
> return func(*args, **kwds)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\contextlib.py:81:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\contextlib.py:81: in inner
return func(*args, **kwds)
distributed\utils_test.py:1090: in test_func
return _run_and_close_tornado(async_fn_outer)
distributed\utils_test.py:380: in _run_and_close_tornado
return asyncio_run(inner_fn(), loop_factory=get_loop_factory())
distributed\compatibility.py:204: in asyncio_run
return runner.run(main)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\asyncio\runners.py:118: in run
return self._loop.run_until_complete(task)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\asyncio\base_events.py:654: in run_until_complete
return future.result()
distributed\utils_test.py:377: in inner_fn
return await async_fn(*args, **kwargs)
distributed\utils_test.py:1087: in async_fn_outer
return await utils_wait_for(async_fn(), timeout=timeout * 2)
distributed\utils.py:1910: in wait_for
return await fut
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
async def async_fn():
result = None
with dask.config.set(config):
async with (
_cluster_factory() as (s, workers),
_client_factory(s) as c,
):
args = [s] + workers
if c is not None:
args = [c] + args
try:
coro = func(*args, *outer_args, **kwargs)
task = asyncio.create_task(coro)
coro2 = utils_wait_for(
asyncio.shield(task), timeout=deadline.remaining
)
result = await coro2
validate_state(s, *workers)
except asyncio.TimeoutError:
assert task
elapsed = deadline.elapsed
buffer = io.StringIO()
# This stack indicates where the coro/test is suspended
task.print_stack(file=buffer)
task.cancel()
while not task.cancelled():
await asyncio.sleep(0.01)
# Hopefully, the hang has been caused by inconsistent
# state, which should be much more meaningful than the
# timeout
validate_state(s, *workers)
# Remove as much of the traceback as possible; it's
# uninteresting boilerplate from utils_test and asyncio
# and not from the code being tested.
> raise asyncio.TimeoutError(
f"Test timeout ({timeout}) hit after {elapsed}s.\n"
"========== Test stack trace starts here ==========\n"
f"{buffer.getvalue()}"
) from None
E TimeoutError: Test timeout (30) hit after 30.009321799999725s.
E ========== Test stack trace starts here ==========
E Stack for <Task pending name='Task-232002' coro=<test_noamm_stress() running at D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py:1314> wait_for=<Future pending cb=[Task.task_wakeup()]>> (most recent call last):
E File "D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py", line 1314, in test_noamm_stress
E await tensordot_stress(c, s)
distributed\utils_test.py:1041: TimeoutError
Check warning on line 0 in distributed.tests.test_active_memory_manager
github-actions / Unit Test Results
3 out of 13 runs failed: test_drop_stress (distributed.tests.test_active_memory_manager)
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 35s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 35s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 36s]
Raw output
TimeoutError: Test timeout (30) hit after 30.033994500000063s.
========== Test stack trace starts here ==========
Stack for <Task pending name='Task-239719' coro=<test_drop_stress() running at D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py:1336> wait_for=<Future pending cb=[Task.task_wakeup()]>> (most recent call last):
File "D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py", line 1336, in test_drop_stress
await tensordot_stress(c, s)
args = (), kwds = {}
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
> return func(*args, **kwds)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\contextlib.py:81:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\contextlib.py:81: in inner
return func(*args, **kwds)
distributed\utils_test.py:1090: in test_func
return _run_and_close_tornado(async_fn_outer)
distributed\utils_test.py:380: in _run_and_close_tornado
return asyncio_run(inner_fn(), loop_factory=get_loop_factory())
distributed\compatibility.py:204: in asyncio_run
return runner.run(main)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\asyncio\runners.py:118: in run
return self._loop.run_until_complete(task)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\asyncio\base_events.py:654: in run_until_complete
return future.result()
distributed\utils_test.py:377: in inner_fn
return await async_fn(*args, **kwargs)
distributed\utils_test.py:1087: in async_fn_outer
return await utils_wait_for(async_fn(), timeout=timeout * 2)
distributed\utils.py:1910: in wait_for
return await fut
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
async def async_fn():
result = None
with dask.config.set(config):
async with (
_cluster_factory() as (s, workers),
_client_factory(s) as c,
):
args = [s] + workers
if c is not None:
args = [c] + args
try:
coro = func(*args, *outer_args, **kwargs)
task = asyncio.create_task(coro)
coro2 = utils_wait_for(
asyncio.shield(task), timeout=deadline.remaining
)
result = await coro2
validate_state(s, *workers)
except asyncio.TimeoutError:
assert task
elapsed = deadline.elapsed
buffer = io.StringIO()
# This stack indicates where the coro/test is suspended
task.print_stack(file=buffer)
task.cancel()
while not task.cancelled():
await asyncio.sleep(0.01)
# Hopefully, the hang has been caused by inconsistent
# state, which should be much more meaningful than the
# timeout
validate_state(s, *workers)
# Remove as much of the traceback as possible; it's
# uninteresting boilerplate from utils_test and asyncio
# and not from the code being tested.
> raise asyncio.TimeoutError(
f"Test timeout ({timeout}) hit after {elapsed}s.\n"
"========== Test stack trace starts here ==========\n"
f"{buffer.getvalue()}"
) from None
E TimeoutError: Test timeout (30) hit after 30.033994500000063s.
E ========== Test stack trace starts here ==========
E Stack for <Task pending name='Task-239719' coro=<test_drop_stress() running at D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py:1336> wait_for=<Future pending cb=[Task.task_wakeup()]>> (most recent call last):
E File "D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py", line 1336, in test_drop_stress
E await tensordot_stress(c, s)
distributed\utils_test.py:1041: TimeoutError
Check warning on line 0 in distributed.tests.test_active_memory_manager
github-actions / Unit Test Results
3 out of 13 runs failed: test_ReduceReplicas_stress (distributed.tests.test_active_memory_manager)
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 35s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 35s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 36s]
Raw output
TimeoutError: Test timeout (30) hit after 30.06143750000001s.
========== Test stack trace starts here ==========
Stack for <Task pending name='Task-247882' coro=<test_ReduceReplicas_stress() running at D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py:1357> wait_for=<Future pending cb=[Task.task_wakeup()]>> (most recent call last):
File "D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py", line 1357, in test_ReduceReplicas_stress
await tensordot_stress(c, s)
args = (), kwds = {}
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
> return func(*args, **kwds)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\contextlib.py:81:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\contextlib.py:81: in inner
return func(*args, **kwds)
distributed\utils_test.py:1090: in test_func
return _run_and_close_tornado(async_fn_outer)
distributed\utils_test.py:380: in _run_and_close_tornado
return asyncio_run(inner_fn(), loop_factory=get_loop_factory())
distributed\compatibility.py:204: in asyncio_run
return runner.run(main)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\asyncio\runners.py:118: in run
return self._loop.run_until_complete(task)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\Lib\asyncio\base_events.py:654: in run_until_complete
return future.result()
distributed\utils_test.py:377: in inner_fn
return await async_fn(*args, **kwargs)
distributed\utils_test.py:1087: in async_fn_outer
return await utils_wait_for(async_fn(), timeout=timeout * 2)
distributed\utils.py:1910: in wait_for
return await fut
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
async def async_fn():
result = None
with dask.config.set(config):
async with (
_cluster_factory() as (s, workers),
_client_factory(s) as c,
):
args = [s] + workers
if c is not None:
args = [c] + args
try:
coro = func(*args, *outer_args, **kwargs)
task = asyncio.create_task(coro)
coro2 = utils_wait_for(
asyncio.shield(task), timeout=deadline.remaining
)
result = await coro2
validate_state(s, *workers)
except asyncio.TimeoutError:
assert task
elapsed = deadline.elapsed
buffer = io.StringIO()
# This stack indicates where the coro/test is suspended
task.print_stack(file=buffer)
task.cancel()
while not task.cancelled():
await asyncio.sleep(0.01)
# Hopefully, the hang has been caused by inconsistent
# state, which should be much more meaningful than the
# timeout
validate_state(s, *workers)
# Remove as much of the traceback as possible; it's
# uninteresting boilerplate from utils_test and asyncio
# and not from the code being tested.
> raise asyncio.TimeoutError(
f"Test timeout ({timeout}) hit after {elapsed}s.\n"
"========== Test stack trace starts here ==========\n"
f"{buffer.getvalue()}"
) from None
E TimeoutError: Test timeout (30) hit after 30.06143750000001s.
E ========== Test stack trace starts here ==========
E Stack for <Task pending name='Task-247882' coro=<test_ReduceReplicas_stress() running at D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py:1357> wait_for=<Future pending cb=[Task.task_wakeup()]>> (most recent call last):
E File "D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py", line 1357, in test_ReduceReplicas_stress
E await tensordot_stress(c, s)
distributed\utils_test.py:1041: TimeoutError
Check warning on line 0 in distributed.tests.test_active_memory_manager
github-actions / Unit Test Results
1 out of 13 runs failed: test_RetireWorker_stress[False] (distributed.tests.test_active_memory_manager)
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 35s]
Raw output
asyncio.exceptions.TimeoutError: Test timeout (30) hit after 29.989777899999808s.
========== Test stack trace starts here ==========
Stack for <Task pending name='Task-277358' coro=<test_RetireWorker_stress() running at D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py:1393> wait_for=<_GatheringFuture pending cb=[Task.task_wakeup()]>> (most recent call last):
File "D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py", line 1393, in test_RetireWorker_stress
await asyncio.gather(*tasks)
args = (), kwds = {'use_ReduceReplicas': False}
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
> return func(*args, **kwds)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\contextlib.py:79:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\contextlib.py:79: in inner
return func(*args, **kwds)
distributed\utils_test.py:1090: in test_func
return _run_and_close_tornado(async_fn_outer)
distributed\utils_test.py:380: in _run_and_close_tornado
return asyncio_run(inner_fn(), loop_factory=get_loop_factory())
distributed\compatibility.py:236: in asyncio_run
return loop.run_until_complete(main)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\asyncio\base_events.py:649: in run_until_complete
return future.result()
distributed\utils_test.py:377: in inner_fn
return await async_fn(*args, **kwargs)
distributed\utils_test.py:1087: in async_fn_outer
return await utils_wait_for(async_fn(), timeout=timeout * 2)
distributed\utils.py:1915: in wait_for
return await asyncio.wait_for(fut, timeout)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\asyncio\tasks.py:445: in wait_for
return fut.result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
async def async_fn():
result = None
with dask.config.set(config):
async with (
_cluster_factory() as (s, workers),
_client_factory(s) as c,
):
args = [s] + workers
if c is not None:
args = [c] + args
try:
coro = func(*args, *outer_args, **kwargs)
task = asyncio.create_task(coro)
coro2 = utils_wait_for(
asyncio.shield(task), timeout=deadline.remaining
)
result = await coro2
validate_state(s, *workers)
except asyncio.TimeoutError:
assert task
elapsed = deadline.elapsed
buffer = io.StringIO()
# This stack indicates where the coro/test is suspended
task.print_stack(file=buffer)
task.cancel()
while not task.cancelled():
await asyncio.sleep(0.01)
# Hopefully, the hang has been caused by inconsistent
# state, which should be much more meaningful than the
# timeout
validate_state(s, *workers)
# Remove as much of the traceback as possible; it's
# uninteresting boilerplate from utils_test and asyncio
# and not from the code being tested.
> raise asyncio.TimeoutError(
f"Test timeout ({timeout}) hit after {elapsed}s.\n"
"========== Test stack trace starts here ==========\n"
f"{buffer.getvalue()}"
) from None
E asyncio.exceptions.TimeoutError: Test timeout (30) hit after 29.989777899999808s.
E ========== Test stack trace starts here ==========
E Stack for <Task pending name='Task-277358' coro=<test_RetireWorker_stress() running at D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py:1393> wait_for=<_GatheringFuture pending cb=[Task.task_wakeup()]>> (most recent call last):
E File "D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py", line 1393, in test_RetireWorker_stress
E await asyncio.gather(*tasks)
distributed\utils_test.py:1041: TimeoutError
Check warning on line 0 in distributed.tests.test_active_memory_manager
github-actions / Unit Test Results
1 out of 13 runs failed: test_RetireWorker_stress[True] (distributed.tests.test_active_memory_manager)
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 35s]
Raw output
asyncio.exceptions.TimeoutError: Test timeout (30) hit after 30.00521030000027s.
========== Test stack trace starts here ==========
Stack for <Task pending name='Task-283725' coro=<test_RetireWorker_stress() running at D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py:1393> wait_for=<_GatheringFuture pending cb=[Task.task_wakeup()]>> (most recent call last):
File "D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py", line 1393, in test_RetireWorker_stress
await asyncio.gather(*tasks)
args = (), kwds = {'use_ReduceReplicas': True}
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
> return func(*args, **kwds)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\contextlib.py:79:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\contextlib.py:79: in inner
return func(*args, **kwds)
distributed\utils_test.py:1090: in test_func
return _run_and_close_tornado(async_fn_outer)
distributed\utils_test.py:380: in _run_and_close_tornado
return asyncio_run(inner_fn(), loop_factory=get_loop_factory())
distributed\compatibility.py:236: in asyncio_run
return loop.run_until_complete(main)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\asyncio\base_events.py:649: in run_until_complete
return future.result()
distributed\utils_test.py:377: in inner_fn
return await async_fn(*args, **kwargs)
distributed\utils_test.py:1087: in async_fn_outer
return await utils_wait_for(async_fn(), timeout=timeout * 2)
distributed\utils.py:1915: in wait_for
return await asyncio.wait_for(fut, timeout)
C:\Users\runneradmin\miniconda3\envs\dask-distributed\lib\asyncio\tasks.py:445: in wait_for
return fut.result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
async def async_fn():
result = None
with dask.config.set(config):
async with (
_cluster_factory() as (s, workers),
_client_factory(s) as c,
):
args = [s] + workers
if c is not None:
args = [c] + args
try:
coro = func(*args, *outer_args, **kwargs)
task = asyncio.create_task(coro)
coro2 = utils_wait_for(
asyncio.shield(task), timeout=deadline.remaining
)
result = await coro2
validate_state(s, *workers)
except asyncio.TimeoutError:
assert task
elapsed = deadline.elapsed
buffer = io.StringIO()
# This stack indicates where the coro/test is suspended
task.print_stack(file=buffer)
task.cancel()
while not task.cancelled():
await asyncio.sleep(0.01)
# Hopefully, the hang has been caused by inconsistent
# state, which should be much more meaningful than the
# timeout
validate_state(s, *workers)
# Remove as much of the traceback as possible; it's
# uninteresting boilerplate from utils_test and asyncio
# and not from the code being tested.
> raise asyncio.TimeoutError(
f"Test timeout ({timeout}) hit after {elapsed}s.\n"
"========== Test stack trace starts here ==========\n"
f"{buffer.getvalue()}"
) from None
E asyncio.exceptions.TimeoutError: Test timeout (30) hit after 30.00521030000027s.
E ========== Test stack trace starts here ==========
E Stack for <Task pending name='Task-283725' coro=<test_RetireWorker_stress() running at D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py:1393> wait_for=<_GatheringFuture pending cb=[Task.task_wakeup()]>> (most recent call last):
E File "D:\a\distributed\distributed\distributed\tests\test_active_memory_manager.py", line 1393, in test_RetireWorker_stress
E await asyncio.gather(*tasks)
distributed\utils_test.py:1041: TimeoutError
Check warning on line 0 in distributed.tests.test_actor
github-actions / Unit Test Results
All 13 runs failed: test_future_dependencies (distributed.tests.test_actor)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
AttributeError: 'str' object has no attribute 'key'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:53398', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:53399', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:53402', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_future_dependencies(c, s, a, b):
counter = c.submit(Counter, actor=True, workers=[a.address])
def f(a):
assert isinstance(a, Actor)
assert a._cls == Counter
x = c.submit(f, counter, workers=[b.address])
await x
> assert {ts.key for ts in s.tasks[x.key].dependencies} == {counter.key}
E AttributeError: 'str' object has no attribute 'key'
distributed\tests\test_actor.py:237: AttributeError
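The AttributeError indicates that, on this commit, the elements of s.tasks[x.key].dependencies are plain key strings rather than TaskState objects, so iterating with ts.key fails. A hedged diagnostic sketch that tolerates either representation; dep_keys is a hypothetical helper, not part of distributed, and not necessarily the intended fix:

def dep_keys(dependencies) -> set[str]:
    # Hypothetical helper: normalise a dependency set that may contain either
    # TaskState objects (which carry a .key attribute) or plain key strings.
    return {dep.key if hasattr(dep, "key") else dep for dep in dependencies}

# Possible usage inside the test body (assumption, not the actual fix):
# assert dep_keys(s.tasks[x.key].dependencies) == {counter.key}

Whether the test or the scheduler should change depends on whether dependencies holding key strings is intentional on this commit.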
Check warning on line 0 in distributed.tests.test_actor
github-actions / Unit Test Results
All 13 runs failed: test_Actors_create_dependencies (distributed.tests.test_actor)
artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.13-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.13-default-notci1/pytest.xml [took 0s]
Raw output
AssertionError: assert {'Counter-0b2...4dffe6c37149'} == {<TaskState '...7149' memory>}
Extra items in the left set:
'Counter-0b2d0b9e-c99c-4506-a7e4-4dffe6c37149'
Extra items in the right set:
<TaskState 'Counter-0b2d0b9e-c99c-4506-a7e4-4dffe6c37149' memory>
Full diff:
{
- <TaskState 'Counter-0b2d0b9e-c99c-4506-a7e4-4dffe6c37149' memory>,
? ----------- --------
+ 'Counter-0b2d0b9e-c99c-4506-a7e4-4dffe6c37149',
}
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:53592', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:53593', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:53596', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
@gen_cluster(client=True)
async def test_Actors_create_dependencies(c, s, a, b):
counter = await c.submit(Counter, actor=True)
future = c.submit(lambda x: None, counter)
await wait(future)
> assert s.tasks[future.key].dependencies == {s.tasks[counter.key]}
E AssertionError: assert {'Counter-0b2...4dffe6c37149'} == {<TaskState '...7149' memory>}
E
E Extra items in the left set:
E 'Counter-0b2d0b9e-c99c-4506-a7e4-4dffe6c37149'
E Extra items in the right set:
E <TaskState 'Counter-0b2d0b9e-c99c-4506-a7e4-4dffe6c37149' memory>
E
E Full diff:
E {
E - <TaskState 'Counter-0b2d0b9e-c99c-4506-a7e4-4dffe6c37149' memory>,
E ? ----------- --------
E + 'Counter-0b2d0b9e-c99c-4506-a7e4-4dffe6c37149',
E }
distributed\tests\test_actor.py:402: AssertionError
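For both actor failures the diff is easy to misread: the key text matches on both sides, but one set holds key strings while the other holds TaskState objects, and set equality compares the elements themselves, not their reprs. A minimal sketch using a stand-in FakeTaskState class instead of distributed's real TaskState:

class FakeTaskState:
    # Stand-in for distributed's TaskState (illustrative only).
    def __init__(self, key: str) -> None:
        self.key = key

    def __repr__(self) -> str:
        return f"<TaskState {self.key!r} memory>"


ts = FakeTaskState("Counter-0b2d0b9e")
# A TaskState object is never equal to its key string, so the sets differ
# even though the diff shows the same key text on both sides.
assert {ts} != {"Counter-0b2d0b9e"}
# Comparing keys on both sides succeeds.
assert {ts.key} == {"Counter-0b2d0b9e"}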