Day 19 · mjpieters/adventofcode@7840243 · GitHub
[go: up one dir, main page]

Skip to content
8000

Commit 7840243

Browse files
committed
Day 19
1 parent 4602907 commit 7840243

File tree

1 file changed

+345
-0
lines changed

1 file changed

+345
-0
lines changed

2023/Day 19.ipynb

Lines changed: 345 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,345 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "markdown",
5+
"id": "208d7d2d",
6+
"metadata": {},
7+
"source": [
8+
"# I feel like I'm in the flow\n",
9+
"\n",
10+
"- https://adventofcode.com/2023/day/19\n",
11+
"\n",
12+
"Part one seems to me to be a straightforward system of operator tests; just pass each part to the first workflow and have the workflow return the name of the next workflow or a sorting result.\n"
13+
]
14+
},
15+
{
16+
"cell_type": "code",
17+
"execution_count": 1,
18+
"id": "53c98f1c-cc24-47a8-927b-3b44116506c3",
19+
"metadata": {},
20+
"outputs": [
21+
{
22+
"data": {
23+
"text/plain": [
24+
"19114"
25+
]
26+
},
27+
"execution_count": 1,
28+
"metadata": {},
29+
"output_type": "execute_result"
30+
}
31+
],
32+
"source": [
33+
"import typing as t\n",
34+
"from dataclasses import dataclass\n",
35+
"from enum import Enum, IntEnum\n",
36+
"from operator import gt, lt\n",
37+
"\n",
38+
"OPERATORS = {\">\": gt, \"<\": lt}\n",
39+
"\n",
40+
"\n",
41+
"class Category(IntEnum):\n",
42+
" x = 0\n",
43+
" m = 1\n",
44+
" a = 2\n",
45+
" s = 3\n",
46+
"\n",
47+
"\n",
48+
"class Part(t.NamedTuple):\n",
49+
" x: int\n",
50+
" m: int\n",
51+
" a: int\n",
52+
" s: int\n",
53+
"\n",
54+
" @classmethod\n",
55+
" def from_line(cls, line: str) -> t.Self:\n",
56+
" kvpairs = (pair.split(\"=\") for pair in line.strip(\"{}\").split(\",\"))\n",
57+
" return cls(**{k: int(v) for k, v in kvpairs})\n",
58+
"\n",
59+
"\n",
60+
"class Result(Enum):\n",
61+
" accepted = \"A\"\n",
62+
" rejected = \"R\"\n",
63+
"\n",
64+
" @classmethod\n",
65+
" def from_target(cls, target: str) -> t.Self | str:\n",
66+
" try:\n",
67+
" return cls(target)\n",
68+
" except ValueError:\n",
69+
" return target\n",
70+
"\n",
71+
"\n",
72+
"@dataclass\n",
73+
"class Rule:\n",
74+
" op: t.Callable[[int, int], bool]\n",
75+
" category: Category\n",
76+
" value: int\n",
77+
" target: str | Result\n",
78+
"\n",
79+
" def __call__(self, part: Part) -> str | Result | None:\n",
80+
" return self.target if self.op(part[self.category], self.value) else None\n",
81+
"\n",
82+
" @classmethod\n",
83+
" def from_str(cls, rule: str) -> t.Self:\n",
84+
" expr, _, target = rule.partition(\":\")\n",
85+
" cat, op, value = expr.partition(\">\") if \">\" in expr else expr.partition(\"<\")\n",
86+
" return cls(OPERATORS[op], Category[cat], int(value), Result.from_target(target))\n",
87+
"\n",
88+
"\n",
89+
"@dataclass\n",
90+
"class Workflow:\n",
91+
" name: str\n",
92+
" rules: tuple[Rule, ...]\n",
93+
" else_: str | Result\n",
94+
"\n",
95+
" def __call__(self, part: Part) -> str | Result:\n",
96+
" return next(filter(None, (rule(part) for rule in self.rules)), self.else_)\n",
97+
"\n",
98+
" @classmethod\n",
99+
" def from_line(cls, line: str) -> t.Self:\n",
100+
" name, _, rules_and_fallback = line.rstrip(\"}\").partition(\"{\")\n",
101+
" *rules, fallback = rules_and_fallback.split(\",\")\n",
102+
" fallback = Result.from_target(fallback)\n",
103+
" return cls(name, tuple(map(Rule.from_str, rules)), fallback)\n",
104+
"\n",
105+
"\n",
106+
"@dataclass\n",
107+
"class System:\n",
108+
" workflows: dict[str, Workflow]\n",
109+
"\n",
110+
" def __call__(self, part: Part) -> bool:\n",
111+
" workflow = self.workflows[\"in\"]\n",
112+
" while True:\n",
113+
" match workflow(part):\n",
114+
" case Result.accepted:\n",
115+
" return True\n",
116+
" case Result.rejected:\n",
117+
" return False\n",
118+
" case str(target):\n",
119+
" workflow = self.workflows[target]\n",
120+
"\n",
121+
" @classmethod\n",
122+
" def from_text(cls, text: str) -> t.Self:\n",
123+
" return cls(\n",
124+
" {(wf := Workflow.from_line(line)).name: wf for line in text.splitlines()}\n",
125+
" )\n",
126+
"\n",
127+
"\n",
128+
"def parse(text: str) -> tuple[System, list[Part]]:\n",
129+
" workflows, parts = text.split(\"\\n\\n\")\n",
130+
" return System.from_text(workflows), [\n",
131+
" Part.from_line(line) for line in parts.splitlines()\n",
132+
" ]\n",
133+
"\n",
134+
"\n",
135+
"test_workflows_and_parts = \"\"\"\\\n",
136+
"px{a<2006:qkq,m>2090:A,rfg}\n",
137+
"pv{a>1716:R,A}\n",
138+
"lnx{m>1548:A,A}\n",
139+
"rfg{s<537:gd,x>2440:R,A}\n",
140+
"qs{s>3448:A,lnx}\n",
141+
"qkq{x<1416:A,crn}\n",
142+
"crn{x>2662:A,R}\n",
143+
"in{s<1351:px,qqz}\n",
144+
"qqz{s>2770:qs,m<1801:hdj,R}\n",
145+
"gd{a>3333:R,R}\n",
146+
"hdj{m>838:A,pv}\n",
147+
"\n",
148+
"{x=787,m=2655,a=1222,s=2876}\n",
149+
"{x=1679,m=44,a=2067,s=496}\n",
150+
"{x=2036,m=264,a=79,s=2244}\n",
151+
"{x=2461,m=1339,a=466,s=291}\n",
152+
"{x=2127,m=1623,a=2188,s=1013}\n",
153+
"\"\"\"\n",
154+
"\n",
155+
"test_system, test_parts = parse(test_workflows_and_parts)\n",
156+
"sum(map(sum, filter(test_system, test_parts)))"
157+
]
158+
},
159+
{
160+
"cell_type": "code",
161+
"execution_count": 2,
162+
"id": "66462832-595c-465a-b414-270effc27e96",
163+
"metadata": {},
164+
"outputs": [
165+
{
166+
"name": "stdout",
167+
"output_type": "stream",
168+
"text": [
169+
"Part 1: 409898\n"
170+
]
171+
}
172+
],
173+
"source": [
174+
"import aocd\n",
175+
"\n",
176+
"system, parts = parse(aocd.get_data(day=19, year=2023))\n",
177+
"print(\"Part 1:\", sum(map(sum, filter(system, parts))))"
178+
]
179+
},
180+
{
181+
"cell_type": "markdown",
182+
"id": "5b81a7fb",
183+
"metadata": {},
184+
"source": [
185+
"# Homing in on the goldilocks range\n",
186+
"\n",
187+
"Part two is a bit more interesting; instead of a single part we are now dealing with a range of values for each part category. Workflow rules sort these ranges into those that match and don't match.\n",
188+
"\n",
189+
"I've created replacements for each of my classes to handle ranges now:\n",
190+
"\n",
191+
"- Parts are replaced by a `PartsRange` class, which hold `range()` objects for each category\n",
192+
"- Rules have become `RangeRule` instances, and they return their target with two `PartsRange` objects, one for where the rule applies, and one where it doesn't.\n",
193+
"- The `RangeWorkflow` class models a workflow, and yields tuples with the next workflow name or a result, together with the `PartsRange` that this applies to. It'll apply the next rule to the _other_ `PartsRange`, where the current rule didn't apply, until the end of the rule list is reached and we return the alternative result with the remainder.\n",
194+
"- Finally, the `RangeSystem` keeps a queue of `RangeWorkflow` and `PartsRange` objects, and runs them until the queue is empty. Each workflow produces an iterable of targets and `PartsRange` objects, and if the target is a workflow name, then that workflow is added to the queue together with the constrained `PartsRange`. For _Accepted_ result, we can update a running total (the product of the range lengths if accepted), and we can just ignore _Rejected_ ranges.\n",
195+
"\n",
196+
"This completes part two very nice and fast, in about 2.5 ms.\n"
197+
]
198+
},
199+
{
200+
"cell_type": "code",
201+
"execution_count": 3,
202+
"id": "8cc4cfe4",
203+
"metadata": {},
204+
"outputs": [],
205+
"source": [
206+
"from collections import deque\n",
207+
"from math import prod\n",
208+
"\n",
209+
"\n",
210+
"class PartsRange(t.NamedTuple):\n",
211+
" x: range = range(1, 4001)\n",
212+
" m: range = range(1, 4001)\n",
213+
" a: range = range(1, 4001)\n",
214+
" s: range = range(1, 4001)\n",
215+
"\n",
216+
" @property\n",
217+
" def size(self) -> int:\n",
218+
" return prod(map(len, self))\n",
219+
"\n",
220+
"\n",
221+
"@dataclass\n",
222+
"class RangeRule:\n",
223+
" op: t.Literal[\">\", \"<\"]\n",
224+
" category: Category\n",
225+
" value: int\n",
226+
" target: str | Result\n",
227+
"\n",
228+
" def __call__(self, part: PartsRange) -> tuple[str | Result, PartsRange, PartsRange]:\n",
229+
" cat, bound = self.category, self.value\n",
230+
" r = part[cat]\n",
231+
" f, t = r.start, r.stop\n",
232+
" if self.op == \"<\":\n",
233+
" tr = range(0) if f >= bound else range(f, min(t, bound))\n",
234+
" fr = range(0) if t <= bound else range(max(f, bound), t)\n",
235+
" else: # \">\"\n",
236+
" tr = range(0) if t <= bound + 1 else range(max(f, bound + 1), t)\n",
237+
" fr = range(0) if f > bound else range(f, min(t, bound + 1))\n",
238+
" return (\n",
239+
" self.target,\n",
240+
" part._replace(**{cat.name: tr}),\n",
241+
" part._replace(**{cat.name: fr}),\n",
242+
" )\n",
243+
"\n",
244+
" @classmethod\n",
245+
" def from_rule(cls, rule: Rule) -> t.Self:\n",
246+
" op = \">\" if rule.op is gt else \"<\"\n",
247+
" return cls(op, rule.category, rule.value, rule.target)\n",
248+
"\n",
249+
"\n",
250+
"@dataclass\n",
251+
"class RangeWorkflow:\n",
252+
" name: str\n",
253+
" rules: tuple[RangeRule, ...]\n",
254+
" else_: str | Result\n",
255+
"\n",
256+
" def __call__(\n",
257+
" self, parts_range: PartsRange\n",
258+
" ) -> t.Iterator[tuple[str | Result, PartsRange]]:\n",
259+
" for rule in self.rules:\n",
260+
" res, true_range, parts_range = rule(parts_range)\n",
261+
" yield res, true_range\n",
262+
" yield self.else_, parts_range\n",
263+
"\n",
264+
" @classmethod\n",
265+
" def from_workflow(cls, wf: Workflow) -> t.Self:\n",
266+
" return cls(wf.name, tuple(map(RangeRule.from_rule, wf.rules)), wf.else_)\n",
267+
"\n",
268+
"\n",
269+
"@dataclass\n",
270+
"class RangeSystem:\n",
271+
" workflows: dict[str, RangeWorkflow]\n",
272+
"\n",
273+
" def __call__(self) -> int:\n",
274+
" wfs = self.workflows\n",
275+
" todo: deque[tuple[RangeWorkflow, PartsRange]] = deque(\n",
276+
" [(wfs[\"in\"], PartsRange())]\n",
277+
" )\n",
278+
" total = 0\n",
279+
" while todo:\n",
280+
" workflow, parts_range = todo.popleft()\n",
281+
" for res, pr in workflow(parts_range):\n",
282+
" match res:\n",
283+
" case Result.accepted:\n",
284+
" total += pr.size\n",
285+
" case Result.rejected:\n",
286+
" pass\n",
287+
" case str(target):\n",
288+
" todo.append((wfs[target], pr))\n",
289+
" return total\n",
290+
"\n",
291+
" @classmethod\n",
292+
" def from_system(cls, system: System) -> t.Self:\n",
293+
" wfs = {\n",
294+
" name: RangeWorkflow.from_workflow(wf)\n",
295+
" for name, wf in system.workflows.items()\n",
296+
" }\n",
297+
" return cls(wfs)\n",
298+
"\n",
299+
"\n",
300+
"test_range_system = RangeSystem.from_system(test_system)\n",
301+
"assert test_range_system() == 167409079868000"
302+
]
303+
},
304+
{
305+
"cell_type": "code",
306+
"execution_count": 4,
307+
"id": "cd77bb4a",
308+
"metadata": {},
309+
"outputs": [
310+
{
311+
"name": "stdout",
312+
"output_type": "stream",
313+
"text": [
314+
"Part 2: 113057405770956\n"
315+
]
316+
}
317+
],
318+
"source": [
319+
"range_system = RangeSystem.from_system(system)\n",
320+
"print(\"Part 2:\", range_system())"
321+
]
322+
}
323+
],
324+
"metadata": {
325+
"kernelspec": {
326+
"display_name": "Python 3 (ipykernel)",
327+
"language": "python",
328+
"name": "python3"
329+
},
330+
"language_info": {
331+
"codemirror_mode": {
332+
"name": "ipython",
333+
"version": 3
334+
},
335+
"file_extension": ".py",
336+
"mimetype": "text/x-python",
337+
"name": "python",
338+
"nbconvert_exporter": "python",
339+
"pygments_lexer": "ipython3",
341A
340+
"version": "3.12.1"
341+
}
342+
},
343+
"nbformat": 4,
344+
"nbformat_minor": 5
345+
}

0 commit comments

Comments
 (0)
0