remove args · pytorch/executorch@f3845fe · GitHub
[go: up one dir, main page]

Skip to content

Commit f3845fe

Browse files
angelayilucylq
authored and committed
remove args
1 parent 94b4114 commit f3845fe

File tree

5 files changed

+26
-4
lines changed

5 files changed

+26
-4
lines changed

examples/models/llama/export_llama_lib.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -663,6 +663,14 @@ def _export_llama(args) -> LLMEdgeManager: # noqa: C901
663663
quantizers
664664
).export_to_edge()
665665

666+
accuracy = torch.allclose(
667+
builder_exported_to_edge.edge_manager.exported_program().module()(
668+
*builder_exported_to_edge.example_inputs
669+
),
670+
builder_exported_to_edge.model(*builder_exported_to_edge.example_inputs),
671+
)
672+
print(f"lfq: post to_edge accuracy: {accuracy}")
673+
666674
modelname = builder_exported_to_edge.modelname
667675

668676
# to_backend
@@ -800,6 +808,11 @@ def _export_llama(args) -> LLMEdgeManager: # noqa: C901
800808
# pyre-fixme[16]: Module `backends` has no attribute `qualcomm`.
801809
canonicalize_program(builder.edge_manager.exported_program())
802810

811+
print("lfq: exported program after to_backend, graph_module")
812+
print(builder.edge_manager.exported_program().graph_module)
813+
print("lfq: exported program after to_backend, print_readable")
814+
print(builder.edge_manager.exported_program().graph_module.print_readable())
815+
803816
builder = builder.to_executorch()
804817

805818
if args.profile_memory:

examples/models/llama3_2_vision/text_decoder/test/test_text_decoder.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,6 @@ def test_llama3_2_text_decoder_aoti(self) -> None:
7474
with tempfile.TemporaryDirectory() as tmpdir:
7575
path = torch._inductor.aoti_compile_and_package(
7676
ep,
77-
model.get_example_inputs(),
78-
kwargs=model.get_example_kwarg_inputs(),
7977
package_path=os.path.join(tmpdir, "text_decoder.pt2"),
8078
)
8179
encoder_aoti = torch._inductor.aoti_load_package(path)

examples/models/llama3_2_vision/vision_encoder/test/test_vision_encoder.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,6 @@ def test_flamingo_vision_encoder(self) -> None:
3636
with tempfile.TemporaryDirectory() as tmpdir:
3737
path = torch._inductor.aoti_compile_and_package(
3838
ep,
39-
model.get_example_inputs(),
4039
package_path=os.path.join(tmpdir, "vision_encoder.pt2"),
4140
)
4241
print(path)

extension/llm/export/builder.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -193,6 +193,19 @@ def export(self) -> "LLMEdgeManager":
193193
dynamic_shapes=dynamic_shape,
194194
strict=True,
195195
)
196+
print("lfq: pre exported_module ", exported_module.graph_module)
197+
print("lfq: pre exported graph ", exported_module.graph)
198+
199+
accuracy = torch.allclose(
200+
exported_module.module()(*self.example_inputs),
201+
self.model(*self.example_inputs),
202+
)
203+
print("lfq: pre to_edge accuracy ", accuracy)
204+
205+
exported_module = exported_module.run_decompositions({})
206+
print("lfq: post exported_module ", exported_module.graph_module)
207+
print("lfq: post exported graph ", exported_module.graph)
208+
196209
else:
197210
logging.info("Exporting with:")
198211
logging.info(f"inputs: {self.example_inputs}")

extension/llm/modules/test/test_position_embeddings.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -177,7 +177,6 @@ def test_tiled_token_positional_embedding_aoti(self):
177177
with tempfile.TemporaryDirectory() as tmpdir:
178178
path = torch._inductor.aoti_compile_and_package(
179179
tpe_ep,
180-
(self.x, self.aspect_ratio),
181180
package_path=os.path.join(tmpdir, "tpe.pt2"),
182181
)
183182
tpe_aoti = load_package(path)

0 commit comments

Comments (0)