Skip to content

Commit e3441b0

Browse files
YIWENX14 authored and facebook-github-bot committed
Add buck rules in coreml llama transformer (#9017)
Summary: Add buck rules in the coreml llama transformer for importing the modules to the internal repo.

Differential Revision: D70415647
1 parent 73acde9 commit e3441b0

File tree

4 files changed

+70
-8
lines changed

4 files changed

+70
-8
lines changed

examples/apple/coreml/llama/TARGETS

Lines changed: 66 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,66 @@
1+
# Any targets that should be shared between fbcode and xplat must be defined in
2+
# targets.bzl. This file can contain fbcode-only targets.
3+
4+
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
5+
6+
runtime.python_library(
7+
name = "llama_transformer",
8+
srcs = [
9+
"llama_transformer.py",
10+
],
11+
_is_external_target = True,
12+
base_module = "executorch.examples.apple.coreml.llama",
13+
visibility = [
14+
"//executorch/...",
15+
"@EXECUTORCH_CLIENTS",
16+
],
17+
deps = [
18+
"//caffe2:torch",
19+
"//executorch/examples/models/llama:llama_transformer",
20+
],
21+
)
22+
23+
runtime.python_library(
24+
name = "utils",
25+
srcs = [
26+
"utils.py",
27+
],
28+
_is_external_target = True,
29+
base_module = "executorch.examples.apple.coreml.llama",
30+
visibility = [
31+
"//executorch/...",
32+
"@EXECUTORCH_CLIENTS",
33+
],
34+
deps = [
35+
"//caffe2:torch",
36+
],
37+
)
38+
39+
runtime.python_binary(
40+
name = "export",
41+
srcs = [
42+
"export.py",
43+
],
44+
main_function = "executorch.examples.apple.coreml.llama.export.main",
45+
visibility = [
46+
"//executorch/...",
47+
"@EXECUTORCH_CLIENTS",
48+
],
49+
deps = [
50+
"fbsource//third-party/pypi/coremltools:coremltools",
51+
":llama_transformer",
52+
":utils",
53+
"//caffe2:torch",
54+
"//executorch/backends/apple/coreml:backend",
55+
"//executorch/backends/apple/coreml:partitioner",
56+
"//executorch/examples/models/llama:source_transformation",
57+
"//executorch/exir/backend:utils",
58+
"//executorch/exir/capture:config",
59+
"//executorch/exir/passes:lib",
60+
"//executorch/exir/passes:quant_fusion_pass",
61+
"//executorch/exir/passes:sym_shape_eval_pass",
62+
"//executorch/exir/program:program",
63+
"//executorch/extension/export_util:export_util",
64+
"//executorch/extension/llm/export:export_lib",
65+
],
66+
)

examples/apple/coreml/llama/export.py

Lines changed: 2 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -6,8 +6,6 @@
66

77
import argparse
88

9-
import sys
10-
119
import coremltools as ct
1210
import torch
1311
from executorch.backends.apple.coreml.compiler import CoreMLBackend # pyre-ignore
@@ -24,9 +22,8 @@
2422
from executorch.exir.program._program import to_edge_with_preserved_ops
2523
from executorch.extension.export_util.utils import save_pte_program
2624

27-
sys.path.insert(0, ".")
28-
from llama_transformer import InputManager, load_model
29-
from utils import replace_linear_with_split_linear
25+
from executorch.examples.apple.coreml.llama.llama_transformer import InputManager, load_model
26+
from executorch.examples.apple.coreml.llama.utils import replace_linear_with_split_linear
3027

3128

3229
def main() -> None:

examples/apple/coreml/llama/run.py

Lines changed: 1 addition & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -5,7 +5,6 @@
55
# LICENSE file in the root directory of this source tree.
66

77
import argparse
8-
import sys
98

109
import sentencepiece as spm
1110

@@ -14,10 +13,9 @@
1413
from executorch.runtime import Runtime
1514

1615

17-
sys.path.insert(0, ".")
1816
from executorch.examples.models.llama.runner.generation import next_token
1917
from executorch.examples.models.llama.tokenizer import tiktoken
20-
from llama_transformer import InputManager, load_model
18+
from executorch.examples.apple.coreml.llama.llama_transformer import InputManager, load_model
2119

2220

2321
class Tokenizer:

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -92,6 +92,7 @@ flatc = "executorch.data.bin:flatc"
9292
# TODO(mnachin T180504136): Do not put examples/models
9393
# into core pip packages. Refactor out the necessary utils
9494
# or core models files into a separate package.
95+
"executorch.examples.apple" = "examples/apple"
9596
"executorch.examples.models" = "examples/models"
9697
"executorch.exir" = "exir"
9798
"executorch.extension" = "extension"

0 commit comments

Comments (0)