# NOTE(review): SOURCE was a copy-paste of a GitHub diff view (interleaved
# old/new line numbers, `-`/`+` change markers, and a trailing
# "0 commit comments" page footer) — not valid YAML. Reconstructed below is
# the post-diff state of this chunk: context lines plus `+` additions, with
# `-` deletions dropped. Key spacing normalized (`push :` -> `push:`).
#
# This chunk begins at file line 4, inside the `on:` trigger mapping; the
# file's `name:`/`on:` header lines sit above this chunk and are not visible
# here — confirm against the full file.
  push:
    branches:
      - main
  pull_request:
  workflow_dispatch:

jobs:
  run-tinystories:
    runs-on: macos-12
    # runs-on: self-hosted
    steps:
      - name: Checkout repo
        uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          # Quoted so YAML does not read the version as the float 3.8
          # (unquoted 3.10 would silently become 3.1).
          python-version: "3.8"
      - name: Print machine info
        run: |
          uname -a
          sysctl machdep.cpu.brand_string
          sysctl machdep.cpu.core_count
      - name: Install requirements
        run: |
          pip install -r requirements.txt
      - name: Download checkpoints
        run: |
          mkdir -p checkpoints/stories15M
          pushd checkpoints/stories15M
          wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt
          wget https://github.com/karpathy/llama2.c/raw/master/tokenizer.model
          popd
      - name: Run inference
        run: |
          export MODEL_PATH=checkpoints/stories15M/stories15M.pt
          # AOT Inductor export/run and the eager-vs-compiled diff were
          # commented out in this revision; kept for reference.
          # python aoti_export.py --checkpoint_path ${MODEL_PATH} --output_path ./${MODEL_REPO}.so
          python generate.py --checkpoint_path ${MODEL_PATH} --temperature 0 --device cpu | tee output_eager
          python generate.py --compile --checkpoint_path ${MODEL_PATH} --temperature 0 --device cpu | tee output_compiled
          # python generate.py --checkpoint_path ${MODEL_PATH} --temperature 0 --dso ./${MODEL_REPO}.so |& tee output_aoti
          # echo "******************************************"
          # echo "********* EAGER vs TORCH.COMPILE *********"
          # echo "******************************************"
          # diff output_eager output_compiled
          # echo "******************************************"
          # echo "********* EAGER vs AOT INDUCTOR *********"
          # echo "******************************************"
          # diff output_eager output_aoti