Skip to content

Commit 3c66c45

Browse files
mikekgfb authored and malfet committed
remove macos-12 test (#501)
1 parent efea63a commit 3c66c45

File tree

2 files changed

+75
-75
lines changed

2 files changed

+75
-75
lines changed

.github/workflows/pull.yml

Lines changed: 0 additions & 75 deletions
Original file line number | Diff line number | Diff line change
@@ -458,81 +458,6 @@ jobs:
458458
python torchchat.py generate stories15M
459459
python torchchat.py remove stories15m
460460
461-
test-tinystories-eager:
462-
strategy:
463-
matrix:
464-
runner: [macos-12]
465-
runs-on: ${{matrix.runner}}
466-
steps:
467-
- name: Checkout repo
468-
uses: actions/checkout@v2
469-
- name: Setup Python
470-
uses: actions/setup-python@v2
471-
with:
472-
python-version: 3.11
473-
- name: Print machine info
474-
run: |
475-
uname -a
476-
if [ $(uname -s) == Darwin ]; then
477-
sysctl machdep.cpu.brand_string
478-
sysctl machdep.cpu.core_count
479-
fi
480-
- name: Install requirements
481-
run: |
482-
pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
483-
pip install -r requirements.txt
484-
python3 -c 'import torch;print(f"torch: {torch.__version__, torch.version.git_version}")'
485-
- name: Download checkpoints
486-
run: |
487-
mkdir -p checkpoints/stories15M
488-
pushd checkpoints/stories15M
489-
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt
490-
wget https://github.com/karpathy/llama2.c/raw/master/tokenizer.model
491-
popd
492-
- name: Run inference
493-
run: |
494-
export MODEL_PATH=checkpoints/stories15M/stories15M.pt
495-
export MODEL_NAME=stories15M
496-
export MODEL_DIR=/tmp
497-
for DTYPE in bfloat16 float16 float32; do
498-
# if [ $(uname -s) == Darwin ]; then
499-
# export DTYPE=float16
500-
# fi
501-
python3 torchchat.py generate --dtype ${DTYPE} --checkpoint-path ${MODEL_PATH} --temperature 0
502-
503-
echo "******************************************"
504-
echo "******* Emb: channel-wise quantized ******"
505-
echo "******************************************"
506-
python3 torchchat.py generate --dtype ${DTYPE} --quant '{"embedding" : {"bitwidth": 8, "groupsize": 0}}' --checkpoint-path ${MODEL_PATH} --temperature 0
507-
508-
echo "******************************************"
509-
echo "******** Emb: group-wise quantized *******"
510-
echo "******************************************"
511-
python3 torchchat.py generate --dtype ${DTYPE} --quant '{"embedding" : {"bitwidth": 8, "groupsize": 8}}' --checkpoint-path ${MODEL_PATH} --temperature 0
512-
513-
echo "******************************************"
514-
echo "******* INT8 channel-wise quantized ******"
515-
echo "******************************************"
516-
python3 torchchat.py generate --dtype ${DTYPE} --quant '{"linear:int8" : {"bitwidth": 8, "groupsize": 0}}' --checkpoint-path ${MODEL_PATH} --temperature 0
517-
518-
echo "******************************************"
519-
echo "******** INT8 group-wise quantized *******"
520-
echo "******************************************"
521-
python3 torchchat.py generate --dtype ${DTYPE} --quant '{"linear:int8" : {"bitwidth": 8, "groupsize": 8}}' --checkpoint-path ${MODEL_PATH} --temperature 0
522-
523-
echo "******************************************"
524-
echo "******** INT4 group-wise quantized *******"
525-
echo "******************************************"
526-
527-
echo "INT4 should work on MacOS on x86, but cannot be tested"
528-
echo "because nightlies are too old!"
529-
530-
# python3 torchchat.py generate --dtype ${DTYPE} --quant '{"linear:int4" : {"groupsize": 32}}' --checkpoint-path ${MODEL_PATH} --temperature 0
531-
532-
echo "tests complete for ${DTYPE}"
533-
done
534-
535-
echo "tests complete for all dtypes!"
536461
test-mps:
537462
uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
538463
with:

parking_lot/macos-12-x86.yml

Lines changed: 75 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,75 @@
1+
test-tinystories-eager:
2+
strategy:
3+
matrix:
4+
runner: [macos-12]
5+
runs-on: ${{matrix.runner}}
6+
steps:
7+
- name: Checkout repo
8+
uses: actions/checkout@v2
9+
- name: Setup Python
10+
uses: actions/setup-python@v2
11+
with:
12+
python-version: 3.11
13+
- name: Print machine info
14+
run: |
15+
uname -a
16+
if [ $(uname -s) == Darwin ]; then
17+
sysctl machdep.cpu.brand_string
18+
sysctl machdep.cpu.core_count
19+
fi
20+
- name: Install requirements
21+
run: |
22+
pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
23+
pip install -r requirements.txt
24+
python3 -c 'import torch;print(f"torch: {torch.__version__, torch.version.git_version}")'
25+
- name: Download checkpoints
26+
run: |
27+
mkdir -p checkpoints/stories15M
28+
pushd checkpoints/stories15M
29+
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt
30+
wget https://github.com/karpathy/llama2.c/raw/master/tokenizer.model
31+
popd
32+
- name: Run inference
33+
run: |
34+
export MODEL_PATH=checkpoints/stories15M/stories15M.pt
35+
export MODEL_NAME=stories15M
36+
export MODEL_DIR=/tmp
37+
for DTYPE in bfloat16 float16 float32; do
38+
# if [ $(uname -s) == Darwin ]; then
39+
# export DTYPE=float16
40+
# fi
41+
python3 torchchat.py generate --dtype ${DTYPE} --checkpoint-path ${MODEL_PATH} --temperature 0
42+
43+
echo "******************************************"
44+
echo "******* Emb: channel-wise quantized ******"
45+
echo "******************************************"
46+
python3 torchchat.py generate --dtype ${DTYPE} --quant '{"embedding" : {"bitwidth": 8, "groupsize": 0}}' --checkpoint-path ${MODEL_PATH} --temperature 0
47+
48+
echo "******************************************"
49+
echo "******** Emb: group-wise quantized *******"
50+
echo "******************************************"
51+
python3 torchchat.py generate --dtype ${DTYPE} --quant '{"embedding" : {"bitwidth": 8, "groupsize": 8}}' --checkpoint-path ${MODEL_PATH} --temperature 0
52+
53+
echo "******************************************"
54+
echo "******* INT8 channel-wise quantized ******"
55+
echo "******************************************"
56+
python3 torchchat.py generate --dtype ${DTYPE} --quant '{"linear:int8" : {"bitwidth": 8, "groupsize": 0}}' --checkpoint-path ${MODEL_PATH} --temperature 0
57+
58+
echo "******************************************"
59+
echo "******** INT8 group-wise quantized *******"
60+
echo "******************************************"
61+
python3 torchchat.py generate --dtype ${DTYPE} --quant '{"linear:int8" : {"bitwidth": 8, "groupsize": 8}}' --checkpoint-path ${MODEL_PATH} --temperature 0
62+
63+
echo "******************************************"
64+
echo "******** INT4 group-wise quantized *******"
65+
echo "******************************************"
66+
67+
echo "INT4 should work on MacOS on x86, but cannot be tested"
68+
echo "because nightlies are too old!"
69+
70+
# python3 torchchat.py generate --dtype ${DTYPE} --quant '{"linear:int4" : {"groupsize": 32}}' --checkpoint-path ${MODEL_PATH} --temperature 0
71+
72+
echo "tests complete for ${DTYPE}"
73+
done
74+
75+
echo "tests complete for all dtypes!"

0 commit comments

Comments (0)