@@ -1355,15 +1355,14 @@ model = SwarmFormerModel.from_pretrained("${model.id}")
 
 const mlx_unknown = (model: ModelData): string[] => [
 	`# Download the model from the Hub
-pip install huggingface_hub hf_transfer
+pip install huggingface_hub[hf_xet]
 
-export HF_HUB_ENABLE_HF_TRANSFER=1
 huggingface-cli download --local-dir ${nameWithoutNamespace(model.id)} ${model.id}`,
 ];
 
 const mlxlm = (model: ModelData): string[] => [
 	`# Make sure mlx-lm is installed
-pip install --upgrade mlx-lm
+# pip install --upgrade mlx-lm
 
 # Generate text with mlx-lm
 from mlx_lm import load, generate
@@ -1376,7 +1375,7 @@ text = generate(model, tokenizer, prompt=prompt, verbose=True)`,
 
 const mlxchat = (model: ModelData): string[] => [
 	`# Make sure mlx-lm is installed
-pip install --upgrade mlx-lm
+# pip install --upgrade mlx-lm
 
 # Generate text with mlx-lm
 from mlx_lm import load, generate
@@ -1393,7 +1392,9 @@ text = generate(model, tokenizer, prompt=prompt, verbose=True)`,
 ];
 
 const mlxvlm = (model: ModelData): string[] => [
-	`Make sure mlx-vlm is installed
+	`# Make sure mlx-vlm is installed
+# pip install --upgrade mlx-vlm
+
 from mlx_vlm import load, generate
 from mlx_vlm.prompt_utils import apply_chat_template
 from mlx_vlm.utils import load_config
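For context, a minimal sketch of how one of these snippet builders renders after this change. Assumptions are flagged in the comments: `ModelData` is narrowed to the `id` field (the only one these templates read), the template body between the hunks is paraphrased from the visible diff lines, and the model id is illustrative — this is not the repo's actual definition.

// Hypothetical usage sketch, not part of the diff.
// ModelData is reduced here to the only field the templates above use.
type ModelData = { id: string };

// Mirrors the post-change mlxlm template shape; the middle lines are an
// assumption based on the hunk headers (`text = generate(...)` is quoted there).
const mlxlmSketch = (model: ModelData): string[] => [
	`# Make sure mlx-lm is installed
# pip install --upgrade mlx-lm

# Generate text with mlx-lm
from mlx_lm import load, generate

model, tokenizer = load("${model.id}")
prompt = "Once upon a time in"
text = generate(model, tokenizer, prompt=prompt, verbose=True)`,
];

// Renders a ready-to-copy Python snippet for a given model (illustrative id).
console.log(mlxlmSketch({ id: "mlx-community/Meta-Llama-3-8B-Instruct-4bit" })[0]);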