@@ -13,7 +13,7 @@ export interface LocalAppSnippet {
 	/**
 	 * Content (or command) to be run
 	 */
-	content: string;
+	content: string | string[];
 }

 /**
@@ -149,7 +149,7 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
 		`curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
 		`	-H "Content-Type: application/json" \\`,
 		`	--data '{`,
-		`		"model": "${model.id}"`,
+		`		"model": "${model.id}",`,
 		`		"messages": [`,
 		`			{"role": "user", "content": "Hello!"}`,
 		`		]`,
@@ -159,7 +159,7 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
 		{
 			title: "Install from pip",
 			setup: ["# Install vLLM from pip:", "pip install vllm"].join("\n"),
-			content: ["# Load and run the model:", `vllm serve "${model.id}"`, ...runCommand].join("\n"),
+			content: [`# Load and run the model:\nvllm serve "${model.id}"`, runCommand.join("\n")],
 		},
 		{
 			title: "Use Docker images",
@@ -175,10 +175,9 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
 			`	--model ${model.id}`,
 		].join("\n"),
 		content: [
-			"# Load and run the model:",
-			`docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
-			...runCommand,
-		].join("\n"),
+			`# Load and run the model:\ndocker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
+			runCommand.join("\n"),
+		],
 	},
 ];
};
0 commit comments