Skip to content

Commit 7fcc1bf

Browse files
nunjunjnunjunj
authored and committed
run format.sh
1 parent 189b100 commit 7fcc1bf

File tree

3 files changed

+181
-154
lines changed

3 files changed

+181
-154
lines changed

examples/offline_inference_chat.py

Lines changed: 17 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -14,63 +14,37 @@ def print_outputs(outputs):
1414

1515
print("=" * 80)
1616

17-
# In this script, we demonstrate two ways to pass input to the chat method of the LLM class:
17+
# In this script, we demonstrate two ways to pass input to the chat method:
1818

1919
# Conversation with a list of dictionaries
2020
conversation = [
21+
{"role": "system", "content": "You are a helpful assistant"},
22+
{"role": "user", "content": "Hello"},
23+
{"role": "assistant", "content": "Hello! How can I assist you today?"},
2124
{
22-
'role': 'system',
23-
'content': "You are a helpful assistant"
24-
},
25-
{
26-
'role': 'user',
27-
'content': "Hello"
28-
},
29-
{
30-
'role': 'assistant',
31-
'content': "Hello! How can I assist you today?"
32-
},
33-
{
34-
'role': 'user',
35-
'content': "Write an essay about the importance of higher education."
25+
"role": "user",
26+
"content": "Write an essay about the importance of higher education.",
3627
},
3728
]
38-
outputs = llm.chat(conversation,
39-
sampling_params=sampling_params,
40-
use_tqdm=False)
29+
outputs = llm.chat(
30+
conversation, sampling_params=sampling_params, use_tqdm=False
31+
)
4132
print_outputs(outputs)
4233

43-
# Multiple conversations
34+
# Multiple conversations
4435
conversations = [
4536
[
46-
{
47-
'role': 'system',
48-
'content': "You are a helpful assistant"
49-
},
50-
{
51-
'role': 'user',
52-
'content': "What is dark matter?"
53-
},
37+
{"role": "system", "content": "You are a helpful assistant"},
38+
{"role": "user", "content": "What is dark matter?"},
5439
],
5540
[
41+
{"role": "system", "content": "You are a helpful assistant"},
42+
{"role": "user", "content": "How are you?"},
5643
{
57-
'role': 'system',
58-
'content': "You are a helpful assistant"
59-
},
60-
{
61-
'role': 'user',
62-
'content': "How are you?"
63-
},
64-
{
65-
'role':
66-
'assistant',
67-
'content':
68-
"I'm an AI, so I don't have feelings, but I'm here to help you!"
69-
},
70-
{
71-
'role': 'user',
72-
'content': "Tell me a joke."
44+
"role": "assistant",
45+
"content": "I'm an AI without feelings, but I'm here to help!",
7346
},
47+
{"role": "user", "content": "Tell me a joke."},
7448
],
7549
]
7650

tests/entrypoints/llm/test_generate.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -141,6 +141,7 @@ def test_multiple_sampling_params(llm: LLM):
141141
outputs = llm.generate(PROMPTS, sampling_params=None)
142142
assert len(PROMPTS) == len(outputs)
143143

144+
144145
def test_chat():
145146

146147
llm = LLM(model=MODEL_NAME)
@@ -160,7 +161,7 @@ def test_chat():
160161
assert len(outputs) == 1
161162

162163
prompt2 = "Describe Bangkok in 150 words."
163-
messages = [messages] + [[
164+
multiple_messages = [messages] + [[
164165
{
165166
"role": "system",
166167
"content": "You are a helpful assistant"
@@ -170,8 +171,8 @@ def test_chat():
170171
"content": prompt2
171172
},
172173
]]
173-
outputs = llm.chat(messages)
174-
assert len(outputs) == len(messages)
174+
outputs = llm.chat(multiple_messages)
175+
assert len(outputs) == len(multiple_messages)
175176

176177
sampling_params = [
177178
SamplingParams(temperature=0.01, top_p=0.95),

0 commit comments

Comments (0)