run.py
import openai

# Replace this with the LLM of your choice; un-alignment-scripted o1 currently gives the best results.
def llm(x):
    return openai.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "system", "content": x}]
    ).choices[0].message.content

goal = input('what do you want, human?\n')
agent_code = open(__file__).read()  # the agent gets to read its own source
history = []

while True:
    prompt = f'''
Your output will be sent to python `exec`.
This means you must only output python code.
Specifically, you are responding as `llm` in this python code:
```
{agent_code}
```
This means you can:
- ask for input from the user
- access previously assigned variables
- call the `llm` function
- access the internet

history:
{history}

Your goal: {goal}
'''
    print(prompt)
    print('-----------------')
    response = llm(prompt)
    # strip a leading/trailing markdown code fence if the model added one
    response = response.strip()
    response = response.removeprefix('```python').removeprefix('```').removesuffix('```')
    print(response)
    input('Press enter to execute above code...')
    print('-----------------')
    try:
        # exec returns None; out only carries a traceback when the snippet fails
        out = exec(response, globals(), locals())
    except Exception:
        import traceback
        out = traceback.format_exc()
    print(out)
    input('Press enter to let agent continue...')
    print('-----------------')
    history.append((response, out))
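
A note on why the prompt can promise "access previously assigned variables": `exec(response, globals(), locals())` runs at module top level, where `locals()` is the module's global namespace, so names assigned by one generated snippet remain visible on the next loop iteration. Below is a minimal sketch of that mechanism, separate from run.py; the `shared_ns` dict is a hypothetical stand-in for the script's globals.

# sketch (illustration only, not part of run.py)
# exec writes assignments into the namespace dict it is given, so reusing the
# same dict across calls lets later snippets see names defined by earlier ones.
shared_ns = {}                      # hypothetical stand-in for the script's globals

exec("counter = 1", shared_ns)      # first generated snippet defines a name
exec("counter += 41", shared_ns)    # a later snippet can read and update it
print(shared_ns["counter"])         # prints 42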