From 23730862bb34978f7bef6d0ea0c642ffce5e257a Mon Sep 17 00:00:00 2001
From: franz101
Date: Mon, 6 Nov 2023 15:51:30 -0800
Subject: [PATCH 1/3] Update streaming.py

Removed legacy model
---
 examples/streaming.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/streaming.py b/examples/streaming.py
index 168877dfc5..6a69487a9e 100755
--- a/examples/streaming.py
+++ b/examples/streaming.py
@@ -13,7 +13,7 @@
 def sync_main() -> None:
     client = OpenAI()
     response = client.completions.create(
-        model="text-davinci-002",
+        model="gpt-3.5-turbo",
         prompt="1,2,3,",
         max_tokens=5,
         temperature=0,

From 5ee1ca6c70da90085fd6c0e6411084f75aabfad3 Mon Sep 17 00:00:00 2001
From: franz101
Date: Mon, 6 Nov 2023 15:54:15 -0800
Subject: [PATCH 2/3] Update streaming.py

---
 examples/streaming.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/streaming.py b/examples/streaming.py
index 6a69487a9e..27cda3db73 100755
--- a/examples/streaming.py
+++ b/examples/streaming.py
@@ -13,7 +13,7 @@
 def sync_main() -> None:
     client = OpenAI()
     response = client.completions.create(
-        model="gpt-3.5-turbo",
+        model="gpt-3.5-turbo-instruct",
         prompt="1,2,3,",
         max_tokens=5,
         temperature=0,

From 8c55fecc470457eb9b754b32c32d528650a253e7 Mon Sep 17 00:00:00 2001
From: franz101
Date: Mon, 6 Nov 2023 16:04:21 -0800
Subject: [PATCH 3/3] more upgrades

---
 examples/async_demo.py | 2 +-
 examples/streaming.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/async_demo.py b/examples/async_demo.py
index 92c267c38f..793b4e43fb 100755
--- a/examples/async_demo.py
+++ b/examples/async_demo.py
@@ -10,7 +10,7 @@

 async def main() -> None:
     stream = await client.completions.create(
-        model="text-davinci-003",
+        model="gpt-3.5-turbo-instruct",
         prompt="Say this is a test",
         stream=True,
     )
diff --git a/examples/streaming.py b/examples/streaming.py
index 27cda3db73..368fa5f911 100755
--- a/examples/streaming.py
+++ b/examples/streaming.py
@@ -33,7 +33,7 @@ def sync_main() -> None:
 async def async_main() -> None:
     client = AsyncOpenAI()
     response = await client.completions.create(
-        model="text-davinci-002",
+        model="gpt-3.5-turbo-instruct",
         prompt="1,2,3,",
         max_tokens=5,
         temperature=0,