diff --git a/listeners/assistant/assistant.py b/listeners/assistant/assistant.py
index 9bb6f16..320d41f 100644
--- a/listeners/assistant/assistant.py
+++ b/listeners/assistant/assistant.py
@@ -118,23 +118,23 @@ def respond_in_assistant_thread(
 
         returned_message = call_llm(messages_in_thread)
 
-        stream_response = client.chat_startStream(
+        streamer = client.chat_stream(
             channel=channel_id,
             recipient_team_id=team_id,
             recipient_user_id=user_id,
             thread_ts=thread_ts,
         )
-        stream_ts = stream_response["ts"]
 
-        # use of this for loop is specific to openai response method
+        # Loop over OpenAI response stream
+        # https://platform.openai.com/docs/api-reference/responses/create
         for event in returned_message:
             if event.type == "response.output_text.delta":
-                client.chat_appendStream(channel=channel_id, ts=stream_ts, markdown_text=f"{event.delta}")
+                streamer.append(markdown_text=f"{event.delta}")
             else:
                 continue
 
         feedback_block = create_feedback_block()
-        client.chat_stopStream(channel=channel_id, ts=stream_ts, blocks=feedback_block)
+        streamer.stop(blocks=feedback_block)
 
     except Exception as e:
         logger.exception(f"Failed to handle a user message event: {e}")
diff --git a/listeners/events/app_mentioned.py b/listeners/events/app_mentioned.py
index e9876ba..ea34c38 100644
--- a/listeners/events/app_mentioned.py
+++ b/listeners/events/app_mentioned.py
@@ -34,23 +34,23 @@ def app_mentioned_callback(client: WebClient, event: dict, logger: Logger, say: Say):
 
         returned_message = call_llm([{"role": "user", "content": text}])
 
-        stream_response = client.chat_startStream(
-            channel=channel_id, recipient_team_id=team_id, recipient_user_id=user_id, thread_ts=thread_ts
+        streamer = client.chat_stream(
+            channel=channel_id,
+            recipient_team_id=team_id,
+            recipient_user_id=user_id,
+            thread_ts=thread_ts,
         )
-        stream_ts = stream_response["ts"]
-
 
         # Loop over OpenAI response stream
         # https://platform.openai.com/docs/api-reference/responses/create
         for event in returned_message:
             if event.type == "response.output_text.delta":
-                client.chat_appendStream(channel=channel_id, ts=stream_ts, markdown_text=f"{event.delta}")
+                streamer.append(markdown_text=f"{event.delta}")
             else:
                 continue
 
         feedback_block = create_feedback_block()
-        client.chat_stopStream(channel=channel_id, ts=stream_ts, blocks=feedback_block)
-
+        streamer.stop(blocks=feedback_block)
     except Exception as e:
         logger.exception(f"Failed to handle a user message event: {e}")
         say(f":warning: Something went wrong! ({e})")
diff --git a/requirements.txt b/requirements.txt
index 083826a..5cd8a51 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
-slack-sdk==3.36.0.dev5
-slack-bolt==1.26.0.dev2
+slack-sdk==3.36.0.dev6
+slack-bolt==1.26.0.dev3
 
 # If you use a different LLM vendor, replace this dependency
 openai