2 files changed: +7 −7 lines changed

File 1:
@@ -3,7 +3,7 @@
 import json
 
 import httpx
-from openai import AsyncOpenAI
+from openai import OpenAI
 
 from vllm.assets.audio import AudioAsset
 
@@ -13,15 +13,15 @@
 # Modify OpenAI's API key and API base to use vLLM's API server.
 openai_api_key = "EMPTY"
 openai_api_base = "http://localhost:8000/v1"
-client = AsyncOpenAI(
+client = OpenAI(
     api_key=openai_api_key,
     base_url=openai_api_base,
 )
 
 
-async def main():
+def sync_openai():
     with open(str(mary_had_lamb), "rb") as f:
-        transcription = await client.audio.transcriptions.create(
+        transcription = client.audio.transcriptions.create(
             file=f,
             model="openai/whisper-small",
             language="en",
@@ -30,7 +30,7 @@ async def main():
         print("transcription result:", transcription.text)
 
 
-asyncio.run(main())
+sync_openai()
 
 
 # OpenAI Transcription API client does not support streaming.
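The example keeps its httpx import, and the trailing comment notes that the OpenAI client cannot stream transcription results, so any streaming demo would have to go over raw HTTP. Below is a minimal, hypothetical sketch of that path; the /v1/audio/transcriptions URL follows the base URL configured above, but the "stream" form field and the server-sent-events chunk shape are assumptions, not something this diff confirms.

# Hypothetical sketch: stream a transcription over raw HTTP with httpx,
# since the OpenAI client does not support streaming for this endpoint.
import asyncio
import json

import httpx


async def stream_transcription(audio_path: str) -> None:
    url = "http://localhost:8000/v1/audio/transcriptions"
    headers = {"Authorization": "Bearer EMPTY"}
    # "stream" as a form field is an assumption for this sketch.
    data = {"model": "openai/whisper-small", "language": "en",
            "stream": "true"}
    with open(audio_path, "rb") as f:
        async with httpx.AsyncClient() as client:
            async with client.stream("POST", url, headers=headers,
                                     data=data, files={"file": f}) as resp:
                # Assume server-sent events: lines of "data: {...}".
                async for line in resp.aiter_lines():
                    if line.startswith("data: ") and "[DONE]" not in line:
                        chunk = json.loads(line[len("data: "):])
                        print(chunk)


asyncio.run(stream_transcription("mary_had_lamb.wav"))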
File 2:

@@ -346,8 +346,8 @@ async def transcription_stream_generator(
             # NOTE(NickLucche) user can't pass encoder prompts directly
             # at least not to Whisper. One indicator of the encoder
             # amount of processing is the log-mel spectrogram length.
-            num_prompt_tokens = ceil(audio_duration_s * self.model_sr /
-                                     self.hop_length)
+            num_prompt_tokens += ceil(audio_duration_s *
+                                      self.model_sr / self.hop_length)
 
             # We need to do it here, because if there are exceptions in
             # the result_generator, it needs to be sent as the FIRST
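The changed lines estimate the prompt size from the log-mel spectrogram: duration times sample rate divided by the STFT hop gives the number of mel frames, and switching = to += suggests the estimate now accumulates onto an existing count rather than overwriting it. A quick sanity check of the arithmetic, assuming Whisper's usual preprocessing constants (the values below are assumed for illustration, not read from this diff):

# Back-of-envelope check of the frame-count estimate above.
# model_sr=16000 and hop_length=160 are Whisper's usual constants,
# assumed for this sketch rather than taken from the model config.
from math import ceil

model_sr = 16000   # audio sampling rate (Hz)
hop_length = 160   # STFT hop size -> 16000 / 160 = 100 frames per second

audio_duration_s = 30.0
num_prompt_tokens = ceil(audio_duration_s * model_sr / hop_length)
print(num_prompt_tokens)  # 3000 mel frames for a 30 s clip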