@@ -3,6 +3,38 @@ import { describe, expect, it } from "vitest";
 import { getPythonInferenceSnippet } from "./python";
 
 describe("inference API snippets", () => {
+	it("automatic-speech-recognition", async () => {
+		const model: ModelDataMinimal = {
+			id: "openai/whisper-large-v3-turbo",
+			pipeline_tag: "automatic-speech-recognition",
+			tags: [],
+			inference: "",
+		};
+		const snippets = getPythonInferenceSnippet(model, "api_token") as InferenceSnippet[];
+
+		expect(snippets.length).toEqual(2);
+
+		expect(snippets[0].client).toEqual("huggingface_hub");
+		expect(snippets[0].content).toEqual(`from huggingface_hub import InferenceClient
+client = InferenceClient("openai/whisper-large-v3-turbo", token="api_token")
+
+output = client.automatic_speech_recognition("sample1.flac")`);
+
+		expect(snippets[1].client).toEqual("requests");
+		expect(snippets[1].content).toEqual(`import requests
+
+API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3-turbo"
+headers = {"Authorization": "Bearer api_token"}
+
+def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.post(API_URL, headers=headers, data=data)
+    return response.json()
+
+output = query("sample1.flac")`);
+	});
+
 	it("conversational llm", async () => {
 		const model: ModelDataMinimal = {
 			id: "meta-llama/Llama-3.1-8B-Instruct",