
Commit 060cd21

better snippet for automatic-speech-recognition

1 parent 0900c99
File tree: 2 files changed (+47, -1 lines)


packages/tasks/src/snippets/python.spec.ts

Lines changed: 32 additions & 0 deletions
@@ -3,6 +3,38 @@ import { describe, expect, it } from "vitest";
 import { getPythonInferenceSnippet } from "./python";
 
 describe("inference API snippets", () => {
+	it("automatic-speech-recognition", async () => {
+		const model: ModelDataMinimal = {
+			id: "openai/whisper-large-v3-turbo",
+			pipeline_tag: "automatic-speech-recognition",
+			tags: [],
+			inference: "",
+		};
+		const snippets = getPythonInferenceSnippet(model, "api_token") as InferenceSnippet[];
+
+		expect(snippets.length).toEqual(2);
+
+		expect(snippets[0].client).toEqual("huggingface_hub");
+		expect(snippets[0].content).toEqual(`from huggingface_hub import InferenceClient
+client = InferenceClient("openai/whisper-large-v3-turbo", token="api_token")
+
+output = client.automatic_speech_recognition("sample1.flac")`);
+
+		expect(snippets[1].client).toEqual("requests");
+		expect(snippets[1].content).toEqual(`import requests
+
+API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3-turbo"
+headers = {"Authorization": "Bearer api_token"}
+
+def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.post(API_URL, headers=headers, data=data)
+    return response.json()
+
+output = query("sample1.flac")`);
+	});
+
 	it("conversational llm", async () => {
 		const model: ModelDataMinimal = {
 			id: "meta-llama/Llama-3.1-8B-Instruct",

packages/tasks/src/snippets/python.ts

Lines changed: 15 additions & 1 deletion
@@ -33,6 +33,20 @@ output = query(${getModelInputSnippet(model)})`,
 
 // Specific snippets
 
+const snippetAutomaticSpeechRecognition = (model: ModelDataMinimal, accessToken: string): InferenceSnippet[] => {
+	return [
+		{
+			client: "huggingface_hub",
+			content: `${snippetImportInferenceClient(model, accessToken)}
+output = client.automatic_speech_recognition(${getModelInputSnippet(model)})`,
+		},
+		{
+			client: "requests",
+			content: snippetFile(model).content,
+		},
+	];
+};
+
 const snippetConversational = (
 	model: ModelDataMinimal,
 	accessToken: string,
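The huggingface_hub variant is assembled from snippetImportInferenceClient, whose source is not part of this diff. Judging from the expected output in the spec above, it presumably renders the import-and-construct preamble along these lines; this is a hypothetical sketch under that assumption, not the helper's actual code.

// Hypothetical: reconstructed from the expected test output; the real helper may differ.
const snippetImportInferenceClientSketch = (model: ModelDataMinimal, accessToken: string): string =>
	`from huggingface_hub import InferenceClient
client = InferenceClient("${model.id}", token="${accessToken}")
`;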
@@ -285,7 +299,7 @@ const pythonSnippets: Partial<
 	"image-text-to-text": snippetConversational,
 	"fill-mask": snippetBasic,
 	"sentence-similarity": snippetBasic,
-	"automatic-speech-recognition": snippetFile,
+	"automatic-speech-recognition": snippetAutomaticSpeechRecognition,
 	"text-to-image": snippetTextToImage,
 	"text-to-speech": snippetTextToAudio,
 	"text-to-audio": snippetTextToAudio,
