diff --git a/packages/inference/src/snippets/python.ts b/packages/inference/src/snippets/python.ts
index b1f35bedc2..ef3df2564f 100644
--- a/packages/inference/src/snippets/python.ts
+++ b/packages/inference/src/snippets/python.ts
@@ -44,11 +44,11 @@ const snippetImportInferenceClient = (accessToken: string, provider: SnippetInfe
 from huggingface_hub import InferenceClient
 
 client = InferenceClient(
-    provider="${provider}",
-    api_key="${accessToken || "{API_TOKEN}"}"
+    provider="${provider}",
+    api_key="${accessToken || "{API_TOKEN}"}",
 )`;
 
-export const snippetConversational = (
+const snippetConversational = (
     model: ModelDataMinimal,
     accessToken: string,
     provider: SnippetInferenceProvider,
@@ -89,7 +89,7 @@ stream = client.chat.completions.create(
     model="${model.id}",
     messages=messages,
     ${configStr}
-    stream=True
+    stream=True,
 )
 
 for chunk in stream:
@@ -159,7 +159,7 @@ print(completion.choices[0].message)`,
     }
 };
 
-export const snippetZeroShotClassification = (model: ModelDataMinimal): InferenceSnippet[] => {
+const snippetZeroShotClassification = (model: ModelDataMinimal): InferenceSnippet[] => {
     return [
         {
             client: "requests",
@@ -176,12 +176,11 @@ output = query({
     ];
 };
 
-export const snippetZeroShotImageClassification = (model: ModelDataMinimal): InferenceSnippet[] => {
+const snippetZeroShotImageClassification = (model: ModelDataMinimal): InferenceSnippet[] => {
     return [
         {
             client: "requests",
-            content: `\
-def query(data):
+            content: `def query(data):
     with open(data["image_path"], "rb") as f:
         img = f.read()
     payload={
@@ -199,7 +198,7 @@ output = query({
     ];
 };
 
-export const snippetBasic = (
+const snippetBasic = (
     model: ModelDataMinimal,
     accessToken: string,
     provider: SnippetInferenceProvider
@@ -213,9 +212,8 @@ export const snippetBasic = (
 ${snippetImportInferenceClient(accessToken, provider)}
 
 result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
-    model="${model.id}",
     inputs=${getModelInputSnippet(model)},
-    provider="${provider}",
+    model="${model.id}",
 )
 
 print(result)
@@ -237,7 +235,7 @@ output = query({
     ];
 };
 
-export const snippetFile = (model: ModelDataMinimal): InferenceSnippet[] => {
+const snippetFile = (model: ModelDataMinimal): InferenceSnippet[] => {
     return [
         {
             client: "requests",
@@ -253,7 +251,7 @@ output = query(${getModelInputSnippet(model)})`,
     ];
 };
 
-export const snippetTextToImage = (
+const snippetTextToImage = (
     model: ModelDataMinimal,
     accessToken: string,
     provider: SnippetInferenceProvider,
@@ -268,7 +266,7 @@ ${snippetImportInferenceClient(accessToken, provider)}
 # output is a PIL.Image object
 image = client.text_to_image(
     ${getModelInputSnippet(model)},
-    model="${model.id}"
+    model="${model.id}",
 )`,
         },
         ...(provider === "fal-ai"
@@ -312,7 +310,7 @@ image = Image.open(io.BytesIO(image_bytes))`,
     ];
 };
 
-export const snippetTextToVideo = (
+const snippetTextToVideo = (
     model: ModelDataMinimal,
     accessToken: string,
     provider: SnippetInferenceProvider
@@ -326,14 +324,14 @@ ${snippetImportInferenceClient(accessToken, provider)}
 
 video = client.text_to_video(
     ${getModelInputSnippet(model)},
-    model="${model.id}"
+    model="${model.id}",
 )`,
         },
       ]
     : [];
 };
 
-export const snippetTabular = (model: ModelDataMinimal): InferenceSnippet[] => {
+const snippetTabular = (model: ModelDataMinimal): InferenceSnippet[] => {
     return [
         {
             client: "requests",
@@ -349,7 +347,7 @@ response = query({
     ];
 };
 
-export const snippetTextToAudio = (model: ModelDataMinimal): InferenceSnippet[] => {
+const snippetTextToAudio = (model: ModelDataMinimal): InferenceSnippet[] => {
     // Transformers TTS pipeline and api-inference-community (AIC) pipeline outputs are diverged
     // with the latest update to inference-api (IA).
     // Transformers IA returns a byte object (wav file), whereas AIC returns wav and sampling_rate.
@@ -374,8 +372,7 @@ Audio(audio_bytes)`,
         return [
             {
                 client: "requests",
-                content: `\
-def query(payload):
+                content: `def query(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.json()
 
@@ -390,26 +387,97 @@ Audio(audio, rate=sampling_rate)`,
     }
 };
 
-export const snippetDocumentQuestionAnswering = (model: ModelDataMinimal): InferenceSnippet[] => {
+const snippetAutomaticSpeechRecognition = (
+    model: ModelDataMinimal,
+    accessToken: string,
+    provider: SnippetInferenceProvider
+): InferenceSnippet[] => {
+    return [
+        {
+            client: "huggingface_hub",
+            content: `${snippetImportInferenceClient(accessToken, provider)}
+output = client.automatic_speech_recognition(${getModelInputSnippet(model)}, model="${model.id}")`,
+        },
+        snippetFile(model)[0],
+    ];
+};
+
+const snippetDocumentQuestionAnswering = (
+    model: ModelDataMinimal,
+    accessToken: string,
+    provider: SnippetInferenceProvider
+): InferenceSnippet[] => {
+    const inputsAsStr = getModelInputSnippet(model) as string;
+    const inputsAsObj = JSON.parse(inputsAsStr);
+    return [
+        {
+            client: "huggingface_hub",
+            content: `${snippetImportInferenceClient(accessToken, provider)}
+output = client.document_question_answering(
+    "${inputsAsObj.image}",
+    question="${inputsAsObj.question}",
+    model="${model.id}",
+)`,
+        },
         {
             client: "requests",
-            content: `\
-def query(payload):
+            content: `def query(payload):
     with open(payload["image"], "rb") as f:
         img = f.read()
-    payload["image"] = base64.b64encode(img).decode("utf-8")
+    payload["image"] = base64.b64encode(img).decode("utf-8")
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.json()
 
 output = query({
-    "inputs": ${getModelInputSnippet(model)},
+    "inputs": ${inputsAsStr},
 })`,
         },
     ];
 };
 
-export const pythonSnippets: Partial<
+const snippetImageToImage = (
+    model: ModelDataMinimal,
+    accessToken: string,
+    provider: SnippetInferenceProvider
+): InferenceSnippet[] => {
+    const inputsAsStr = getModelInputSnippet(model) as string;
+    const inputsAsObj = JSON.parse(inputsAsStr);
+
+    return [
+        {
+            client: "huggingface_hub",
+            content: `${snippetImportInferenceClient(accessToken, provider)}
+# output is a PIL.Image object
+image = client.image_to_image(
+    "${inputsAsObj.image}",
+    prompt="${inputsAsObj.prompt}",
+    model="${model.id}",
+)`,
+        },
+        {
+            client: "requests",
+            content: `def query(payload):
+    with open(payload["inputs"], "rb") as f:
+        img = f.read()
+    payload["inputs"] = base64.b64encode(img).decode("utf-8")
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.content
+
+image_bytes = query({
+    "inputs": "${inputsAsObj.image}",
+    "parameters": {"prompt": "${inputsAsObj.prompt}"},
+})
+
+# You can access the image with PIL.Image for example
+import io
+from PIL import Image
+image = Image.open(io.BytesIO(image_bytes))`,
+        },
+    ];
+};
+
+const pythonSnippets: Partial<
     Record<
         PipelineType,
         (
@@ -435,7 +503,7 @@ export const pythonSnippets: Partial<
     "image-text-to-text": snippetConversational,
     "fill-mask": snippetBasic,
     "sentence-similarity": snippetBasic,
-    "automatic-speech-recognition": snippetFile,
+    "automatic-speech-recognition": snippetAutomaticSpeechRecognition,
     "text-to-image": snippetTextToImage,
     "text-to-video": snippetTextToVideo,
     "text-to-speech": snippetTextToAudio,
@@ -449,6 +517,7 @@ export const pythonSnippets: Partial<
     "image-segmentation": snippetFile,
     "document-question-answering": snippetDocumentQuestionAnswering,
     "image-to-text": snippetFile,
+    "image-to-image": snippetImageToImage,
     "zero-shot-image-classification": snippetZeroShotImageClassification,
 };
 
@@ -471,17 +540,24 @@ export function getPythonInferenceSnippet(
     return snippets.map((snippet) => {
         return {
             ...snippet,
-            content:
-                snippet.client === "requests"
-                    ? `\
-import requests
-
-API_URL = "${openAIbaseUrl(provider)}"
-headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
-
-${snippet.content}`
-                    : snippet.content,
+            content: addImportsToSnippet(snippet.content, model, accessToken),
         };
     });
 }
 }
+
+const addImportsToSnippet = (snippet: string, model: ModelDataMinimal, accessToken: string): string => {
+    if (snippet.includes("requests")) {
+        snippet = `import requests
+
+API_URL = "https://router.huggingface.co/hf-inference/models/${model.id}"
+headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
+
+${snippet}`;
+    }
+    if (snippet.includes("base64")) {
+        snippet = `import base64
+${snippet}`;
+    }
+    return snippet;
+};
diff --git a/packages/tasks-gen/scripts/generate-snippets-fixtures.ts b/packages/tasks-gen/scripts/generate-snippets-fixtures.ts
index a15a7e2c5f..b22d7551e4 100644
--- a/packages/tasks-gen/scripts/generate-snippets-fixtures.ts
+++ b/packages/tasks-gen/scripts/generate-snippets-fixtures.ts
@@ -31,6 +31,17 @@ const TEST_CASES: {
     providers: SnippetInferenceProvider[];
     opts?: Record<string, unknown>;
 }[] = [
+    {
+        testName: "automatic-speech-recognition",
+        model: {
+            id: "openai/whisper-large-v3-turbo",
+            pipeline_tag: "automatic-speech-recognition",
+            tags: [],
+            inference: "",
+        },
+        languages: ["py"],
+        providers: ["hf-inference"],
+    },
     {
         testName: "conversational-llm-non-stream",
         model: {
@@ -79,6 +90,28 @@ const TEST_CASES: {
         providers: ["hf-inference", "fireworks-ai"],
         opts: { streaming: true },
     },
+    {
+        testName: "document-question-answering",
+        model: {
+            id: "impira/layoutlm-invoices",
+            pipeline_tag: "document-question-answering",
+            tags: [],
+            inference: "",
+        },
+        languages: ["py"],
+        providers: ["hf-inference"],
+    },
+    {
+        testName: "image-to-image",
+        model: {
+            id: "stabilityai/stable-diffusion-xl-refiner-1.0",
+            pipeline_tag: "image-to-image",
+            tags: [],
+            inference: "",
+        },
+        languages: ["py"],
+        providers: ["hf-inference"],
+    },
     {
         testName: "text-to-image",
         model: {
diff --git a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/0.huggingface_hub.hf-inference.py b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/0.huggingface_hub.hf-inference.py
new file mode 100644
index 0000000000..9a945fcf65
--- /dev/null
+++ b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/0.huggingface_hub.hf-inference.py
@@ -0,0 +1,7 @@
+from huggingface_hub import InferenceClient
+
+client = InferenceClient(
+    provider="hf-inference",
+    api_key="api_token",
+)
+output = client.automatic_speech_recognition("sample1.flac", model="openai/whisper-large-v3-turbo")
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/1.requests.hf-inference.py b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/1.requests.hf-inference.py
new file mode 100644
index 0000000000..4acaa0ed5b
--- /dev/null
+++ b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/1.requests.hf-inference.py
@@ -0,0 +1,12 @@
+import requests
+
+API_URL = "https://router.huggingface.co/hf-inference/models/openai/whisper-large-v3-turbo"
+headers = {"Authorization": "Bearer api_token"}
+
+def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.post(API_URL, headers=headers, data=data)
+    return response.json()
+
+output = query("sample1.flac")
\ No newline at end of file
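As a usage note for the two ASR fixtures above: with the `huggingface_hub` client the result is a typed object whose transcription lives in a `text` attribute, and the raw `requests` variant typically receives the same field back as JSON. A minimal sketch (the `hf_xxx` token is a placeholder, and the exact response shape is an assumption, not something these fixtures verify):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="hf-inference", api_key="hf_xxx")  # placeholder token

# The typed client exposes the transcription as `.text`.
output = client.automatic_speech_recognition("sample1.flac", model="openai/whisper-large-v3-turbo")
print(output.text)
```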
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.hf-inference.py
index 5edd9b64e4..44e29d2a0b 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.hf-inference.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.hf-inference.py
@@ -1,8 +1,8 @@
 from huggingface_hub import InferenceClient
 
 client = InferenceClient(
-    provider="hf-inference",
-    api_key="api_token"
+    provider="hf-inference",
+    api_key="api_token",
 )
 
 messages = [
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.together.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.together.py
index 8a1753c525..ee348d6987 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.together.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.together.py
@@ -1,8 +1,8 @@
 from huggingface_hub import InferenceClient
 
 client = InferenceClient(
-    provider="together",
-    api_key="api_token"
+    provider="together",
+    api_key="api_token",
 )
 
 messages = [
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface_hub.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface_hub.hf-inference.py
index cd4f46e355..0f1fd0dbdc 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface_hub.hf-inference.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface_hub.hf-inference.py
@@ -1,8 +1,8 @@
 from huggingface_hub import InferenceClient
 
 client = InferenceClient(
-    provider="hf-inference",
-    api_key="api_token"
+    provider="hf-inference",
+    api_key="api_token",
 )
 
 messages = [
@@ -16,7 +16,7 @@
     model="meta-llama/Llama-3.1-8B-Instruct",
     messages=messages,
     max_tokens=500,
-    stream=True
+    stream=True,
 )
 
 for chunk in stream:
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface_hub.together.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface_hub.together.py
index d125f8415d..31d00d979c 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface_hub.together.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface_hub.together.py
@@ -1,8 +1,8 @@
 from huggingface_hub import InferenceClient
 
 client = InferenceClient(
-    provider="together",
-    api_key="api_token"
+    provider="together",
+    api_key="api_token",
 )
 
 messages = [
@@ -16,7 +16,7 @@
     model="meta-llama/Llama-3.1-8B-Instruct",
     messages=messages,
     max_tokens=500,
-    stream=True
+    stream=True,
 )
 
 for chunk in stream:
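The streaming fixtures above print each delta as it arrives; a common variation is to accumulate the deltas into one string. A sketch building on the same OpenAI-compatible chunk layout the fixtures use (placeholder token; `delta.content` can be `None` on some chunks, hence the guard):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="hf-inference", api_key="hf_xxx")  # placeholder token

stream = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    max_tokens=500,
    stream=True,
)

# Collect the streamed deltas instead of printing them one by one.
answer = "".join(chunk.choices[0].delta.content or "" for chunk in stream)
print(answer)
```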
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.fireworks-ai.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.fireworks-ai.py
index f85b19fff8..41f26d30a7 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.fireworks-ai.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.fireworks-ai.py
@@ -1,8 +1,8 @@
 from huggingface_hub import InferenceClient
 
 client = InferenceClient(
-    provider="fireworks-ai",
-    api_key="api_token"
+    provider="fireworks-ai",
+    api_key="api_token",
 )
 
 messages = [
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.hf-inference.py
index 726de28b45..507809756d 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.hf-inference.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.hf-inference.py
@@ -1,8 +1,8 @@
 from huggingface_hub import InferenceClient
 
 client = InferenceClient(
-    provider="hf-inference",
-    api_key="api_token"
+    provider="hf-inference",
+    api_key="api_token",
 )
 
 messages = [
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface_hub.fireworks-ai.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface_hub.fireworks-ai.py
index 52c40bd259..ad86364e4d 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface_hub.fireworks-ai.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface_hub.fireworks-ai.py
@@ -1,8 +1,8 @@
 from huggingface_hub import InferenceClient
 
 client = InferenceClient(
-    provider="fireworks-ai",
-    api_key="api_token"
+    provider="fireworks-ai",
+    api_key="api_token",
 )
 
 messages = [
@@ -27,7 +27,7 @@
     model="meta-llama/Llama-3.2-11B-Vision-Instruct",
     messages=messages,
     max_tokens=500,
-    stream=True
+    stream=True,
 )
 
 for chunk in stream:
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface_hub.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface_hub.hf-inference.py
index 5bcc6a9bcc..7d58f8020a 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface_hub.hf-inference.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface_hub.hf-inference.py
@@ -1,8 +1,8 @@
 from huggingface_hub import InferenceClient
 
 client = InferenceClient(
-    provider="hf-inference",
-    api_key="api_token"
+    provider="hf-inference",
+    api_key="api_token",
 )
 
 messages = [
@@ -27,7 +27,7 @@
     model="meta-llama/Llama-3.2-11B-Vision-Instruct",
     messages=messages,
     max_tokens=500,
-    stream=True
+    stream=True,
 )
 
 for chunk in stream:
diff --git a/packages/tasks-gen/snippets-fixtures/document-question-answering/0.huggingface_hub.hf-inference.py b/packages/tasks-gen/snippets-fixtures/document-question-answering/0.huggingface_hub.hf-inference.py
new file mode 100644
index 0000000000..46e437c63a
--- /dev/null
+++ b/packages/tasks-gen/snippets-fixtures/document-question-answering/0.huggingface_hub.hf-inference.py
@@ -0,0 +1,11 @@
+from huggingface_hub import InferenceClient
+
+client = InferenceClient(
+    provider="hf-inference",
+    api_key="api_token",
+)
+output = client.document_question_answering(
+    "cat.png",
+    question="What is in this image?",
+    model="impira/layoutlm-invoices",
+)
\ No newline at end of file
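A consumption note for the fixture above: `document_question_answering` returns a list of answer candidates, and in `huggingface_hub`'s typed client each element carries `answer` and `score` fields. A hedged sketch (the token, file name, and question below are placeholders, not fixture values):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="hf-inference", api_key="hf_xxx")  # placeholder token
output = client.document_question_answering(
    "invoice.png",  # placeholder local file
    question="What is the total amount?",  # placeholder question
    model="impira/layoutlm-invoices",
)
# Candidates come back ranked; the first is the most confident.
print(output[0].answer, output[0].score)
```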
diff --git a/packages/tasks-gen/snippets-fixtures/document-question-answering/1.requests.hf-inference.py b/packages/tasks-gen/snippets-fixtures/document-question-answering/1.requests.hf-inference.py
new file mode 100644
index 0000000000..51fed678d8
--- /dev/null
+++ b/packages/tasks-gen/snippets-fixtures/document-question-answering/1.requests.hf-inference.py
@@ -0,0 +1,19 @@
+import base64
+import requests
+
+API_URL = "https://router.huggingface.co/hf-inference/models/impira/layoutlm-invoices"
+headers = {"Authorization": "Bearer api_token"}
+
+def query(payload):
+    with open(payload["image"], "rb") as f:
+        img = f.read()
+    payload["image"] = base64.b64encode(img).decode("utf-8")
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.json()
+
+output = query({
+    "inputs": {
+        "image": "cat.png",
+        "question": "What is in this image?"
+    },
+})
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/0.huggingface_hub.hf-inference.py b/packages/tasks-gen/snippets-fixtures/image-to-image/0.huggingface_hub.hf-inference.py
new file mode 100644
index 0000000000..c21949966a
--- /dev/null
+++ b/packages/tasks-gen/snippets-fixtures/image-to-image/0.huggingface_hub.hf-inference.py
@@ -0,0 +1,12 @@
+from huggingface_hub import InferenceClient
+
+client = InferenceClient(
+    provider="hf-inference",
+    api_key="api_token",
+)
+# output is a PIL.Image object
+image = client.image_to_image(
+    "cat.png",
+    prompt="Turn the cat into a tiger.",
+    model="stabilityai/stable-diffusion-xl-refiner-1.0",
+)
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/1.requests.hf-inference.py b/packages/tasks-gen/snippets-fixtures/image-to-image/1.requests.hf-inference.py
new file mode 100644
index 0000000000..2d16ba05f5
--- /dev/null
+++ b/packages/tasks-gen/snippets-fixtures/image-to-image/1.requests.hf-inference.py
@@ -0,0 +1,22 @@
+import base64
+import requests
+
+API_URL = "https://router.huggingface.co/hf-inference/models/stabilityai/stable-diffusion-xl-refiner-1.0"
+headers = {"Authorization": "Bearer api_token"}
+
+def query(payload):
+    with open(payload["inputs"], "rb") as f:
+        img = f.read()
+    payload["inputs"] = base64.b64encode(img).decode("utf-8")
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.content
+
+image_bytes = query({
+    "inputs": "cat.png",
+    "parameters": {"prompt": "Turn the cat into a tiger."},
+})
+
+# You can access the image with PIL.Image for example
+import io
+from PIL import Image
+image = Image.open(io.BytesIO(image_bytes))
\ No newline at end of file
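Since both image-to-image fixtures above stop at obtaining the image, a small follow-up sketch: the `huggingface_hub` variant already returns a `PIL.Image`, so saving or inspecting the result needs no extra decoding (placeholder token and output path):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="hf-inference", api_key="hf_xxx")  # placeholder token
image = client.image_to_image(
    "cat.png",
    prompt="Turn the cat into a tiger.",
    model="stabilityai/stable-diffusion-xl-refiner-1.0",
)
print(image.size)        # (width, height) of the PIL.Image
image.save("tiger.png")  # placeholder output path
```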
I love you", - provider="hf-inference", + model="distilbert/distilbert-base-uncased-finetuned-sst-2-english", ) print(result) diff --git a/packages/tasks-gen/snippets-fixtures/text-classification/1.requests.hf-inference.py b/packages/tasks-gen/snippets-fixtures/text-classification/1.requests.hf-inference.py index b362a49bf6..281fca0df2 100644 --- a/packages/tasks-gen/snippets-fixtures/text-classification/1.requests.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/text-classification/1.requests.hf-inference.py @@ -1,6 +1,6 @@ import requests -API_URL = "https://router.huggingface.co/hf-inference/v1" +API_URL = "https://router.huggingface.co/hf-inference/models/distilbert/distilbert-base-uncased-finetuned-sst-2-english" headers = {"Authorization": "Bearer api_token"} def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image/0.huggingface_hub.fal-ai.py b/packages/tasks-gen/snippets-fixtures/text-to-image/0.huggingface_hub.fal-ai.py index f96622c78f..8f8a6b1021 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image/0.huggingface_hub.fal-ai.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-image/0.huggingface_hub.fal-ai.py @@ -1,12 +1,12 @@ from huggingface_hub import InferenceClient client = InferenceClient( - provider="fal-ai", - api_key="api_token" + provider="fal-ai", + api_key="api_token", ) # output is a PIL.Image object image = client.text_to_image( "Astronaut riding a horse", - model="black-forest-labs/FLUX.1-schnell" + model="black-forest-labs/FLUX.1-schnell", ) \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image/0.huggingface_hub.hf-inference.py b/packages/tasks-gen/snippets-fixtures/text-to-image/0.huggingface_hub.hf-inference.py index 2a488324f8..b4c48cec43 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image/0.huggingface_hub.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-image/0.huggingface_hub.hf-inference.py @@ -1,12 +1,12 @@ from huggingface_hub import InferenceClient client = InferenceClient( - provider="hf-inference", - api_key="api_token" + provider="hf-inference", + api_key="api_token", ) # output is a PIL.Image object image = client.text_to_image( "Astronaut riding a horse", - model="black-forest-labs/FLUX.1-schnell" + model="black-forest-labs/FLUX.1-schnell", ) \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image/1.requests.hf-inference.py b/packages/tasks-gen/snippets-fixtures/text-to-image/1.requests.hf-inference.py index 1c1fe427dc..a8e1ad9de0 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image/1.requests.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-image/1.requests.hf-inference.py @@ -1,6 +1,6 @@ import requests -API_URL = "https://router.huggingface.co/hf-inference/v1" +API_URL = "https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-schnell" headers = {"Authorization": "Bearer api_token"} def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/text-to-video/0.huggingface_hub.fal-ai.py b/packages/tasks-gen/snippets-fixtures/text-to-video/0.huggingface_hub.fal-ai.py index 54cc650531..fc96fd381b 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-video/0.huggingface_hub.fal-ai.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-video/0.huggingface_hub.fal-ai.py @@ -1,11 +1,11 @@ from huggingface_hub import InferenceClient client = InferenceClient( - provider="fal-ai", - api_key="api_token" + provider="fal-ai", + api_key="api_token", ) 
diff --git a/packages/tasks/src/snippets/inputs.ts b/packages/tasks/src/snippets/inputs.ts
index 4d08209c44..3c81a53bd8 100644
--- a/packages/tasks/src/snippets/inputs.ts
+++ b/packages/tasks/src/snippets/inputs.ts
@@ -27,9 +27,9 @@ const inputsTableQuestionAnswering = () =>
 
 const inputsVisualQuestionAnswering = () =>
     `{
-    "image": "cat.png",
-    "question": "What is in this image?"
-}`;
+        "image": "cat.png",
+        "question": "What is in this image?"
+    }`;
 
 const inputsQuestionAnswering = () =>
     `{
@@ -86,6 +86,11 @@ const inputsImageClassification = () => `"cats.jpg"`;
 
 const inputsImageToText = () => `"cats.jpg"`;
 
+const inputsImageToImage = () => `{
+    "image": "cat.png",
+    "prompt": "Turn the cat into a tiger."
+}`;
+
 const inputsImageSegmentation = () => `"cats.jpg"`;
 
 const inputsObjectDetection = () => `"cats.jpg"`;
@@ -120,6 +125,7 @@ const modelInputSnippets: {
     "fill-mask": inputsFillMask,
     "image-classification": inputsImageClassification,
     "image-to-text": inputsImageToText,
+    "image-to-image": inputsImageToImage,
     "image-segmentation": inputsImageSegmentation,
     "object-detection": inputsObjectDetection,
     "question-answering": inputsQuestionAnswering,