diff --git a/src/lib/openai.ts b/src/lib/openai.ts
index 4e04edb..7240cc8 100644
--- a/src/lib/openai.ts
+++ b/src/lib/openai.ts
@@ -11,6 +11,7 @@ import { backOff } from "exponential-backoff";
 export type DalleImageSize = 256 | 512 | 1024;
 export interface OpenAIOptions {
   apiKey: string;
+  customHeaders?: string;
   completionEngine?: string;
   temperature?: number;
   maxTokens?: number;
@@ -21,6 +22,7 @@ export interface OpenAIOptions {
 
 const OpenAIDefaults = (apiKey: string): OpenAIOptions => ({
   apiKey,
+  customHeaders: "",
   completionEngine: "gpt-3.5-turbo",
   temperature: 1.0,
   maxTokens: 1000,
@@ -57,7 +59,7 @@ const retryOptions = {
 
 export async function whisper(file: File,openAiOptions:OpenAIOptions): Promise<string> {
   const apiKey = openAiOptions.apiKey;
-  const baseUrl = openAiOptions.completionEndpoint ? openAiOptions.completionEndpoint : "https://api.openai.com/v1";
+  const baseUrl = openAiOptions.completionEndpoint ? openAiOptions.completionEndpoint : "https://api.openai.com/v1/chat/completions";
   const model = 'whisper-1';
 
   // Create a FormData object and append the file
@@ -194,6 +196,7 @@ export async function openAI(
 }
 
 export async function openAIWithStream(
+  prompt: string,
   input: string,
   openAiOptions: OpenAIOptions,
   onContent: (content: string) => void,
@@ -205,7 +208,9 @@ export async function openAIWithStream(
   try {
     if (engine.startsWith("gpt-3.5") || engine.startsWith("gpt-4")) {
       const inputMessages: ChatCompletionRequestMessage[] = [{ role: "user", content: input }];
-      if (openAiOptions.chatPrompt && openAiOptions.chatPrompt.length > 0) {
+      if (prompt) {
+        inputMessages.unshift({ role: "system", content: prompt });
+      } else if (openAiOptions.chatPrompt && openAiOptions.chatPrompt.length > 0) {
         inputMessages.unshift({ role: "system", content: openAiOptions.chatPrompt });
       }
       const body = {
@@ -219,15 +224,22 @@ export async function openAIWithStream(
         stream: true
       }
       const response = await backOff(
-        () =>
-          fetch(`${options.completionEndpoint}/chat/completions`, {
+        () => {
+          let headers: Record<string, string> = {
+            Authorization: `Bearer ${options.apiKey}`,
+            'Content-Type': 'application/json',
+            'Accept': 'text/event-stream'
+          };
+          if (options.customHeaders && options.customHeaders.length > 0) {
+            options.customHeaders.split("\n").forEach(header => {
+              const [key, ...rest] = header.split(":");
+              if (key && rest.length > 0) headers[key.trim()] = rest.join(":").trim();
+            });
+          }
+          return fetch(`${options.completionEndpoint}`, {
             method: "POST",
             body: JSON.stringify(body),
-            headers: {
-              Authorization: `Bearer ${options.apiKey}`,
-              'Content-Type': 'application/json',
-              'Accept': 'text/event-stream'
-            }
+            headers: headers
           }).then((response) => {
             if (response.ok && response.body) {
               const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
@@ -250,7 +262,8 @@
 
               let res = ""
               for (let i = 0; i < data.length; i++) {
-                res += data[i].choices[0]?.delta?.content || ""
+                if (data[i])
+                  res += data[i].choices[0]?.delta?.content || ""
               }
               result += res
               onContent(res)
@@ -260,7 +273,8 @@
             } else {
               return Promise.reject(response);
             }
-          }),
+          })
+        },
         retryOptions
       );
       const choices = (response as CreateChatCompletionResponse)?.choices;
diff --git a/src/lib/rawCommands.ts b/src/lib/rawCommands.ts
index 62a16b7..b855f96 100644
--- a/src/lib/rawCommands.ts
+++ b/src/lib/rawCommands.ts
@@ -88,7 +88,7 @@ export async function runGptBlock(b: IHookEvent) {
     result = openAISettings.injectPrefix + result;
   }
 
-  await openAIWithStream(currentBlock.content, openAISettings, async (content: string) => {
+  await openAIWithStream(currentBlock.prompt, currentBlock.content, openAISettings, async (content: string) => {
     result += content || "";
     if(null != insertBlock) {
       await logseq.Editor.updateBlock(insertBlock.uuid, result);
@@ -108,6 +108,7 @@ export async function runGptPage(b: IHookEvent) {
   const openAISettings = getOpenaiSettings();
   validateSettings(openAISettings);
 
+  const prompt = (await logseq.Editor.getBlock(b.uuid))?.prompt;
   const pageContents = await getPageContentFromBlock(b.uuid);
   const currentBlock = await logseq.Editor.getBlock(b.uuid);
 
@@ -135,7 +136,7 @@
     result = openAISettings.injectPrefix + result;
   }
 
-  await openAIWithStream(pageContents, openAISettings, async (content: string) => {
+  await openAIWithStream(prompt, pageContents, openAISettings, async (content: string) => {
     result += content || "";
     if(null != insertBlock) {
       await logseq.Editor.updateBlock(insertBlock.uuid, result);
diff --git a/src/lib/settings.ts b/src/lib/settings.ts
index 0cd4d32..0108429 100644
--- a/src/lib/settings.ts
+++ b/src/lib/settings.ts
@@ -24,10 +24,17 @@ export const settingsSchema: SettingSchemaDesc[] = [
   {
     key: "chatCompletionEndpoint",
     type: "string",
-    default: "http://api.openai.com/v1",
+    default: "https://api.openai.com/v1/chat/completions",
     title: "OpenAI API Completion Endpoint",
    description: "The endpoint to use for OpenAI API completion requests. You shouldn't need to change this."
   },
+  {
+    key: "customHeaders",
+    type: "string",
+    default: "",
+    title: "OpenAI API Custom Headers",
+    description: "Custom headers to use for OpenAI API completion requests. You typically don't need to change this."
+  },
   {
     key: "chatPrompt",
     type: "string",
@@ -90,6 +97,7 @@ function unescapeNewlines(s: string) {
 
 export function getOpenaiSettings(): PluginOptions {
   const apiKey = logseq.settings!["openAIKey"];
+  const customHeaders = logseq.settings!["customHeaders"];
   const completionEngine = logseq.settings!["openAICompletionEngine"];
   const injectPrefix = unescapeNewlines(logseq.settings!["injectPrefix"]);
   const temperature = Number.parseFloat(logseq.settings!["openAITemperature"]);
@@ -101,6 +109,7 @@
   const completionEndpoint = logseq.settings!["chatCompletionEndpoint"];
   return {
     apiKey,
+    customHeaders,
     completionEngine,
     temperature,
     maxTokens,
diff --git a/src/main.tsx b/src/main.tsx
index 51685d1..494d945 100644
--- a/src/main.tsx
+++ b/src/main.tsx
@@ -194,7 +194,7 @@ const LogseqApp = () => {
       if (command.temperature!=null && !Number.isNaN(command.temperature)) {
         openAISettings.temperature = command.temperature;
       }
-      const response = await openAIWithStream(command.prompt + inputText, openAISettings, onContent, () => {
+      const response = await openAIWithStream(command.prompt, inputText, openAISettings, onContent, () => {
       });
       if (response) {
         return response;
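For reviewers, a minimal sketch of how the new `customHeaders` setting is interpreted, assuming the newline-separated `Key: Value` format that the parsing added in `openAIWithStream` implies. The helper name `parseCustomHeaders` and the sample header names are hypothetical, for illustration only, not code from this PR:

```typescript
// Sketch of the customHeaders parsing added in openAIWithStream: the setting
// holds one "Key: Value" pair per line, and each pair is merged into the
// headers of the request sent to the completion endpoint.
// `parseCustomHeaders` is a hypothetical helper, not part of the plugin.
function parseCustomHeaders(customHeaders: string): Record<string, string> {
  const headers: Record<string, string> = {};
  for (const line of customHeaders.split("\n")) {
    const idx = line.indexOf(":");
    if (idx <= 0) continue; // skip blank or malformed lines
    headers[line.slice(0, idx).trim()] = line.slice(idx + 1).trim();
  }
  return headers;
}

// Example: a value a user might paste into the plugin's settings UI,
// merged with the default Authorization header as the diff does.
const merged = {
  Authorization: "Bearer <api key>",
  ...parseCustomHeaders("X-Proxy-Key: abc123\nHelicone-Auth: Bearer xyz"),
};
console.log(merged);
// => { Authorization: "Bearer <api key>", "X-Proxy-Key": "abc123", "Helicone-Auth": "Bearer xyz" }
```

Splitting only on the first colon keeps header values that themselves contain colons (such as `Helicone-Auth: Bearer a:b`) intact, which is why the parsing in the diff rejoins the remainder with `rest.join(":")`.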