diff --git a/src/lib/openai.ts b/src/lib/openai.ts
index 4e04edb..07f0797 100644
--- a/src/lib/openai.ts
+++ b/src/lib/openai.ts
@@ -12,6 +12,8 @@ export type DalleImageSize = 256 | 512 | 1024;
 export interface OpenAIOptions {
   apiKey: string;
   completionEngine?: string;
+  completionCharacter?: string;
+  useChatCompletionRequestMessage?: boolean;
   temperature?: number;
   maxTokens?: number;
   dalleImageSize?: DalleImageSize;
@@ -22,6 +24,8 @@ export interface OpenAIOptions {
 const OpenAIDefaults = (apiKey: string): OpenAIOptions => ({
   apiKey,
   completionEngine: "gpt-3.5-turbo",
+  completionCharacter: "Assistant",
+  useChatCompletionRequestMessage: false,
   temperature: 1.0,
   maxTokens: 1000,
   dalleImageSize: 1024,
@@ -127,7 +131,7 @@ export async function openAI(
   const openai = new OpenAIApi(configuration);
 
   try {
-    if (engine.startsWith("gpt-3.5") || engine.startsWith("gpt-4")) {
+    if (engine.startsWith("gpt-3.5") || engine.startsWith("gpt-4") || options.useChatCompletionRequestMessage) {
       const inputMessages:ChatCompletionRequestMessage[] = [{ role: "user", content: input }];
       if (openAiOptions.chatPrompt && openAiOptions.chatPrompt.length > 0) {
         inputMessages.unshift({ role: "system", content: openAiOptions.chatPrompt });
@@ -201,9 +205,10 @@ export async function openAIWithStream(
 ): Promise<string | null> {
   const options = { ...OpenAIDefaults(openAiOptions.apiKey), ...openAiOptions };
   const engine = options.completionEngine!;
+  const character = options.completionCharacter!;
 
   try {
-    if (engine.startsWith("gpt-3.5") || engine.startsWith("gpt-4")) {
+    if (engine.startsWith("gpt-3.5") || engine.startsWith("gpt-4") || options.useChatCompletionRequestMessage) {
       const inputMessages: ChatCompletionRequestMessage[] = [{ role: "user", content: input }];
       if (openAiOptions.chatPrompt && openAiOptions.chatPrompt.length > 0) {
         inputMessages.unshift({ role: "system", content: openAiOptions.chatPrompt });
@@ -216,7 +221,8 @@ export async function openAIWithStream(
         frequency_penalty: 0,
         presence_penalty: 0,
         model: engine,
-        stream: true
+        stream: true,
+        character: character
       }
       const response = await backOff(
         () =>
@@ -277,70 +283,70 @@ export async function openAIWithStream(
       }
     } else {
       const body = {
-          prompt: input,
-          temperature: options.temperature,
-          max_tokens: options.maxTokens,
-          top_p: 1,
-          frequency_penalty: 0,
-          presence_penalty: 0,
-          model: engine,
-          stream: true
+        prompt: input,
+        temperature: options.temperature,
+        max_tokens: options.maxTokens,
+        top_p: 1,
+        frequency_penalty: 0,
+        presence_penalty: 0,
+        model: engine,
+        stream: true
       }
       const response = await backOff(
-          () =>
-            fetch(`${options.completionEndpoint}/completions`, {
-              method: "POST",
-              body: JSON.stringify(body),
-              headers: {
-                Authorization: `Bearer ${options.apiKey}`,
-                'Content-Type': 'application/json',
-                'Accept': 'text/event-stream'
-              }
+        () =>
+          fetch(`${options.completionEndpoint}/completions`, {
+            method: "POST",
+            body: JSON.stringify(body),
+            headers: {
+              Authorization: `Bearer ${options.apiKey}`,
+              'Content-Type': 'application/json',
+              'Accept': 'text/event-stream'
+            }
           }).then((response) => {
-              if (response.ok && response.body) {
-                const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
-                let result = ""
= "" + const readStream = (): any => + reader.read().then(({ + value, + done + }) => { + if (done) { + reader.cancel(); + onStop(); + return Promise.resolve({ choices: [{ text: result }]}); + } - const data = getDataFromStreamValue(value); - if (!data || !data[0]) { - return readStream(); - } + const data = getDataFromStreamValue(value); + if (!data || !data[0]) { + return readStream(); + } - let res = "" - for (let i = 0; i < data.length; i++) { - res += data[i].choices[0]?.text || "" - } + let res = "" + for (let i = 0; i < data.length; i++) { + res += data[i].choices[0]?.text || "" + } result += res - onContent(res) - return readStream(); - }); - return readStream(); - } else { - return Promise.reject(response); - } + onContent(res) + return readStream(); + }); + return readStream(); + } else { + return Promise.reject(response); + } }), - retryOptions + retryOptions ); const choices = (response as CreateCompletionResponse)?.choices; if ( - choices && - choices[0] && - choices[0].text && - choices[0].text.length > 0 + choices && + choices[0] && + choices[0].text && + choices[0].text.length > 0 ) { - return trimLeadingWhitespace(choices[0].text); + return trimLeadingWhitespace(choices[0].text); } else { - return null; + return null; } } } catch (e: any) { diff --git a/src/lib/settings.ts b/src/lib/settings.ts index 0cd4d32..dee52f2 100644 --- a/src/lib/settings.ts +++ b/src/lib/settings.ts @@ -28,6 +28,20 @@ export const settingsSchema: SettingSchemaDesc[] = [ title: "OpenAI API Completion Endpoint", description: "The endpoint to use for OpenAI API completion requests. You shouldn't need to change this." }, + { + key: "useChatCompletionRequestMessage", + type: "boolean", + default: false, + title: "Use OpenAI Chat Completion Request Message", + description: "Send chat completion message using ChatCompletionRequestMessage interface rather than raw string. Useful for Oobabooga Text-Generation-WebUI. See https://github.com/oobabooga/text-generation-webui for more info.", + }, + { + key: "chatCompletionCharacter", + type: "string", + default: "Assistant", + title: "Completion Character", + description: "Only used for Oobabooga Text-Generation-WebUI. See https://github.com/oobabooga/text-generation-webui for more info." 
+  },
   {
     key: "chatPrompt",
     type: "string",
@@ -91,6 +105,8 @@ function unescapeNewlines(s: string) {
 export function getOpenaiSettings(): PluginOptions {
   const apiKey = logseq.settings!["openAIKey"];
   const completionEngine = logseq.settings!["openAICompletionEngine"];
+  const completionCharacter = logseq.settings!["chatCompletionCharacter"];
+  const useChatCompletionRequestMessage = logseq.settings!["useChatCompletionRequestMessage"];
   const injectPrefix = unescapeNewlines(logseq.settings!["injectPrefix"]);
   const temperature = Number.parseFloat(logseq.settings!["openAITemperature"]);
   const maxTokens = Number.parseInt(logseq.settings!["openAIMaxTokens"]);
@@ -102,6 +118,8 @@ export function getOpenaiSettings(): PluginOptions {
   return {
     apiKey,
     completionEngine,
+    completionCharacter,
+    useChatCompletionRequestMessage,
     temperature,
     maxTokens,
     dalleImageSize,
diff --git a/src/main.tsx b/src/main.tsx
index 51685d1..4d8e4a0 100644
--- a/src/main.tsx
+++ b/src/main.tsx
@@ -163,7 +163,7 @@ const LogseqApp = () => {
   logseq.Editor.registerBlockContextMenuItem("gpt-page", runGptPage);
   logseq.Editor.registerSlashCommand("gpt-block", runGptBlock);
   logseq.Editor.registerBlockContextMenuItem("gpt-block", runGptBlock);
-  logseq.Editor.registerSlashCommand("dalle", runDalleBlock);
+  logseq.Editor.registerSlashCommand("dalle", runDalleBlock);
   logseq.Editor.registerBlockContextMenuItem("dalle", runDalleBlock);
   logseq.Editor.registerSlashCommand("whisper", runWhisper);
   logseq.Editor.registerBlockContextMenuItem("whisper", runWhisper);
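
A minimal usage sketch of the new options (not part of the diff). The endpoint URL, engine name, and prompt below are placeholder assumptions for a local Oobabooga Text-Generation-WebUI server exposing an OpenAI-compatible API; because the engine is not a gpt-3.5/gpt-4 model, it is the new `useChatCompletionRequestMessage` flag that routes the request onto the chat code path:

```ts
import { openAIWithStream, OpenAIOptions } from "./lib/openai";

const opts: OpenAIOptions = {
  apiKey: "unused-for-local-backends",            // assumption: ignored by the local server
  completionEndpoint: "http://127.0.0.1:5001/v1", // assumption: local Oobabooga endpoint
  completionEngine: "my-local-model",             // not gpt-3.5/gpt-4, so the flag below...
  useChatCompletionRequestMessage: true,          // ...selects the chat-message request shape
  completionCharacter: "Assistant",               // forwarded as `character` in the body
};

const text = await openAIWithStream(
  "Write a haiku about Logseq.",
  opts,
  (delta) => console.log(delta), // called with each streamed chunk
  () => console.log("[done]")    // called once the stream closes
);
```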
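With those options, the chat branch should build a request body roughly like the sketch below. Note that `character` is an Oobabooga-specific extension rather than part of the OpenAI chat completions schema, so this configuration is only meaningful against Text-Generation-WebUI; the `messages` field and the default values are inferred from the surrounding code, not shown in this diff:

```ts
// Approximate request body produced by the chat branch after this change:
const body = {
  messages: [{ role: "user", content: "Write a haiku about Logseq." }],
  temperature: 1.0,       // from OpenAIDefaults
  max_tokens: 1000,       // from OpenAIDefaults
  top_p: 1,
  frequency_penalty: 0,
  presence_penalty: 0,
  model: "my-local-model",
  stream: true,
  character: "Assistant", // Oobabooga extension; OpenAI itself would reject or ignore it
};
```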