36 changes: 25 additions & 11 deletions src/lib/openai.ts
@@ -11,6 +11,7 @@ import { backOff } from "exponential-backoff";
export type DalleImageSize = 256 | 512 | 1024;
export interface OpenAIOptions {
apiKey: string;
customHeaders?: string;
completionEngine?: string;
temperature?: number;
maxTokens?: number;
@@ -21,6 +22,7 @@ export interface OpenAIOptions {

const OpenAIDefaults = (apiKey: string): OpenAIOptions => ({
apiKey,
customHeaders: "",
completionEngine: "gpt-3.5-turbo",
temperature: 1.0,
maxTokens: 1000,
@@ -57,7 +59,7 @@ const retryOptions = {

export async function whisper(file: File, openAiOptions: OpenAIOptions): Promise<string> {
const apiKey = openAiOptions.apiKey;
const baseUrl = openAiOptions.completionEndpoint ? openAiOptions.completionEndpoint : "https://api.openai.com/v1";
const baseUrl = openAiOptions.completionEndpoint ? openAiOptions.completionEndpoint : "https://api.openai.com/v1/chat/completions";
const model = 'whisper-1';

// Create a FormData object and append the file
@@ -194,6 +196,7 @@ export async function openAI(
}

export async function openAIWithStream(
prompt: string,
input: string,
openAiOptions: OpenAIOptions,
onContent: (content: string) => void,
@@ -205,7 +208,9 @@
try {
if (engine.startsWith("gpt-3.5") || engine.startsWith("gpt-4")) {
const inputMessages: ChatCompletionRequestMessage[] = [{ role: "user", content: input }];
if (openAiOptions.chatPrompt && openAiOptions.chatPrompt.length > 0) {
if (prompt) {
inputMessages.unshift({ role: "system", content: prompt });
} else if (openAiOptions.chatPrompt && openAiOptions.chatPrompt.length > 0) {
inputMessages.unshift({ role: "system", content: openAiOptions.chatPrompt });
}
const body = {
@@ -219,15 +224,22 @@
stream: true
}
const response = await backOff(
() =>
fetch(`${options.completionEndpoint}/chat/completions`, {
() => {
const headers: Record<string, string> = {
  Authorization: `Bearer ${options.apiKey}`,
  'Content-Type': 'application/json',
  'Accept': 'text/event-stream'
};
if (options.customHeaders && options.customHeaders.length > 0) {
  options.customHeaders.split("\n").forEach(header => {
    // Split on the first ":" only, so header values containing colons stay
    // intact, and skip blank or malformed lines rather than calling trim()
    // on undefined.
    const [key, ...rest] = header.split(":");
    const value = rest.join(":").trim();
    if (key && value) {
      headers[key.trim()] = value;
    }
  });
}
return fetch(`${options.completionEndpoint}`, {
method: "POST",
body: JSON.stringify(body),
headers: {
Authorization: `Bearer ${options.apiKey}`,
'Content-Type': 'application/json',
'Accept': 'text/event-stream'
}
headers
}).then((response) => {
if (response.ok && response.body) {
const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
@@ -250,7 +262,8 @@

let res = ""
for (let i = 0; i < data.length; i++) {
res += data[i].choices[0]?.delta?.content || ""
res += data[i]?.choices?.[0]?.delta?.content || ""
}
result += res
onContent(res)
@@ -260,7 +273,8 @@
} else {
return Promise.reject(response);
}
}),
})
},
retryOptions
);
const choices = (response as CreateChatCompletionResponse)?.choices;
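Reviewer note: the header-parsing rule above is easiest to see in isolation, so here is a minimal standalone sketch of the same idea. The helper name parseCustomHeaders and the sample header names are illustrative, not part of this diff.

function parseCustomHeaders(raw: string): Record<string, string> {
  const headers: Record<string, string> = {};
  for (const line of raw.split("\n")) {
    // Split on the first ":" only, so values that contain colons stay intact.
    const [key, ...rest] = line.split(":");
    const value = rest.join(":").trim();
    if (key && value) {
      headers[key.trim()] = value;
    }
  }
  return headers;
}

// Example with hypothetical header names:
// parseCustomHeaders("X-Api-Version: 2023-06-01\nX-Proxy-Token: a:b")
// => { "X-Api-Version": "2023-06-01", "X-Proxy-Token": "a:b" }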
5 changes: 3 additions & 2 deletions src/lib/rawCommands.ts
@@ -88,7 +88,7 @@ export async function runGptBlock(b: IHookEvent) {
result = openAISettings.injectPrefix + result;
}

await openAIWithStream(currentBlock.content, openAISettings, async (content: string) => {
await openAIWithStream(currentBlock.prompt, currentBlock.content, openAISettings, async (content: string) => {
result += content || "";
if(null != insertBlock) {
await logseq.Editor.updateBlock(insertBlock.uuid, result);
@@ -108,6 +108,7 @@ export async function runGptPage(b: IHookEvent) {
const openAISettings = getOpenaiSettings();
validateSettings(openAISettings);

const pageContents = await getPageContentFromBlock(b.uuid);
const currentBlock = await logseq.Editor.getBlock(b.uuid);
const prompt = currentBlock?.prompt;

@@ -135,7 +136,7 @@
result = openAISettings.injectPrefix + result;
}

await openAIWithStream(pageContents, openAISettings, async (content: string) => {
await openAIWithStream(prompt, pageContents, openAISettings, async (content: string) => {
result += content || "";
if(null != insertBlock) {
await logseq.Editor.updateBlock(insertBlock.uuid, result);
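For reference, this is the call shape the new signature produces. The prompt string and the empty final callback here are illustrative; the last argument appears to be a completion callback, matching the call in main.tsx below.

let output = "";
await openAIWithStream(
  "Summarize the following notes.",              // prompt: sent as the system message when non-empty
  pageContents,                                  // input: sent as the user message
  openAISettings,
  async (chunk: string) => { output += chunk; }, // onContent: invoked per streamed delta
  () => {}                                       // presumably invoked when the stream ends
);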
11 changes: 10 additions & 1 deletion src/lib/settings.ts
@@ -24,10 +24,17 @@ export const settingsSchema: SettingSchemaDesc[] = [
{
key: "chatCompletionEndpoint",
type: "string",
default: "http://api.openai.com/v1",
default: "http://api.openai.com/v1/chat/completions",
title: "OpenAI API Completion Endpoint",
description: "The endpoint to use for OpenAI API completion requests. You shouldn't need to change this."
},
{
key: "customHeaders",
type: "string",
default: "",
title: "OpenAI API Custom Headers",
description: "Custom headers to send with OpenAI API completion requests, one 'Header-Name: value' pair per line. You typically don't need to set this."
},
{
key: "chatPrompt",
type: "string",
@@ -90,6 +97,7 @@ function unescapeNewlines(s: string) {

export function getOpenaiSettings(): PluginOptions {
const apiKey = logseq.settings!["openAIKey"];
const customHeaders = logseq.settings!["customHeaders"];
const completionEngine = logseq.settings!["openAICompletionEngine"];
const injectPrefix = unescapeNewlines(logseq.settings!["injectPrefix"]);
const temperature = Number.parseFloat(logseq.settings!["openAITemperature"]);
@@ -101,6 +109,7 @@ export function getOpenaiSettings(): PluginOptions {
const completionEndpoint = logseq.settings!["chatCompletionEndpoint"];
return {
apiKey,
customHeaders,
completionEngine,
temperature,
maxTokens,
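To illustrate the expected format of the new customHeaders setting, a user routing requests through a proxy might enter something like the following; both header names and values are hypothetical placeholders:

X-Api-Version: 2023-06-01
Helicone-Auth: Bearer <your-token>

Each line is split on its first colon and attached to the streaming chat completion request.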
2 changes: 1 addition & 1 deletion src/main.tsx
@@ -194,7 +194,7 @@ const LogseqApp = () => {
if (command.temperature!=null && !Number.isNaN(command.temperature)) {
openAISettings.temperature = command.temperature;
}
const response = await openAIWithStream(command.prompt + inputText, openAISettings, onContent, () => {
const response = await openAIWithStream(command.prompt, inputText, openAISettings, onContent, () => {
});
if (response) {
return response;
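One consequence of this change set worth flagging: chatCompletionEndpoint now stores the complete chat completions URL, and openAIWithStream uses it verbatim instead of appending /chat/completions itself. A small sketch of the difference, using the defaults from this diff (the https scheme is assumed here):

const oldBase = "https://api.openai.com/v1";                  // old default: a base URL
const newFull = "https://api.openai.com/v1/chat/completions"; // new default: the full URL
// The old code built the URL as `${oldBase}/chat/completions`; the new code
// sends requests to the setting value as-is, so the two defaults are equivalent:
console.log(`${oldBase}/chat/completions` === newFull);       // true

Anyone who previously set a custom endpoint will presumably need to append /chat/completions to their setting after upgrading.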