@@ -12,7 +12,6 @@ import { ModelsAPI } from "./models-api.js";
 
 const server = createServer(async (request, response) => {
   if (request.method === "GET") {
-    // health check
     response.statusCode = 200;
     response.end(`OK`);
     return;
@@ -90,14 +89,18 @@ const server = createServer(async (request, response) => {
 
   console.time("tool-call");
   const toolCaller = await capiClient.chat.completions.create({
+    // @ts-expect-error - type error due to Copilot/OpenAI SDKs interop, I'll look into it ~@gr2m
    messages: toolCallMessages,
    stream: false,
    model: "gpt-4",
    tools: functions.map((f) => f.tool),
-  })
+  });
   console.timeEnd("tool-call");
 
-  const [functionToCall] = getFunctionCalls(toolCaller)
+  const [functionToCall] = getFunctionCalls(
+    // @ts-expect-error - type error due to Copilot/OpenAI SDKs interop, I'll look into it ~@gr2m
+    toolCaller.choices[0].message
+  )
 
   if (
     !functionToCall
@@ -107,6 +110,8 @@ const server = createServer(async (request, response) => {
   const stream = await capiClient.chat.completions.create({
     stream: true,
     model: "gpt-4",
+    // @ts-expect-error - type error due to Copilot/OpenAI SDKs interop, I'll look into it ~@gr2m
+    messages: payload.messages
   });
 
   for await (const chunk of stream) {
@@ -133,7 +138,11 @@ const server = createServer(async (request, response) => {
 
     console.log("\t with args", args);
     const func = new funcClass(modelsAPI);
-    functionCallRes = await func.execute(payload.messages, args);
+    functionCallRes = await func.execute(
+      // @ts-expect-error - type error due to Copilot/OpenAI SDKs interop, I'll look into it ~@gr2m
+      payload.messages,
+      args
+    );
   } catch (err) {
     console.error(err);
     response.statusCode = 500
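
For context on the repeated `@ts-expect-error` lines: the request payload's messages come from the Copilot side, while `capiClient` is typed by the OpenAI SDK, so the two message shapes don't type-check against each other. A minimal sketch of one alternative, assuming the OpenAI SDK v4 type exports (`toChatMessages` is a hypothetical helper, not part of this commit):

import type { ChatCompletionMessageParam } from "openai/resources/chat/completions";

// Hypothetical adapter: one documented cast at the boundary, so call sites
// type-check without a per-line @ts-expect-error suppression.
function toChatMessages(messages: unknown[]): ChatCompletionMessageParam[] {
  return messages as ChatCompletionMessageParam[];
}

// Usage at a call site from the diff above:
// messages: toChatMessages(payload.messages),

The trade-off is the same unsoundness as the suppressions, but concentrated in one place that can be fixed once the SDK interop issue is resolved.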