// Provenance: Hugging Face Space by matthoffner, commit d446a2d ("Use turbo model"), 880 bytes.
import { Configuration, OpenAIApi } from "openai-edge";
import { OpenAIStream, StreamingTextResponse } from "ai";
import { createUrlSurfer } from "@/app/tools/surfer";
import { createSerpApi } from "@/app/tools/serp-api";
// Tool schemas for OpenAI function-calling. Each create* helper returns a tuple;
// only the schema (second element) is needed here — the handler implementations
// are discarded, so the model can *request* these tools but this route does not
// execute them. NOTE(review): schema shape is defined in @/app/tools/* — presumably
// matches OpenAI's ChatCompletionFunctions; confirm against those modules.
const [, urlSurferSchema] = createUrlSurfer();
// Falls back to '' when SERP_API_KEY is unset; the schema is still registered
// even without a key — TODO confirm that is intended.
const [, serpApiSchema] = createSerpApi({ apiKey: process.env.SERP_API_KEY || '' });

// OpenAI client configured from the environment (edge-compatible client).
const config = new Configuration({
apiKey: process.env.OPENAI_API_KEY,
});
const openai = new OpenAIApi(config);

// Function definitions advertised to the model on every request.
// NOTE(review): typed as any[] — could likely be ChatCompletionFunctions[] from
// openai-edge instead; verify the schema types exported by @/app/tools/*.
const functions: any[] = [
urlSurferSchema,
serpApiSchema
];
/**
 * Edge API route: proxies a chat-completion request to OpenAI and streams
 * the response back to the client token-by-token.
 *
 * Expects a JSON body of the shape `{ messages, function_call? }` where
 * `messages` is an OpenAI chat-message array and `function_call` optionally
 * forces/suppresses tool use (`'auto' | 'none' | { name }`).
 * NOTE(review): the body is not validated — a malformed request surfaces as
 * an unhandled rejection (HTTP 500); consider schema validation upstream.
 *
 * @param req - Incoming request carrying the JSON chat payload.
 * @returns A {@link StreamingTextResponse} wrapping the model's token stream.
 */
export async function POST(req: Request) {
  const { messages, function_call } = await req.json();

  // Always streams; `functions` (module-level) advertises the available tools.
  const response = await openai.createChatCompletion({
    model: 'gpt-4-1106-preview',
    stream: true,
    messages,
    functions,
    function_call,
  });

  // Adapt the OpenAI SSE stream to a Response the edge runtime can return.
  const stream = OpenAIStream(response);
  return new StreamingTextResponse(stream);
}