blob: dc7baf81dcbda021bcf027773ff4f99af6ab3566 (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
|
import { streamText, type ModelMessage } from "ai"
import { openai } from "@ai-sdk/openai"
import { withSupermemory } from "../../../../../src/vercel"
// Memory-augmented model: wraps GPT-4 so every call is scoped to a single
// user's supermemory context.
// NOTE(review): "user-123" and "chat-session" look like placeholder IDs —
// presumably these should come from the authenticated session; confirm.
const supermemoryOptions = {
	mode: "full", // full memory mode (as opposed to a lighter retrieval mode — see supermemory docs)
	addMemory: "always", // persist memories on every exchange, not conditionally
	conversationId: "chat-session", // groups memories under one conversation thread
	verbose: true, // extra logging from the wrapper
} as const
const model = withSupermemory(openai("gpt-4"), "user-123", supermemoryOptions)
/**
 * Chat endpoint: streams an assistant reply for the posted conversation.
 *
 * Expects a JSON body of `{ messages: ModelMessage[] }`.
 * NOTE(review): if the client is `useChat` from the AI SDK UI packages, it
 * sends `UIMessage[]` and the body would need `convertToModelMessages()` —
 * verify against the actual caller.
 *
 * @param req - incoming request whose JSON body carries the message history
 * @returns a UI-message stream Response produced by the AI SDK
 */
export async function POST(req: Request) {
	const { messages }: { messages: ModelMessage[] } = await req.json()

	// streamText is synchronous in AI SDK v5 — it returns immediately and the
	// model call runs as the stream is consumed, so it must not be awaited.
	const result = streamText({
		model,
		system: "You are a helpful assistant.",
		messages,
	})

	return result.toUIMessageStreamResponse()
}
|