aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMaheshtheDev <[email protected]>2025-10-27 20:08:11 +0000
committerMaheshtheDev <[email protected]>2025-10-27 20:08:11 +0000
commitb3aab91489af0adb2e16215a02c20bc83259b42f (patch)
treeb36622bb273c09b9e6dd4542388630453bf2463e
parentfeat: more clarity on the connectors sync (#523) (diff)
downloadsupermemory-b3aab91489af0adb2e16215a02c20bc83259b42f.tar.xz
supermemory-b3aab91489af0adb2e16215a02c20bc83259b42f.zip
feat: withSupermemory for openai sdk (#531)
### TL;DR Added OpenAI SDK middleware support for SuperMemory integration, allowing direct memory injection without AI SDK dependency. ### What changed? - Added `withSupermemory` middleware for OpenAI SDK that automatically injects relevant memories into chat completions - Implemented memory search and injection functionality for OpenAI clients - Restructured the OpenAI module to separate tools and middleware functionality - Updated README with comprehensive documentation and examples for the new OpenAI middleware - Added test implementation with a Next.js API route example - Reorganized package exports to support the new structure
-rw-r--r--packages/tools/README.md103
-rw-r--r--packages/tools/package.json4
-rw-r--r--packages/tools/src/index.ts3
-rw-r--r--packages/tools/src/openai/index.ts92
-rw-r--r--packages/tools/src/openai/middleware.ts393
-rw-r--r--packages/tools/src/openai/tools.ts (renamed from packages/tools/src/openai.ts)4
-rw-r--r--packages/tools/test/chatapp/app/api/openai-chat/route.ts31
-rw-r--r--packages/tools/test/chatapp/app/openai-chat/[chatId]/page.tsx109
-rw-r--r--packages/tools/test/chatapp/app/openai-chat/new/page.tsx10
-rw-r--r--packages/tools/test/chatapp/package.json1
-rw-r--r--packages/tools/tsdown.config.ts2
11 files changed, 744 insertions, 8 deletions
diff --git a/packages/tools/README.md b/packages/tools/README.md
index aae102f3..6db2f7f8 100644
--- a/packages/tools/README.md
+++ b/packages/tools/README.md
@@ -19,8 +19,8 @@ yarn add @supermemory/tools
## Usage
The package provides two submodule imports:
-- `@supermemory/tools/ai-sdk` - For use with the AI SDK framework
-- `@supermemory/tools/openai` - For use with OpenAI's function calling
+- `@supermemory/tools/ai-sdk` - For use with the AI SDK framework (includes `withSupermemory` middleware)
+- `@supermemory/tools/openai` - For use with OpenAI SDK (includes `withSupermemory` middleware and function calling tools)
### AI SDK Usage
@@ -223,6 +223,105 @@ const modelWithOptions = withSupermemory(openai("gpt-4"), "user-123", {
})
```
+### OpenAI SDK Usage
+
+#### OpenAI Middleware with Supermemory
+
+The `withSupermemory` function wraps an existing OpenAI client with SuperMemory middleware so that relevant memories are automatically injected into chat completions:
+
+```typescript
+import OpenAI from "openai"
+import { withSupermemory } from "@supermemory/tools/openai"
+
+// Wrap an existing OpenAI client with supermemory middleware
+const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
+const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+  conversationId: "conversation-456",
+  mode: "full",
+  addMemory: "always",
+  verbose: true,
+})
+
+// Use directly with chat completions - memories are automatically injected
+const completion = await openaiWithSupermemory.chat.completions.create({
+ model: "gpt-4o-mini",
+ messages: [
+ { role: "user", content: "What do you remember about my preferences?" }
+ ],
+})
+
+console.log(completion.choices[0]?.message?.content)
+```
+
+#### OpenAI Middleware Options
+
+The middleware supports the same configuration options as the AI SDK version:
+
+```typescript
+const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+ conversationId: "conversation-456", // Group messages for contextual memory
+ mode: "full", // "profile" | "query" | "full"
+ addMemory: "always", // "always" | "never"
+ verbose: true, // Enable detailed logging
+})
+```
+
+#### Advanced Usage with Custom OpenAI Options
+
+You can configure the underlying OpenAI client as usual and then wrap it:
+
+```typescript
+import OpenAI from "openai"
+import { withSupermemory } from "@supermemory/tools/openai"
+
+const openai = new OpenAI({
+  apiKey: "custom-api-key",
+  baseURL: "https://api.openai.com/v1",
+  organization: "org-123",
+})
+
+const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+  mode: "profile",
+  addMemory: "always",
+})
+```
+
+const completion = await openaiWithSupermemory.chat.completions.create({
+ model: "gpt-4o-mini",
+ messages: [{ role: "user", content: "Tell me about my preferences" }],
+})
+```
+
+#### Next.js API Route Example
+
+Here's a complete example for a Next.js API route:
+
+```typescript
+// app/api/chat/route.ts
+import OpenAI from "openai"
+import { withSupermemory } from "@supermemory/tools/openai"
+
+export async function POST(req: Request) {
+  const { messages, conversationId } = (await req.json()) as {
+    messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[]
+    conversationId: string
+  }
+
+  const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
+  const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+    conversationId,
+    mode: "full",
+    addMemory: "always",
+    verbose: true,
+  })
+
+ const completion = await openaiWithSupermemory.chat.completions.create({
+ model: "gpt-4o-mini",
+ messages,
+ })
+
+ const message = completion.choices?.[0]?.message
+ return Response.json({ message, usage: completion.usage })
+}
+```
+
### OpenAI Function Calling Usage
```typescript
diff --git a/packages/tools/package.json b/packages/tools/package.json
index 2a4f0a0a..371f865b 100644
--- a/packages/tools/package.json
+++ b/packages/tools/package.json
@@ -1,7 +1,7 @@
{
"name": "@supermemory/tools",
"type": "module",
- "version": "1.2.16",
+	"version": "1.2.17",
"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
"scripts": {
"build": "tsdown",
@@ -38,7 +38,7 @@
".": "./dist/index.js",
"./ai-sdk": "./dist/ai-sdk.js",
"./claude-memory": "./dist/claude-memory.js",
- "./openai": "./dist/openai.js",
+ "./openai": "./dist/openai/index.js",
"./package.json": "./package.json"
},
"repository": {
diff --git a/packages/tools/src/index.ts b/packages/tools/src/index.ts
index 4f21246e..404e0943 100644
--- a/packages/tools/src/index.ts
+++ b/packages/tools/src/index.ts
@@ -1,2 +1,3 @@
-// Export shared types and utilities
export type { SupermemoryToolsConfig } from "./types"
+
+export type { OpenAIMiddlewareOptions } from "./openai"
diff --git a/packages/tools/src/openai/index.ts b/packages/tools/src/openai/index.ts
new file mode 100644
index 00000000..517fe282
--- /dev/null
+++ b/packages/tools/src/openai/index.ts
@@ -0,0 +1,92 @@
+import type OpenAI from "openai"
+import {
+ createOpenAIMiddleware,
+ type OpenAIMiddlewareOptions,
+} from "./middleware"
+
+/**
+ * Wraps an OpenAI client with SuperMemory middleware to automatically inject relevant memories
+ * into the system prompt based on the user's message content.
+ *
+ * This middleware searches the supermemory API for relevant memories using the container tag
+ * and user message, then either appends memories to an existing system prompt or creates
+ * a new system prompt with the memories.
+ *
+ * @param openaiClient - The OpenAI client to wrap with SuperMemory middleware
+ * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
+ * @param options - Optional configuration options for the middleware
+ * @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation
+ * @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false)
+ * @param options.mode - Optional mode for memory search: "profile" (default), "query", or "full"
+ * @param options.addMemory - Optional mode for memory addition: "always" (default), "never"
+ *
+ * @returns An OpenAI client with SuperMemory middleware injected
+ *
+ * @example
+ * ```typescript
+ * import { withSupermemory } from "@supermemory/tools/openai"
+ * import OpenAI from "openai"
+ *
+ * // Create OpenAI client with supermemory middleware
+ * const openai = new OpenAI({
+ * apiKey: process.env.OPENAI_API_KEY,
+ * })
+ * const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+ * conversationId: "conversation-456",
+ * mode: "full",
+ * addMemory: "always"
+ * })
+ *
+ * // Use normally - memories will be automatically injected
+ * const response = await openaiWithSupermemory.chat.completions.create({
+ * model: "gpt-4",
+ * messages: [
+ * { role: "user", content: "What's my favorite programming language?" }
+ * ]
+ * })
+ * ```
+ *
+ * @throws {Error} When SUPERMEMORY_API_KEY environment variable is not set
+ * @throws {Error} When supermemory API request fails
+ */
+export function withSupermemory(
+ openaiClient: OpenAI,
+ containerTag: string,
+ options?: OpenAIMiddlewareOptions,
+) {
+ if (!process.env.SUPERMEMORY_API_KEY) {
+ throw new Error("SUPERMEMORY_API_KEY is not set")
+ }
+
+ const conversationId = options?.conversationId
+ const verbose = options?.verbose ?? false
+ const mode = options?.mode ?? "profile"
+ const addMemory = options?.addMemory ?? "never"
+
+ const openaiWithSupermemory = createOpenAIMiddleware(
+ openaiClient,
+ containerTag,
+ {
+ conversationId,
+ verbose,
+ mode,
+ addMemory,
+ },
+ )
+
+ return openaiWithSupermemory
+}
+
+export type { OpenAIMiddlewareOptions }
+export type { MemorySearchResult, MemoryAddResult } from "./tools"
+export {
+ createSearchMemoriesFunction,
+ createAddMemoryFunction,
+ supermemoryTools,
+ getToolDefinitions,
+ createToolCallExecutor,
+ createToolCallsExecutor,
+ createSearchMemoriesTool,
+ createAddMemoryTool,
+ memoryToolSchemas,
+} from "./tools"
diff --git a/packages/tools/src/openai/middleware.ts b/packages/tools/src/openai/middleware.ts
new file mode 100644
index 00000000..8d83874d
--- /dev/null
+++ b/packages/tools/src/openai/middleware.ts
@@ -0,0 +1,393 @@
+import type OpenAI from "openai"
+import Supermemory from "supermemory"
+import { createLogger, type Logger } from "../vercel/logger"
+import { convertProfileToMarkdown } from "../vercel/util"
+
+export interface OpenAIMiddlewareOptions {
+ conversationId?: string
+ verbose?: boolean
+ mode?: "profile" | "query" | "full"
+ addMemory?: "always" | "never"
+}
+
+interface SupermemoryProfileSearch {
+ profile: {
+ static?: Array<{ memory: string; metadata?: Record<string, unknown> }>
+ dynamic?: Array<{ memory: string; metadata?: Record<string, unknown> }>
+ }
+ searchResults: {
+ results: Array<{ memory: string; metadata?: Record<string, unknown> }>
+ }
+}
+
+/**
+ * Extracts the last user message from an array of chat completion messages.
+ *
+ * Searches through the messages array in reverse order to find the most recent
+ * message with role "user" and returns its content as a string.
+ *
+ * @param messages - Array of chat completion message parameters
+ * @returns The content of the last user message, or empty string if none found
+ *
+ * @example
+ * ```typescript
+ * const messages = [
+ * { role: "system", content: "You are a helpful assistant." },
+ * { role: "user", content: "Hello there!" },
+ * { role: "assistant", content: "Hi! How can I help you?" },
+ * { role: "user", content: "What's the weather like?" }
+ * ]
+ *
+ * const lastMessage = getLastUserMessage(messages)
+ * // Returns: "What's the weather like?"
+ * ```
+ */
+const getLastUserMessage = (
+ messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
+) => {
+ const lastUserMessage = messages
+ .slice()
+ .reverse()
+ .find((msg) => msg.role === "user")
+
+ return typeof lastUserMessage?.content === "string"
+ ? lastUserMessage.content
+ : ""
+}
+
+/**
+ * Searches for memories using the SuperMemory profile API.
+ *
+ * Makes a POST request to the SuperMemory API to retrieve user profile memories
+ * and search results based on the provided container tag and optional query text.
+ *
+ * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
+ * @param queryText - Optional query text to search for specific memories. If empty, returns all profile memories
+ * @returns Promise that resolves to the SuperMemory profile search response
+ * @throws {Error} When the API request fails or returns an error status
+ *
+ * @example
+ * ```typescript
+ * // Search with query
+ * const results = await supermemoryProfileSearch("user-123", "favorite programming language")
+ *
+ * // Get all profile memories
+ * const profile = await supermemoryProfileSearch("user-123", "")
+ * ```
+ */
+const supermemoryProfileSearch = async (
+ containerTag: string,
+ queryText: string,
+): Promise<SupermemoryProfileSearch> => {
+ const payload = queryText
+ ? JSON.stringify({
+ q: queryText,
+ containerTag: containerTag,
+ })
+ : JSON.stringify({
+ containerTag: containerTag,
+ })
+
+ try {
+ const response = await fetch("https://api.supermemory.ai/v4/profile", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${process.env.SUPERMEMORY_API_KEY}`,
+ },
+ body: payload,
+ })
+
+ if (!response.ok) {
+ const errorText = await response.text().catch(() => "Unknown error")
+ throw new Error(
+ `Supermemory profile search failed: ${response.status} ${response.statusText}. ${errorText}`,
+ )
+ }
+
+ return await response.json()
+ } catch (error) {
+ if (error instanceof Error) {
+ throw error
+ }
+ throw new Error(`Supermemory API request failed: ${error}`)
+ }
+}
+
+/**
+ * Adds memory-enhanced system prompts to chat completion messages.
+ *
+ * Searches for relevant memories based on the specified mode and injects them
+ * into the conversation. If a system prompt already exists, memories are appended
+ * to it. Otherwise, a new system prompt is created with the memories.
+ *
+ * @param messages - Array of chat completion message parameters
+ * @param containerTag - The container tag/identifier for memory search
+ * @param logger - Logger instance for debugging and info output
+ * @param mode - Memory search mode: "profile" (all memories), "query" (search-based), or "full" (both)
+ * @returns Promise that resolves to enhanced messages with memory-injected system prompt
+ *
+ * @example
+ * ```typescript
+ * const messages = [
+ * { role: "user", content: "What's my favorite programming language?" }
+ * ]
+ *
+ * const enhancedMessages = await addSystemPrompt(
+ * messages,
+ * "user-123",
+ * logger,
+ * "full"
+ * )
+ * // Returns messages with system prompt containing relevant memories
+ * ```
+ */
+const addSystemPrompt = async (
+ messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
+ containerTag: string,
+ logger: Logger,
+ mode: "profile" | "query" | "full",
+) => {
+ const systemPromptExists = messages.some((msg) => msg.role === "system")
+
+ const queryText = mode !== "profile" ? getLastUserMessage(messages) : ""
+
+ const memoriesResponse = await supermemoryProfileSearch(
+ containerTag,
+ queryText,
+ )
+
+ const memoryCountStatic = memoriesResponse.profile.static?.length || 0
+ const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0
+
+ logger.info("Memory search completed", {
+ containerTag,
+ memoryCountStatic,
+ memoryCountDynamic,
+ queryText:
+ queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""),
+ mode,
+ })
+
+ const profileData =
+ mode !== "query"
+ ? convertProfileToMarkdown({
+ profile: {
+ static: memoriesResponse.profile.static?.map((item) => item.memory),
+ dynamic: memoriesResponse.profile.dynamic?.map(
+ (item) => item.memory,
+ ),
+ },
+ searchResults: {
+ results: memoriesResponse.searchResults.results.map((item) => ({
+ memory: item.memory,
+ })) as [{ memory: string }],
+ },
+ })
+ : ""
+ const searchResultsMemories =
+ mode !== "profile"
+ ? `Search results for user's recent message: \n${memoriesResponse.searchResults.results
+ .map((result) => `- ${result.memory}`)
+ .join("\n")}`
+ : ""
+
+ const memories = `${profileData}\n${searchResultsMemories}`.trim()
+
+ if (memories) {
+ logger.debug("Memory content preview", {
+ content: memories,
+ fullLength: memories.length,
+ })
+ }
+
+ if (systemPromptExists) {
+ logger.debug("Added memories to existing system prompt")
+ return messages.map((msg) =>
+ msg.role === "system"
+ ? { ...msg, content: `${msg.content} \n ${memories}` }
+ : msg,
+ )
+ }
+
+ logger.debug(
+ "System prompt does not exist, created system prompt with memories",
+ )
+ return [{ role: "system" as const, content: memories }, ...messages]
+}
+
+/**
+ * Converts an array of chat completion messages into a formatted conversation string.
+ *
+ * Transforms the messages array into a readable conversation format where each
+ * message is prefixed with its role (User/Assistant) and messages are separated
+ * by double newlines.
+ *
+ * @param messages - Array of chat completion message parameters
+ * @returns Formatted conversation string with role prefixes
+ *
+ * @example
+ * ```typescript
+ * const messages = [
+ * { role: "user", content: "Hello!" },
+ * { role: "assistant", content: "Hi there!" },
+ * { role: "user", content: "How are you?" }
+ * ]
+ *
+ * const conversation = getConversationContent(messages)
+ * // Returns: "User: Hello!\n\nAssistant: Hi there!\n\nUser: How are you?"
+ * ```
+ */
+const getConversationContent = (
+ messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
+) => {
+ return messages
+ .map((msg) => {
+ const role = msg.role === "user" ? "User" : "Assistant"
+ const content = typeof msg.content === "string" ? msg.content : ""
+ return `${role}: ${content}`
+ })
+ .join("\n\n")
+}
+
+/**
+ * Adds a new memory to the SuperMemory system.
+ *
+ * Saves the provided content as a memory with the specified container tag and
+ * optional custom ID. Logs success or failure information for debugging.
+ *
+ * @param client - SuperMemory client instance
+ * @param containerTag - The container tag/identifier for the memory
+ * @param content - The content to save as a memory
+ * @param customId - Optional custom ID for the memory (e.g., conversation ID)
+ * @param logger - Logger instance for debugging and info output
+ * @returns Promise that resolves when memory is saved (or fails silently)
+ *
+ * @example
+ * ```typescript
+ * await addMemoryTool(
+ * supermemoryClient,
+ * "user-123",
+ * "User prefers React with TypeScript",
+ * "conversation-456",
+ * logger
+ * )
+ * ```
+ */
+const addMemoryTool = async (
+ client: Supermemory,
+ containerTag: string,
+ content: string,
+ customId: string | undefined,
+ logger: Logger,
+): Promise<void> => {
+ try {
+ const response = await client.memories.add({
+ content,
+ containerTags: [containerTag],
+ customId,
+ })
+
+ logger.info("Memory saved successfully", {
+ containerTag,
+ customId,
+ contentLength: content.length,
+ memoryId: response.id,
+ })
+ } catch (error) {
+ logger.error("Error saving memory", {
+ error: error instanceof Error ? error.message : "Unknown error",
+ })
+ }
+}
+
+/**
+ * Creates SuperMemory middleware for OpenAI clients.
+ *
+ * This function creates middleware that automatically injects relevant memories
+ * into OpenAI chat completions and optionally saves new memories. The middleware
+ * can wrap existing OpenAI clients or create new ones with SuperMemory capabilities.
+ *
+ * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
+ * @param options - Optional configuration options for the middleware
+ * @param options.conversationId - Optional conversation ID to group messages for contextual memory generation
+ * @param options.verbose - Enable detailed logging of memory operations (default: false)
+ * @param options.mode - Memory search mode: "profile" (all memories), "query" (search-based), or "full" (both) (default: "profile")
+ * @param options.addMemory - Automatic memory storage mode: "always" or "never" (default: "never")
+ * @returns Object with `wrapClient` and `createClient` methods
+ * @throws {Error} When SUPERMEMORY_API_KEY environment variable is not set
+ *
+ * @example
+ * ```typescript
+ * const openaiWithSupermemory = createOpenAIMiddleware(openai, "user-123", {
+ * conversationId: "conversation-456",
+ * mode: "full",
+ * addMemory: "always",
+ * verbose: true
+ * })
+ *
+ * ```
+ */
+export function createOpenAIMiddleware(
+ openaiClient: OpenAI,
+ containerTag: string,
+ options?: OpenAIMiddlewareOptions,
+) {
+ const logger = createLogger(options?.verbose ?? false)
+ const client = new Supermemory({
+ apiKey: process.env.SUPERMEMORY_API_KEY,
+ })
+
+ const originalCreate = openaiClient.chat.completions.create
+
+ const createWithMemory = async (
+ params: OpenAI.Chat.Completions.ChatCompletionCreateParams,
+ ) => {
+ const messages = Array.isArray(params.messages) ? params.messages : []
+
+ if (addMemory === "always") {
+ const userMessage = getLastUserMessage(messages)
+ if (userMessage?.trim()) {
+ const content = conversationId
+ ? getConversationContent(messages)
+ : userMessage
+ const customId = conversationId
+ ? `conversation:${conversationId}`
+ : undefined
+
+ addMemoryTool(client, containerTag, content, customId, logger)
+ }
+ }
+
+ if (mode !== "profile") {
+ const userMessage = getLastUserMessage(messages)
+ if (!userMessage) {
+ logger.debug("No user message found, skipping memory search")
+ return originalCreate.call(openaiClient.chat.completions, params)
+ }
+ }
+
+ logger.info("Starting memory search", {
+ containerTag,
+ conversationId,
+ mode,
+ })
+
+ const enhancedMessages = await addSystemPrompt(
+ messages,
+ containerTag,
+ logger,
+ mode,
+ )
+
+ return originalCreate.call(openaiClient.chat.completions, {
+ ...params,
+ messages: enhancedMessages,
+ })
+ }
+
+ openaiClient.chat.completions.create =
+ createWithMemory as typeof originalCreate
+
+ return openaiClient
+}
diff --git a/packages/tools/src/openai.ts b/packages/tools/src/openai/tools.ts
index 5c79a9c1..4078df09 100644
--- a/packages/tools/src/openai.ts
+++ b/packages/tools/src/openai/tools.ts
@@ -5,8 +5,8 @@ import {
PARAMETER_DESCRIPTIONS,
TOOL_DESCRIPTIONS,
getContainerTags,
-} from "./shared"
-import type { SupermemoryToolsConfig } from "./types"
+} from "../shared"
+import type { SupermemoryToolsConfig } from "../types"
/**
* Result types for memory operations
diff --git a/packages/tools/test/chatapp/app/api/openai-chat/route.ts b/packages/tools/test/chatapp/app/api/openai-chat/route.ts
new file mode 100644
index 00000000..1787cc58
--- /dev/null
+++ b/packages/tools/test/chatapp/app/api/openai-chat/route.ts
@@ -0,0 +1,31 @@
+import { OpenAI } from "openai"
+//import { withSupermemory } from "@supermemory/tools/openai"
+import { withSupermemory } from "../../../../../src/openai"
+
+export const runtime = "nodejs"
+
+export async function POST(req: Request) {
+ const { messages, conversationId } = (await req.json()) as {
+ messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[]
+ conversationId: string
+ }
+
+ const openai = new OpenAI({
+ apiKey: process.env.OPENAI_API_KEY,
+ })
+
+ const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+ conversationId,
+ mode: "full",
+ addMemory: "always",
+ verbose: true,
+ })
+
+ const completion = await openaiWithSupermemory.chat.completions.create({
+ model: "gpt-4o-mini",
+ messages,
+ })
+
+ const message = completion.choices?.[0]?.message
+ return Response.json({ message, usage: completion.usage })
+}
diff --git a/packages/tools/test/chatapp/app/openai-chat/[chatId]/page.tsx b/packages/tools/test/chatapp/app/openai-chat/[chatId]/page.tsx
new file mode 100644
index 00000000..4ed094cf
--- /dev/null
+++ b/packages/tools/test/chatapp/app/openai-chat/[chatId]/page.tsx
@@ -0,0 +1,109 @@
+"use client"
+
+import { useParams, useRouter } from "next/navigation"
+import { useState } from "react"
+import type { OpenAI as OpenAIType } from "openai"
+
+type ChatMessage = OpenAIType.Chat.Completions.ChatCompletionMessageParam
+
+export default function ChatPage() {
+ const { chatId } = useParams() as { chatId: string }
+ const router = useRouter()
+ const [input, setInput] = useState("")
+ const [messages, setMessages] = useState<ChatMessage[]>([
+ { role: "system", content: "You are a helpful assistant." },
+ ])
+ const [isLoading, setIsLoading] = useState(false)
+
+ async function send() {
+ if (!input.trim() || isLoading) return
+
+ const userMessage: ChatMessage = { role: "user", content: input }
+ setMessages((prev) => [...prev, userMessage])
+ setInput("")
+ setIsLoading(true)
+
+ try {
+ const res = await fetch("/api/openai-chat", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ messages: [...messages, userMessage],
+ conversationId: chatId,
+ }),
+ })
+ const data = await res.json()
+ const assistant = data.message as ChatMessage | undefined
+ if (assistant) {
+ setMessages((prev) => [...prev, assistant])
+ }
+ } catch (error) {
+ console.error("Error sending message:", error)
+ } finally {
+ setIsLoading(false)
+ }
+ }
+
+ return (
+ <div className="min-h-screen bg-background">
+ <div className="max-w-4xl mx-auto p-4 h-screen flex flex-col">
+ <div className="mb-6 flex items-center justify-between">
+ <h1 className="text-2xl font-bold text-foreground">
+ OpenAI + SuperMemory
+ </h1>
+ <button
+ type="button"
+ onClick={() => router.push("/openai-chat/new")}
+ className="text-sm underline"
+ >
+ New Chat
+ </button>
+ </div>
+
+ <div className="flex-1 overflow-y-auto space-y-4 mb-4">
+ {messages.map((message, index) => (
+ <div
+ key={`${message.role}-${index}`}
+ className={message.role === "user" ? "text-right" : "text-left"}
+ >
+ <span className="inline-block rounded px-3 py-2 bg-gray-100 dark:bg-gray-800">
+ {typeof message.content === "string" ? message.content : ""}
+ </span>
+ </div>
+ ))}
+ {isLoading && (
+ <div className="text-left">
+ <span className="inline-block rounded px-3 py-2 bg-gray-100 dark:bg-gray-800">
+ Thinking...
+ </span>
+ </div>
+ )}
+ </div>
+
+ <div className="flex gap-2">
+ <input
+ className="flex-1 border px-3 py-2"
+ placeholder="Type a message"
+ value={input}
+ onChange={(e) => setInput(e.target.value)}
+ onKeyDown={(e) => {
+ if (e.key === "Enter" && !e.shiftKey) {
+ e.preventDefault()
+ send()
+ }
+ }}
+ disabled={isLoading}
+ />
+ <button
+ type="button"
+ className="border px-3 py-2"
+ onClick={send}
+ disabled={!input.trim() || isLoading}
+ >
+ {isLoading ? "Sending..." : "Send"}
+ </button>
+ </div>
+ </div>
+ </div>
+ )
+}
diff --git a/packages/tools/test/chatapp/app/openai-chat/new/page.tsx b/packages/tools/test/chatapp/app/openai-chat/new/page.tsx
new file mode 100644
index 00000000..fb168085
--- /dev/null
+++ b/packages/tools/test/chatapp/app/openai-chat/new/page.tsx
@@ -0,0 +1,10 @@
+import { redirect } from "next/navigation"
+
+function generateId() {
+ return crypto.randomUUID?.() || Math.random().toString(36).slice(2)
+}
+
+export default function NewChatPage() {
+ const chatId = generateId()
+ redirect(`/openai-chat/${chatId}`)
+} \ No newline at end of file
diff --git a/packages/tools/test/chatapp/package.json b/packages/tools/test/chatapp/package.json
index 746ca050..2649035d 100644
--- a/packages/tools/test/chatapp/package.json
+++ b/packages/tools/test/chatapp/package.json
@@ -14,6 +14,7 @@
"next": "16.0.0",
"ai": "^4.0.0",
"@ai-sdk/openai": "^1.0.0",
+ "openai": "^4.104.0",
"supermemory": "^1.0.0",
"@supermemory/tools": "workspace:*"
},
diff --git a/packages/tools/tsdown.config.ts b/packages/tools/tsdown.config.ts
index cebdda44..48e11ae1 100644
--- a/packages/tools/tsdown.config.ts
+++ b/packages/tools/tsdown.config.ts
@@ -4,8 +4,8 @@ export default defineConfig({
entry: [
"src/index.ts",
"src/ai-sdk.ts",
- "src/openai.ts",
"src/claude-memory.ts",
+ "src/openai/index.ts",
],
format: "esm",
sourcemap: false,