import type OpenAI from "openai"
import {
createOpenAIMiddleware,
type OpenAIMiddlewareOptions,
} from "./middleware"
/**
* Wraps an OpenAI client with SuperMemory middleware to automatically inject relevant memories
* into both Chat Completions and Responses APIs based on the user's input content.
*
* For Chat Completions API: Searches for memories using the user message content and injects
* them into the system prompt (appends to existing or creates new system prompt).
*
* For Responses API: Searches for memories using the input parameter and injects them into
* the instructions parameter (appends to existing or creates new instructions).
*
* @param openaiClient - The OpenAI client to wrap with SuperMemory middleware
* @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
* @param options - Optional configuration options for the middleware
* @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation
* @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false)
* @param options.mode - Optional mode for memory search: "profile" (default), "query", or "full"
* @param options.addMemory - Optional mode for memory addition: "always", "never" (default)
*
* @returns An OpenAI client with SuperMemory middleware injected for both Chat Completions and Responses APIs
*
* @example
* ```typescript
* import { withSupermemory } from "@supermemory/tools/openai"
* import OpenAI from "openai"
*
* // Create OpenAI client with supermemory middleware
* const openai = new OpenAI({
* apiKey: process.env.OPENAI_API_KEY,
* })
* const openaiWithSupermemory = withSupermemory(openai, "user-123", {
* conversationId: "conversation-456",
* mode: "full",
* addMemory: "always"
* })
*
* // Use with Chat Completions API - memories injected into system prompt
* const chatResponse = await openaiWithSupermemory.chat.completions.create({
* model: "gpt-4",
* messages: [
* { role: "user", content: "What's my favorite programming language?" }
* ]
* })
*
* // Use with Responses API - memories injected into instructions
* const response = await openaiWithSupermemory.responses.create({
* model: "gpt-4o",
* instructions: "You are a helpful coding assistant",
* input: "What's my favorite programming language?"
* })
* ```
*
* @throws {Error} When SUPERMEMORY_API_KEY environment variable is not set
* @throws {Error} When supermemory API request fails
*/
export function withSupermemory(
	openaiClient: OpenAI,
	containerTag: string,
	options?: OpenAIMiddlewareOptions,
) {
	// Fail fast: the middleware cannot reach the SuperMemory API without a key,
	// so surface the misconfiguration at wrap time rather than on first request.
	if (!process.env.SUPERMEMORY_API_KEY) {
		throw new Error("SUPERMEMORY_API_KEY is not set")
	}

	// Forward the caller's options, applying the documented defaults:
	// verbose off, "profile" search mode, and no automatic memory addition.
	return createOpenAIMiddleware(openaiClient, containerTag, {
		conversationId: options?.conversationId,
		verbose: options?.verbose ?? false,
		mode: options?.mode ?? "profile",
		addMemory: options?.addMemory ?? "never",
		baseUrl: options?.baseUrl,
	})
}
export type { OpenAIMiddlewareOptions }
export type { MemorySearchResult, MemoryAddResult } from "./tools"
export {
createSearchMemoriesFunction,
createAddMemoryFunction,
supermemoryTools,
getToolDefinitions,
createToolCallExecutor,
createToolCallsExecutor,
createSearchMemoriesTool,
createAddMemoryTool,
memoryToolSchemas,
} from "./tools"