aboutsummaryrefslogtreecommitdiff
path: root/apps/docs
diff options
context:
space:
mode:
authorDhravya Shah <[email protected]>2025-10-03 02:41:49 -0700
committerDhravya Shah <[email protected]>2025-10-03 02:41:49 -0700
commit4d6fd37c99fd6af46d2f1aedbeb750e0029b3b8b (patch)
tree869d415d87b152bfb5e55601311a2c8f6e64b6fc /apps/docs
parentchore: fix docs again (diff)
downloadsupermemory-4d6fd37c99fd6af46d2f1aedbeb750e0029b3b8b.tar.xz
supermemory-4d6fd37c99fd6af46d2f1aedbeb750e0029b3b8b.zip
fix: model names
Diffstat (limited to 'apps/docs')
-rw-r--r--apps/docs/ai-sdk/examples.mdx8
-rw-r--r--apps/docs/ai-sdk/infinite-chat.mdx4
-rw-r--r--apps/docs/ai-sdk/memory-tools.mdx8
-rw-r--r--apps/docs/cookbook/ai-sdk-integration.mdx8
-rw-r--r--apps/docs/cookbook/customer-support.mdx64
-rw-r--r--apps/docs/cookbook/document-qa.mdx42
-rw-r--r--apps/docs/cookbook/personal-assistant.mdx4
-rw-r--r--apps/docs/memory-api/sdks/openai-plugins.mdx16
-rw-r--r--apps/docs/memory-router/usage.mdx12
-rw-r--r--apps/docs/memory-router/with-memory-api.mdx4
-rw-r--r--apps/docs/migration/from-mem0.mdx35
-rw-r--r--apps/docs/model-enhancement/context-extender.mdx2
-rw-r--r--apps/docs/model-enhancement/getting-started.mdx4
-rw-r--r--apps/docs/model-enhancement/identifying-users.mdx16
-rw-r--r--apps/docs/quickstart.mdx6
15 files changed, 116 insertions, 117 deletions
diff --git a/apps/docs/ai-sdk/examples.mdx b/apps/docs/ai-sdk/examples.mdx
index cb2b1a7f..61ad8d50 100644
--- a/apps/docs/ai-sdk/examples.mdx
+++ b/apps/docs/ai-sdk/examples.mdx
@@ -98,7 +98,7 @@ export async function POST(request: Request) {
const { messages, customerId } = await request.json()
const result = await streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-5'),
messages,
tools: supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
containerTags: [customerId]
@@ -136,7 +136,7 @@ export async function POST(request: Request) {
const { messages } = await request.json()
const result = await streamText({
- model: supermemoryInfiniteChat('gpt-4-turbo'),
+ model: supermemoryInfiniteChat('gpt-5'),
messages,
system: `You are a documentation assistant. You have access to all previous
conversations and can reference earlier discussions. Help users understand
@@ -222,7 +222,7 @@ export async function POST(request: Request) {
const { messages, projectId } = await request.json()
const result = await streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-5'),
messages,
tools: supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
containerTags: [projectId]
@@ -357,7 +357,7 @@ export async function POST(request: Request) {
const { messages } = await request.json()
const result = await streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-5'),
messages,
tools: {
// Spread Supermemory tools
diff --git a/apps/docs/ai-sdk/infinite-chat.mdx b/apps/docs/ai-sdk/infinite-chat.mdx
index b287eb5b..c382bcbf 100644
--- a/apps/docs/ai-sdk/infinite-chat.mdx
+++ b/apps/docs/ai-sdk/infinite-chat.mdx
@@ -45,7 +45,7 @@ const infiniteChat = createOpenAI({
})
const result = await streamText({
- model: infiniteChat("gpt-4-turbo"),
+ model: infiniteChat("gpt-5"),
messages: [...]
})
```
@@ -128,7 +128,7 @@ const infiniteChat = createOpenAI({
})
const result = await streamText({
- model: infiniteChat("gpt-4-turbo"),
+ model: infiniteChat("gpt-5"),
messages: [
{ role: "user", content: "What did we discuss yesterday?" }
]
diff --git a/apps/docs/ai-sdk/memory-tools.mdx b/apps/docs/ai-sdk/memory-tools.mdx
index 69b961e1..cc84097f 100644
--- a/apps/docs/ai-sdk/memory-tools.mdx
+++ b/apps/docs/ai-sdk/memory-tools.mdx
@@ -18,7 +18,7 @@ const openai = createOpenAI({
})
const result = await streamText({
- model: openai("gpt-4-turbo"),
+ model: openai("gpt-5"),
prompt: "Remember that my name is Alice",
tools: supermemoryTools("YOUR_SUPERMEMORY_KEY")
})
@@ -32,7 +32,7 @@ Semantic search through user memories:
```typescript
const result = await streamText({
- model: openai("gpt-4"),
+ model: openai("gpt-5"),
prompt: "What are my dietary preferences?",
tools: supermemoryTools("API_KEY")
})
@@ -64,7 +64,7 @@ Retrieve specific memory by ID:
```typescript
const result = await streamText({
- model: openai("gpt-4"),
+ model: openai("gpt-5"),
prompt: "Get the details of memory abc123",
tools: supermemoryTools("API_KEY")
})
@@ -87,7 +87,7 @@ import {
// Use only search tool
const result = await streamText({
- model: openai("gpt-4"),
+ model: openai("gpt-5"),
prompt: "What do you know about me?",
tools: {
searchMemories: searchMemoriesTool("API_KEY", {
diff --git a/apps/docs/cookbook/ai-sdk-integration.mdx b/apps/docs/cookbook/ai-sdk-integration.mdx
index 9bde2f42..8bdafef7 100644
--- a/apps/docs/cookbook/ai-sdk-integration.mdx
+++ b/apps/docs/cookbook/ai-sdk-integration.mdx
@@ -97,7 +97,7 @@ export async function POST(request: Request) {
const { messages, customerId } = await request.json()
const result = await streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-5'),
messages,
tools: supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
containerTags: [customerId]
@@ -135,7 +135,7 @@ export async function POST(request: Request) {
const { messages } = await request.json()
const result = await streamText({
- model: infiniteChat('gpt-4-turbo'),
+ model: infiniteChat('gpt-5'),
messages,
system: `You are a documentation assistant. You have access to all previous
conversations and can reference earlier discussions. Help users understand
@@ -221,7 +221,7 @@ export async function POST(request: Request) {
const { messages, projectId } = await request.json()
const result = await streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-5'),
messages,
tools: supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
containerTags: [projectId]
@@ -354,7 +354,7 @@ export async function POST(request: Request) {
const { messages } = await request.json()
const result = await streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-5'),
messages,
tools: {
// Spread Supermemory tools
diff --git a/apps/docs/cookbook/customer-support.mdx b/apps/docs/cookbook/customer-support.mdx
index 250def78..32e0417e 100644
--- a/apps/docs/cookbook/customer-support.mdx
+++ b/apps/docs/cookbook/customer-support.mdx
@@ -9,7 +9,7 @@ Create a customer support system that remembers every interaction, tracks issues
A customer support bot that:
- **Remembers customer history** across all conversations and channels
-- **Tracks ongoing issues** and follows up automatically
+- **Tracks ongoing issues** and follows up automatically
- **Provides personalized responses** based on customer tier and preferences
- **Escalates complex issues** to human agents with full context
- **Learns from resolutions** to improve future responses
@@ -231,7 +231,7 @@ A customer support bot that:
"""Add a customer interaction to memory"""
try:
content = f"{interaction['type'].upper()}: {interaction['content']}"
-
+
result = self.client.memories.add(
content=content,
container_tag=self._get_container_tag(customer_id),
@@ -378,10 +378,10 @@ Status: {issue['status']}"""
}
export async function POST(request: Request) {
- const {
- message,
- customerId,
- customer,
+ const {
+ message,
+ customerId,
+ customer,
conversationHistory = [],
agentId
} = await request.json()
@@ -410,7 +410,7 @@ ${contextResults.map(c => `- ${c.content.substring(0, 150)}... (${(c.similarity
// Determine if escalation is needed
const escalationKeywords = ['angry', 'frustrated', 'cancel', 'refund', 'legal', 'complaint', 'manager', 'supervisor']
- const needsEscalation = escalationKeywords.some(keyword =>
+ const needsEscalation = escalationKeywords.some(keyword =>
message.toLowerCase().includes(keyword)
) || customer.tier === 'enterprise'
@@ -445,7 +445,7 @@ If you cannot resolve the issue completely, prepare a clear summary for escalati
]
const result = await streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-5'),
messages,
temperature: 0.3,
maxTokens: 800,
@@ -468,7 +468,7 @@ If you cannot resolve the issue completely, prepare a clear summary for escalati
if (message.length > 50 && !contextResults.some(c => c.similarity > 0.8)) {
const issueCategory = categorizeIssue(message)
const priority = determinePriority(customer.tier, message)
-
+
await contextManager.trackIssue(customerId, {
subject: message.substring(0, 100),
description: message,
@@ -506,34 +506,34 @@ If you cannot resolve the issue completely, prepare a clear summary for escalati
}
const messageLower = message.toLowerCase()
-
+
for (const [category, keywords] of Object.entries(categories)) {
if (keywords.some(keyword => messageLower.includes(keyword))) {
return category
}
}
-
+
return 'general'
}
function determinePriority(tier: string, message: string): 'low' | 'medium' | 'high' | 'urgent' {
const urgentKeywords = ['urgent', 'critical', 'emergency', 'down', 'broken']
const highKeywords = ['important', 'asap', 'soon', 'problem']
-
+
const messageLower = message.toLowerCase()
-
+
if (urgentKeywords.some(keyword => messageLower.includes(keyword))) {
return 'urgent'
}
-
+
if (tier === 'enterprise') {
return highKeywords.some(keyword => messageLower.includes(keyword)) ? 'urgent' : 'high'
}
-
+
if (tier === 'pro') {
return highKeywords.some(keyword => messageLower.includes(keyword)) ? 'high' : 'medium'
}
-
+
return 'low'
}
```
@@ -580,29 +580,29 @@ If you cannot resolve the issue completely, prepare a clear summary for escalati
}
message_lower = message.lower()
-
+
for category, keywords in categories.items():
if any(keyword in message_lower for keyword in keywords):
return category
-
+
return 'general'
def determine_priority(tier: str, message: str) -> str:
"""Determine issue priority based on tier and message content"""
urgent_keywords = ['urgent', 'critical', 'emergency', 'down', 'broken']
high_keywords = ['important', 'asap', 'soon', 'problem']
-
+
message_lower = message.lower()
-
+
if any(keyword in message_lower for keyword in urgent_keywords):
return 'urgent'
-
+
if tier == 'enterprise':
return 'urgent' if any(keyword in message_lower for keyword in high_keywords) else 'high'
-
+
if tier == 'pro':
return 'high' if any(keyword in message_lower for keyword in high_keywords) else 'medium'
-
+
return 'low'
@app.post("/support/chat")
@@ -662,7 +662,7 @@ If you cannot resolve the issue completely, prepare a clear summary for escalati
]
response = await openai_client.chat.completions.create(
- model="gpt-4-turbo",
+ model="gpt-5",
messages=messages,
temperature=0.3,
max_tokens=800,
@@ -676,7 +676,7 @@ If you cannot resolve the issue completely, prepare a clear summary for escalati
content = chunk.choices[0].delta.content
full_response += content
yield f"data: {json.dumps({'content': content})}\n\n"
-
+
# Store interaction after completion
context_manager.add_interaction(request.customerId, {
'type': 'chat',
@@ -695,7 +695,7 @@ If you cannot resolve the issue completely, prepare a clear summary for escalati
if len(request.message) > 50 and not any(c['similarity'] > 0.8 for c in context_results):
issue_category = categorize_issue(request.message)
priority = determine_priority(request.customer.tier, request.message)
-
+
context_manager.track_issue(request.customerId, {
'subject': request.message[:100],
'description': request.message,
@@ -750,7 +750,7 @@ export default function SupportDashboard() {
const [tickets, setTickets] = useState<SupportTicket[]>([])
const [showEscalation, setShowEscalation] = useState(false)
const [agentId] = useState('agent_001') // In real app, get from auth
-
+
const contextManager = new CustomerContextManager()
const { messages, input, handleInputChange, handleSubmit, isLoading } = useChat({
@@ -781,7 +781,7 @@ export default function SupportDashboard() {
joinDate: '2023-06-15'
},
{
- id: 'cust_002',
+ id: 'cust_002',
name: 'TechCorp Inc',
tier: 'enterprise',
@@ -978,7 +978,7 @@ export default function SupportDashboard() {
</span>
</div>
<p className="text-gray-700 line-clamp-3">
- {interaction.content.length > 100
+ {interaction.content.length > 100
? `${interaction.content.substring(0, 100)}...`
: interaction.content
}
@@ -986,10 +986,10 @@ export default function SupportDashboard() {
{interaction.outcome && (
<div className="mt-2">
<span className={`text-xs px-2 py-1 rounded ${
- interaction.outcome === 'resolved'
+ interaction.outcome === 'resolved'
? 'bg-green-100 text-green-800'
: interaction.outcome === 'escalated'
- ? 'bg-red-100 text-red-800'
+ ? 'bg-red-100 text-red-800'
: 'bg-yellow-100 text-yellow-800'
}`}>
{interaction.outcome}
@@ -1044,4 +1044,4 @@ This comprehensive customer support recipe provides the foundation for building
---
-*Customize this recipe based on your specific support workflows and customer needs.* \ No newline at end of file
+*Customize this recipe based on your specific support workflows and customer needs.*
diff --git a/apps/docs/cookbook/document-qa.mdx b/apps/docs/cookbook/document-qa.mdx
index c3bc224e..d11e947b 100644
--- a/apps/docs/cookbook/document-qa.mdx
+++ b/apps/docs/cookbook/document-qa.mdx
@@ -9,7 +9,7 @@ Create a powerful document Q&A system that can ingest PDFs, text files, and web
A document Q&A system that:
- **Ingests multiple file types** (PDFs, DOCX, text, URLs)
-- **Answers questions accurately** with source citations
+- **Answers questions accurately** with source citations
- **Provides source references** with page numbers and document titles
- **Handles follow-up questions** with conversation context
- **Supports multiple document collections** for different topics
@@ -192,7 +192,7 @@ A document Q&A system that:
"""Upload a local file to Supermemory"""
if metadata is None:
metadata = {}
-
+
try:
with open(file_path, 'rb') as file:
result = self.client.memories.upload_file(
@@ -214,7 +214,7 @@ A document Q&A system that:
"""Upload URL content to Supermemory"""
if metadata is None:
metadata = {}
-
+
try:
result = self.client.memories.add(
content=url,
@@ -258,11 +258,11 @@ A document Q&A system that:
return [
{
'id': memory.id,
- 'title': (memory.title or
- memory.metadata.get('originalName') or
+ 'title': (memory.title or
+ memory.metadata.get('originalName') or
'Untitled' if memory.metadata else 'Untitled'),
- 'type': (memory.metadata.get('fileType') or
- memory.metadata.get('type') or
+ 'type': (memory.metadata.get('fileType') or
+ memory.metadata.get('type') or
'unknown' if memory.metadata else 'unknown'),
'uploadedAt': memory.metadata.get('uploadedAt') if memory.metadata else None,
'status': memory.status,
@@ -349,9 +349,9 @@ A document Q&A system that:
]
const result = await streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-5'),
messages,
- system: `You are a helpful document Q&A assistant. Answer questions based ONLY on the provided document context.
+ system: `You are a helpful document Q&A assistant. Answer questions based ONLY on the provided document context.
CONTEXT FROM DOCUMENTS:
${context}
@@ -450,13 +450,13 @@ If the question cannot be answered from the provided documents, respond with: "I
for index, result in enumerate(search_results.results):
relevant_chunks = [
- chunk.content for chunk in result.chunks
+ chunk.content for chunk in result.chunks
if chunk.is_relevant
][:3]
-
+
chunk_text = '\n\n'.join(relevant_chunks)
context_parts.append(f'[Document {index + 1}: "{result.title}"]\n{chunk_text}')
-
+
sources.append({
'id': result.document_id,
'title': result.title,
@@ -472,7 +472,7 @@ If the question cannot be answered from the provided documents, respond with: "I
messages = [
{
"role": "system",
- "content": f"""You are a helpful document Q&A assistant. Answer questions based ONLY on the provided document context.
+ "content": f"""You are a helpful document Q&A assistant. Answer questions based ONLY on the provided document context.
CONTEXT FROM DOCUMENTS:
{context}
@@ -500,7 +500,7 @@ If the question cannot be answered from the provided documents, respond with: "I
# Get AI response
response = await openai_client.chat.completions.create(
- model="gpt-4-turbo",
+ model="gpt-5",
messages=messages,
temperature=0.1,
max_tokens=1000
@@ -557,7 +557,7 @@ export default function DocumentQA() {
const [isUploading, setIsUploading] = useState(false)
const [uploadProgress, setUploadProgress] = useState<Record<string, number>>({})
const fileInputRef = useRef<HTMLInputElement>(null)
-
+
const processor = new DocumentProcessor()
const { messages, input, handleInputChange, handleSubmit, isLoading } = useChat({
@@ -599,7 +599,7 @@ export default function DocumentQA() {
// Refresh document list
await loadDocuments()
-
+
// Clear file input
if (fileInputRef.current) {
fileInputRef.current.value = ''
@@ -653,7 +653,7 @@ export default function DocumentQA() {
<div className="lg:col-span-1">
<div className="bg-white border border-gray-200 rounded-lg p-6">
<h2 className="text-lg font-semibold mb-4">Document Collection</h2>
-
+
{/* Collection Selector */}
<div className="mb-4">
<label className="block text-sm font-medium text-gray-700 mb-2">
@@ -732,13 +732,13 @@ export default function DocumentQA() {
<div className="lg:col-span-2">
<div className="bg-white border border-gray-200 rounded-lg p-6">
<h2 className="text-lg font-semibold mb-4">Ask Questions</h2>
-
+
{/* Messages */}
<div className="h-96 overflow-y-auto mb-4 space-y-4">
{messages.length === 0 && (
<div className="text-gray-500 text-center py-8">
Upload documents and ask questions to get started!
-
+
<div className="mt-4 text-sm">
<p className="font-medium">Try asking:</p>
<ul className="mt-2 space-y-1">
@@ -760,7 +760,7 @@ export default function DocumentQA() {
}`}
>
<div className="whitespace-pre-wrap">{message.content}</div>
-
+
{message.role === 'assistant' && sources.length > 0 && (
formatSources(sources)
)}
@@ -880,4 +880,4 @@ This recipe provides a complete foundation for building document Q&A systems wit
---
-*Customize this recipe based on your specific document types and use cases.* \ No newline at end of file
+*Customize this recipe based on your specific document types and use cases.*
diff --git a/apps/docs/cookbook/personal-assistant.mdx b/apps/docs/cookbook/personal-assistant.mdx
index 2c977660..c13b7471 100644
--- a/apps/docs/cookbook/personal-assistant.mdx
+++ b/apps/docs/cookbook/personal-assistant.mdx
@@ -73,7 +73,7 @@ A personal AI assistant that:
const { messages, userId = 'default-user' } = await request.json()
const result = await streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-5'),
messages,
tools: supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
containerTags: [userId]
@@ -190,7 +190,7 @@ A personal AI assistant that:
try:
response = await openai_client.chat.completions.create(
- model="gpt-4-turbo",
+ model="gpt-5",
messages=enhanced_messages,
stream=True,
temperature=0.7
diff --git a/apps/docs/memory-api/sdks/openai-plugins.mdx b/apps/docs/memory-api/sdks/openai-plugins.mdx
index 6550cee3..635e0008 100644
--- a/apps/docs/memory-api/sdks/openai-plugins.mdx
+++ b/apps/docs/memory-api/sdks/openai-plugins.mdx
@@ -53,7 +53,7 @@ async def main():
# Chat with memory tools
response = await client.chat.completions.create(
- model="gpt-4o",
+ model="gpt-5",
messages=[
{
"role": "system",
@@ -99,7 +99,7 @@ const executeToolCall = createToolCallExecutor(process.env.SUPERMEMORY_API_KEY!,
// Use with OpenAI Chat Completions
const completion = await client.chat.completions.create({
- model: "gpt-4",
+ model: "gpt-5",
messages: [
{
role: "user",
@@ -300,7 +300,7 @@ async def chat_with_memory():
# Get AI response with tools
response = await client.chat.completions.create(
- model="gpt-4o",
+ model="gpt-5",
messages=messages,
tools=tools.get_tool_definitions()
)
@@ -319,7 +319,7 @@ async def chat_with_memory():
# Get final response after tool execution
final_response = await client.chat.completions.create(
- model="gpt-4o",
+ model="gpt-5",
messages=messages
)
@@ -370,7 +370,7 @@ async function chatWithMemory() {
// Get AI response with tools
const response = await client.chat.completions.create({
- model: "gpt-4",
+ model: "gpt-5",
messages,
tools: getToolDefinitions(),
})
@@ -391,7 +391,7 @@ async function chatWithMemory() {
// Get final response after tool execution
const finalResponse = await client.chat.completions.create({
- model: "gpt-4",
+ model: "gpt-5",
messages,
})
@@ -433,7 +433,7 @@ async def safe_chat():
tools = SupermemoryTools(api_key="your-api-key")
response = await client.chat.completions.create(
- model="gpt-4o",
+ model="gpt-5",
messages=[{"role": "user", "content": "Hello"}],
tools=tools.get_tool_definitions()
)
@@ -453,7 +453,7 @@ async function safeChat() {
const client = new OpenAI()
const response = await client.chat.completions.create({
- model: "gpt-4",
+ model: "gpt-5",
messages: [{ role: "user", content: "Hello" }],
tools: getToolDefinitions(),
})
diff --git a/apps/docs/memory-router/usage.mdx b/apps/docs/memory-router/usage.mdx
index 61dadfbf..68dad6f6 100644
--- a/apps/docs/memory-router/usage.mdx
+++ b/apps/docs/memory-router/usage.mdx
@@ -81,7 +81,7 @@ https://api.supermemory.ai/v3/https://api.groq.com/openai/v1/
# Use as normal
response = client.chat.completions.create(
- model="gpt-4",
+ model="gpt-5",
messages=[
{"role": "user", "content": "Hello!"}
]
@@ -106,7 +106,7 @@ https://api.supermemory.ai/v3/https://api.groq.com/openai/v1/
// Use as normal
const response = await client.chat.completions.create({
- model: 'gpt-4',
+ model: 'gpt-5',
messages: [
{ role: 'user', content: 'Hello!' }
]
@@ -124,7 +124,7 @@ https://api.supermemory.ai/v3/https://api.groq.com/openai/v1/
-H "x-sm-user-id: user123" \
-H "Content-Type: application/json" \
-d '{
- "model": "gpt-4",
+ "model": "gpt-5",
"messages": [{"role": "user", "content": "Hello!"}]
}'
```
@@ -162,7 +162,7 @@ curl -X POST "https://api.supermemory.ai/v3/https://api.openai.com/v1/chat/compl
-H "Authorization: Bearer YOUR_OPENAI_API_KEY" \
-H "x-supermemory-api-key: YOUR_SUPERMEMORY_API_KEY" \
-H "Content-Type: application/json" \
- -d '{"model": "gpt-4", "messages": [{"role": "user", "content": "Hello!"}]}'
+ -d '{"model": "gpt-5", "messages": [{"role": "user", "content": "Hello!"}]}'
```
</CodeGroup>
@@ -176,7 +176,7 @@ Use `x-sm-conversation-id` to maintain conversation context across requests:
```python
# Start a new conversation
response1 = client.chat.completions.create(
- model="gpt-4",
+ model="gpt-5",
messages=[{"role": "user", "content": "My name is Alice"}],
extra_headers={
"x-sm-conversation-id": "conv_123"
@@ -185,7 +185,7 @@ response1 = client.chat.completions.create(
# Continue the same conversation later
response2 = client.chat.completions.create(
- model="gpt-4",
+ model="gpt-5",
messages=[{"role": "user", "content": "What's my name?"}],
extra_headers={
"x-sm-conversation-id": "conv_123"
diff --git a/apps/docs/memory-router/with-memory-api.mdx b/apps/docs/memory-router/with-memory-api.mdx
index fcb09361..ae4396c2 100644
--- a/apps/docs/memory-router/with-memory-api.mdx
+++ b/apps/docs/memory-router/with-memory-api.mdx
@@ -40,7 +40,7 @@ router_client = OpenAI(
# Router automatically has access to the API-created memory
response = router_client.chat.completions.create(
- model="gpt-4",
+ model="gpt-5",
messages=[{"role": "user", "content": "What language should I use for my new backend?"}]
)
# Response will consider the Python preference
@@ -66,7 +66,7 @@ router_client = OpenAI(
# Agent has automatic access to product docs
response = router_client.chat.completions.create(
- model="gpt-4",
+ model="gpt-5",
messages=[{"role": "user", "content": "How does the enterprise pricing work?"}]
)
```
diff --git a/apps/docs/migration/from-mem0.mdx b/apps/docs/migration/from-mem0.mdx
index e24a5548..5a6ccb83 100644
--- a/apps/docs/migration/from-mem0.mdx
+++ b/apps/docs/migration/from-mem0.mdx
@@ -9,7 +9,7 @@ Migrating from Mem0.ai to Supermemory is straightforward. This guide walks you t
## Why Migrate to Supermemory?
Supermemory offers enhanced capabilities over Mem0.ai:
-- **Memory Router** for zero-code LLM integration
+- **Memory Router** for zero-code LLM integration
- **Knowledge graph** architecture for better context relationships
- **Multiple content types** (URLs, PDFs, images, videos)
- **Generous free tier** (100k tokens) with affordable pricing
@@ -58,17 +58,17 @@ print("Migration complete!")
3. Download your memories as JSON
### Option 2: Export via API
-
+
Simple script to export all your memories from Mem0:
```python
from mem0 import MemoryClient
import json
import time
-
+
# Connect to Mem0
client = MemoryClient(api_key="your_mem0_api_key")
-
+
# Create export job
schema = {
"type": "object",
@@ -87,19 +87,19 @@ print("Migration complete!")
}
}
}
-
+
response = client.create_memory_export(schema=schema, filters={})
export_id = response["id"]
-
+
# Wait and retrieve
print("Exporting memories...")
time.sleep(5)
export_data = client.get_memory_export(memory_export_id=export_id)
-
+
# Save to file
with open("mem0_export.json", "w") as f:
json.dump(export_data, f, indent=2)
-
+
print(f"Exported {len(export_data['memories'])} memories")
```
</Step>
@@ -110,7 +110,7 @@ print("Migration complete!")
1. Sign up at [console.supermemory.ai](https://console.supermemory.ai)
2. Create a new project
3. Generate an API key from the dashboard
-
+
```bash
# Set your environment variable
export SUPERMEMORY_API_KEY="your_supermemory_api_key"
@@ -123,22 +123,22 @@ print("Migration complete!")
```python
import json
from supermemory import Supermemory
-
+
# Load your Mem0 export
with open("mem0_export.json", "r") as f:
mem0_data = json.load(f)
-
+
# Connect to Supermemory
client = Supermemory(api_key="your_supermemory_api_key")
-
+
# Import memories
for memory in mem0_data["memories"]:
content = memory.get("content", "")
-
+
# Skip empty memories
if not content:
continue
-
+
# Import to Supermemory
try:
result = client.memories.add(
@@ -153,7 +153,7 @@ print("Migration complete!")
print(f"Imported: {content[:50]}...")
except Exception as e:
print(f"Failed: {e}")
-
+
print("Migration complete!")
```
</Step>
@@ -264,7 +264,7 @@ messages = [
]
response = openai.chat.completions.create(
- model="gpt-4",
+ model="gpt-5",
messages=messages
)
```
@@ -284,7 +284,7 @@ client = OpenAI(
# Memories handled automatically!
response = client.chat.completions.create(
- model="gpt-4",
+ model="gpt-5",
messages=[{"role": "user", "content": "What are my preferences?"}]
)
```
@@ -300,4 +300,3 @@ For enterprise migrations, [contact us](mailto:[email protected]) for assi
1. [Explore](/how-it-works) how Supermemory works
2. Read the [quickstart](/quickstart) and add and retrieve your first memories
3. [Connect](/connectors/overview) to Google Drive, Notion, and OneDrive with automatic syncing
-
diff --git a/apps/docs/model-enhancement/context-extender.mdx b/apps/docs/model-enhancement/context-extender.mdx
index 0f165f1e..a7d7897e 100644
--- a/apps/docs/model-enhancement/context-extender.mdx
+++ b/apps/docs/model-enhancement/context-extender.mdx
@@ -77,7 +77,7 @@ openai.default_headers = {
# Create a chat completion with unlimited context
response = openai.ChatCompletion.create(
- model="gpt-4o-mini",
+ model="gpt-5-nano",
messages=[{"role": "user", "content": "Your message here"}]
)
```
diff --git a/apps/docs/model-enhancement/getting-started.mdx b/apps/docs/model-enhancement/getting-started.mdx
index 9faa7eba..7af3bfab 100644
--- a/apps/docs/model-enhancement/getting-started.mdx
+++ b/apps/docs/model-enhancement/getting-started.mdx
@@ -60,7 +60,7 @@ curl https://api.supermemory.ai/v3/https://api.openai.com/v1/chat/completions \
-H "x-supermemory--api-key: $SUPERMEMORY_API_KEY" \
-H 'x-sm-user-id: user_id' \
-d '{
- "model": "gpt-4o",
+ "model": "gpt-5",
"messages": [
{"role": "user", "content": "What is the capital of France?"}
]
@@ -85,7 +85,7 @@ const openai = new OpenAI({
});
const completion = await openai.chat.completions.create({
- model: "gpt-4o",
+ model: "gpt-5",
/// you can also add user here
user: "user",
messages: [
diff --git a/apps/docs/model-enhancement/identifying-users.mdx b/apps/docs/model-enhancement/identifying-users.mdx
index 518f046a..cdc1bbf2 100644
--- a/apps/docs/model-enhancement/identifying-users.mdx
+++ b/apps/docs/model-enhancement/identifying-users.mdx
@@ -15,18 +15,18 @@ You can add a default header of x-sm-user-id with any client and model
### `user` in body
-For models that support the `user` parameter in the body, such as OpenAI, you can also attach it to the body.
+For models that support the `user` parameter in the body, such as OpenAI, you can also attach it to the body.
### `userId` in search params
-You can also add `?userId=xyz` in the URL search parameters, incase the models don't support it.
+You can also add `?userId=xyz` in the URL search parameters, in case the models don't support it.
## Conversation ID
-If a conversation identifier is provided, You do not need to send the entire array of messages to supermemory.
+If a conversation identifier is provided, you do not need to send the entire array of messages to supermemory.
```typescript
-// if you provide conversation ID, You do not need to send all the messages every single time. supermemory automatically backfills it.
+// if you provide conversation ID, you do not need to send all the messages every single time. supermemory automatically backfills it.
const client = new OpenAI({
baseURL:
"https://api.supermemory.ai/v3/https://api.openai.com/v1",
@@ -93,7 +93,7 @@ async function main() {
'x-sm-user-id': "user_123"
}
});
-
+
console.debug(msg);
}
```
@@ -110,10 +110,10 @@ async function main() {
messages: [
{ role: "user", content: "Hello, Assistant" }
],
- model: "gpt-4o",
+ model: "gpt-5",
user: "user_123"
});
-
+
console.debug(completion.choices[0].message);
}
-``` \ No newline at end of file
+```
diff --git a/apps/docs/quickstart.mdx b/apps/docs/quickstart.mdx
index 131bfd6f..b0d12fad 100644
--- a/apps/docs/quickstart.mdx
+++ b/apps/docs/quickstart.mdx
@@ -466,7 +466,7 @@ https://api.supermemory.ai/v3/[openai-api-url-here]
async function chatWithOpenAI() {
try {
const response = await client.chat.completions.create({
- model: 'gpt-4o',
+ model: 'gpt-5',
messages: [
{ role: 'user', content: 'Hello my name is Naman. How are you?' }
],
@@ -639,7 +639,7 @@ https://api.supermemory.ai/v3/[openai-api-url-here]
def chat_with_openai():
try:
response = client.chat.completions.create(
- model="gpt-4o",
+ model="gpt-5",
messages=[
{"role": "user", "content": "Hello my name is Naman. How are you?"}
],
@@ -785,7 +785,7 @@ https://api.supermemory.ai/v3/[openai-api-url-here]
-H "x-supermemory-api-key: $SUPERMEMORY_API_KEY" \
-H "x-sm-user-id: user_123" \
-d '{
- "model": "gpt-4o",
+ "model": "gpt-5",
"messages": [
{"role": "user", "content": "Hello my name is Naman. How are you?"}
],