aboutsummaryrefslogtreecommitdiff
path: root/apps
diff options
context:
space:
mode:
authorDhravya Shah <[email protected]>2026-01-18 16:55:32 -0800
committerGitHub <[email protected]>2026-01-18 16:55:32 -0800
commit87b361c26bf5fc16049cd2727825891aa14b8e8b (patch)
treec2f9f4f6223a7c1734578b772a16490ba2eb8b96 /apps
parentAdd Claude Code GitHub Workflow (#681) (diff)
downloadsupermemory-87b361c26bf5fc16049cd2727825891aa14b8e8b.tar.xz
supermemory-87b361c26bf5fc16049cd2727825891aa14b8e8b.zip
docs changes (#678)
Co-authored-by: Claude Opus 4.5 <[email protected]>
Diffstat (limited to 'apps')
-rw-r--r--apps/docs/add-memories.mdx373
-rw-r--r--apps/docs/add-memories/examples/basic.mdx32
-rw-r--r--apps/docs/add-memories/examples/file-upload.mdx12
-rw-r--r--apps/docs/add-memories/overview.mdx22
-rw-r--r--apps/docs/ai-sdk/examples.mdx68
-rw-r--r--apps/docs/ai-sdk/infinite-chat.mdx2
-rw-r--r--apps/docs/ai-sdk/memory-tools.mdx4
-rw-r--r--apps/docs/ai-sdk/overview.mdx37
-rw-r--r--apps/docs/ai-sdk/user-profiles.mdx2
-rw-r--r--apps/docs/concepts/content-types.mdx227
-rw-r--r--apps/docs/concepts/customization.mdx172
-rw-r--r--apps/docs/concepts/filtering.mdx359
-rw-r--r--apps/docs/concepts/graph-memory.mdx146
-rw-r--r--apps/docs/concepts/how-it-works.mdx (renamed from apps/docs/how-it-works.mdx)10
-rw-r--r--apps/docs/concepts/memory-vs-rag.mdx (renamed from apps/docs/memory-vs-rag.mdx)26
-rw-r--r--apps/docs/concepts/super-rag.mdx176
-rw-r--r--apps/docs/concepts/user-profiles.mdx141
-rw-r--r--apps/docs/connectors/overview.mdx5
-rw-r--r--apps/docs/connectors/troubleshooting.mdx2
-rw-r--r--apps/docs/cookbook/ai-sdk-integration.mdx68
-rw-r--r--apps/docs/cookbook/customer-support.mdx20
-rw-r--r--apps/docs/cookbook/document-qa.mdx36
-rw-r--r--apps/docs/cookbook/overview.mdx2
-rw-r--r--apps/docs/cookbook/personal-assistant.mdx56
-rw-r--r--apps/docs/docs.json436
-rw-r--r--apps/docs/document-operations.mdx295
-rw-r--r--apps/docs/images/anthropic-1.svg1
-rw-r--r--apps/docs/images/openai.svg1
-rw-r--r--apps/docs/images/pipecat.svg1
-rw-r--r--apps/docs/images/quickstart-icon.svg4
-rw-r--r--apps/docs/images/supermemory.svg1
-rw-r--r--apps/docs/install.md14
-rw-r--r--apps/docs/integrations/ai-sdk.mdx182
-rw-r--r--apps/docs/integrations/claude-memory.mdx269
-rw-r--r--apps/docs/integrations/memory-graph.mdx363
-rw-r--r--apps/docs/integrations/n8n.mdx93
-rw-r--r--apps/docs/integrations/openai.mdx655
-rw-r--r--apps/docs/integrations/pipecat.mdx203
-rw-r--r--apps/docs/integrations/supermemory-sdk.mdx140
-rw-r--r--apps/docs/integrations/zapier.mdx65
-rw-r--r--apps/docs/intro.mdx30
-rw-r--r--apps/docs/introduction.mdx4
-rw-r--r--apps/docs/list-memories/examples/basic.mdx8
-rw-r--r--apps/docs/list-memories/examples/filtering.mdx112
-rw-r--r--apps/docs/list-memories/examples/monitoring.mdx27
-rw-r--r--apps/docs/list-memories/examples/pagination.mdx12
-rw-r--r--apps/docs/list-memories/overview.mdx4
-rw-r--r--apps/docs/memory-api/connectors/managing-resources.mdx1
-rw-r--r--apps/docs/memory-api/features/filtering.mdx4
-rw-r--r--apps/docs/memory-api/ingesting.mdx12
-rw-r--r--apps/docs/memory-api/introduction.mdx2
-rw-r--r--apps/docs/memory-api/sdks/anthropic-claude-memory.mdx4
-rw-r--r--apps/docs/memory-api/sdks/openai-plugins.mdx2
-rw-r--r--apps/docs/memory-api/sdks/overview.mdx6
-rw-r--r--apps/docs/memory-api/sdks/python.mdx16
-rw-r--r--apps/docs/memory-api/sdks/typescript.mdx26
-rw-r--r--apps/docs/memory-api/track-progress.mdx18
-rw-r--r--apps/docs/memory-graph/installation.mdx4
-rw-r--r--apps/docs/memory-graph/quickstart.mdx4
-rw-r--r--apps/docs/memory-operations.mdx98
-rw-r--r--apps/docs/memory-router/overview.mdx2
-rw-r--r--apps/docs/memory-router/with-memory-api.mdx10
-rw-r--r--apps/docs/migration/from-mem0.mdx82
-rw-r--r--apps/docs/migration/from-zep.mdx6
-rw-r--r--apps/docs/migration/mem0-migration-script.py193
-rw-r--r--apps/docs/org-settings.mdx265
-rw-r--r--apps/docs/quickstart.mdx19
-rw-r--r--apps/docs/search.mdx247
-rw-r--r--apps/docs/search/filtering.mdx902
-rw-r--r--apps/docs/search/overview.mdx4
-rw-r--r--apps/docs/search/parameters.mdx2
-rw-r--r--apps/docs/style.css5
-rw-r--r--apps/docs/supermemory-mcp/mcp.mdx1
-rw-r--r--apps/docs/supermemory-mcp/setup.mdx1
-rw-r--r--apps/docs/test.py10
-rw-r--r--apps/docs/update-delete-memories/overview.mdx58
-rw-r--r--apps/docs/user-profiles.mdx266
-rw-r--r--apps/docs/user-profiles/examples.mdx2
-rw-r--r--apps/docs/user-profiles/overview.mdx2
-rw-r--r--apps/docs/vibe-coding.mdx14
-rw-r--r--apps/docs/voice-realtime/pipecat.mdx2
-rw-r--r--apps/mcp/src/client.ts2
82 files changed, 5252 insertions, 1958 deletions
diff --git a/apps/docs/add-memories.mdx b/apps/docs/add-memories.mdx
new file mode 100644
index 00000000..65299249
--- /dev/null
+++ b/apps/docs/add-memories.mdx
@@ -0,0 +1,373 @@
+---
+title: "Ingesting context to supermemory"
+sidebarTitle: "Add context"
+description: "Add text, files, and URLs to Supermemory"
+icon: "plus"
+---
+
+Send any raw content to Supermemory — conversations, documents, files, URLs. We extract the memories automatically.
+
+<Tip>
+**Use `customId`** to identify your content (conversation ID, document ID, etc.). This enables updates and prevents duplicates.
+</Tip>
+
+## Quick Start
+
+<Tabs>
+ <Tab title="TypeScript">
+ ```typescript
+ import Supermemory from 'supermemory';
+
+ const client = new Supermemory();
+
+ // Add text content
+ await client.add({
+ content: "Machine learning enables computers to learn from data",
+ containerTag: "user_123",
+ metadata: { category: "ai" }
+ });
+
+ // Add a URL (auto-extracted)
+ await client.add({
+ content: "https://youtube.com/watch?v=dQw4w9WgXcQ",
+ containerTag: "user_123"
+ });
+ ```
+ </Tab>
+ <Tab title="Python">
+ ```python
+ from supermemory import Supermemory
+
+ client = Supermemory()
+
+ # Add text content
+ client.add(
+ content="Machine learning enables computers to learn from data",
+ container_tag="user_123",
+ metadata={"category": "ai"}
+ )
+
+ # Add a URL (auto-extracted)
+ client.add(
+ content="https://youtube.com/watch?v=dQw4w9WgXcQ",
+ container_tag="user_123"
+ )
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ curl -X POST "https://api.supermemory.ai/v3/documents" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "content": "Machine learning enables computers to learn from data",
+ "containerTag": "user_123",
+ "metadata": {"category": "ai"}
+ }'
+ ```
+ </Tab>
+</Tabs>
+
+**Response:**
+```json
+{ "id": "abc123", "status": "queued" }
+```
+
+---
+
+## Updating Content
+
+Use `customId` to update existing documents or conversations. When you send content with the same `customId`, Supermemory intelligently processes only what's new.
+
+### Two ways to update:
+
+**Option 1: Send only the new content**
+```typescript
+// First request
+await client.add({
+ content: "user: Hi, I'm Sarah.\nassistant: Nice to meet you!",
+ customId: "conv_123",
+ containerTag: "user_sarah"
+});
+
+// Later: send only new messages
+await client.add({
+ content: "user: What's the weather?\nassistant: It's sunny today.",
+ customId: "conv_123", // Same ID — Supermemory links them
+ containerTag: "user_sarah"
+});
+```
+
+**Option 2: Send the full updated content**
+```typescript
+// Supermemory detects the diff and only processes new parts
+await client.add({
+ content: "user: Hi, I'm Sarah.\nassistant: Nice to meet you!\nuser: What's the weather?\nassistant: It's sunny today.",
+ customId: "conv_123",
+ containerTag: "user_sarah"
+});
+```
+
+Both work — choose what fits your architecture.
+
+### Replace entire document
+
+To completely replace a document's content (not append), use `documents.update()`:
+
+```typescript
+// Replace the entire document content
+await client.documents.update("doc_id_123", {
+ content: "Completely new content replacing everything",
+ metadata: { version: 2 }
+});
+```
+
+This triggers full reprocessing of the document.
+
+### Formatting conversations
+
+Format your conversations however you want. Supermemory handles any string format:
+
+```typescript
+// Simple string
+content: "user: Hello\nassistant: Hi there!"
+
+// JSON stringify
+content: JSON.stringify(messages)
+
+// Template literal
+content: messages.map(m => `${m.role}: ${m.content}`).join('\n')
+
+// Any format — just make it a string
+content: formatConversation(messages)
+```
+
+---
+
+## Upload Files
+
+Upload PDFs, images, and documents directly.
+
+<Tabs>
+ <Tab title="TypeScript">
+ ```typescript
+ import fs from 'fs';
+
+ await client.documents.uploadFile({
+ file: fs.createReadStream('document.pdf'),
+ containerTags: 'user_123'
+ });
+ ```
+ </Tab>
+ <Tab title="Python">
+ ```python
+ with open('document.pdf', 'rb') as file:
+ client.documents.upload_file(
+ file=file,
+ container_tags='user_123'
+ )
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ curl -X POST "https://api.supermemory.ai/v3/documents/file" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
+ -F "containerTags=user_123"
+ ```
+ </Tab>
+</Tabs>
+
+### Supported File Types
+
+| Type | Formats | Processing |
+|------|---------|------------|
+| Documents | PDF, DOC, DOCX, TXT, MD | Text extraction, OCR for scans |
+| Images | JPG, PNG, GIF, WebP | OCR text extraction |
+| Spreadsheets | CSV, Google Sheets | Structured data extraction |
+| Videos | YouTube URLs, MP4 | Auto-transcription |
+
+**Limits:** 50MB max file size
+
+---
+
+## Parameters
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `content` | string | **Required.** Any raw content — text, conversations, URLs, HTML |
+| `customId` | string | **Recommended.** Your ID for the content (conversation ID, doc ID). Enables updates and deduplication |
+| `containerTag` | string | Group by user/project. Required for user profiles |
+| `metadata` | object | Key-value pairs for filtering (strings, numbers, booleans) |
+
+<AccordionGroup>
+ <Accordion title="Parameter Details & Examples">
+ **Content Types:**
+ ```typescript
+ // Any text — conversations, notes, documents
+ { content: "Meeting notes from today's standup" }
+ { content: JSON.stringify(messages) }
+
+ // URLs (auto-detected and extracted)
+ { content: "https://example.com/article" }
+ { content: "https://youtube.com/watch?v=abc123" }
+
+ // Markdown, HTML, or any format
+ { content: "# Project Docs\n\n## Features\n- Real-time sync" }
+ ```
+
+ **Container Tags:**
+ ```typescript
+ // By user
+ { containerTag: "user_123" }
+
+ // By project
+ { containerTag: "project_alpha" }
+
+ // Hierarchical
+ { containerTag: "org_456_team_backend" }
+ ```
+
+ **Custom IDs (Recommended):**
+ ```typescript
+ // Use IDs from your system
+ { customId: "conv_abc123" } // Conversation ID
+ { customId: "doc_456" } // Document ID
+ { customId: "thread_789" } // Thread ID
+ { customId: "meeting_2024_01_15" } // Meeting ID
+
+ // Updates: same customId = same document
+ // Supermemory only processes new/changed content
+ await client.add({
+ content: "Updated content...",
+ customId: "doc_456" // Links to existing document
+ });
+ ```
+
+ **Metadata:**
+ ```typescript
+ {
+ metadata: {
+ source: "slack",
+ author: "john",
+ priority: 1,
+ reviewed: true
+ }
+ }
+ ```
+ - No nested objects or arrays
+ - Values: string, number, or boolean only
+ </Accordion>
+</AccordionGroup>
+
+---
+
+## Processing Pipeline
+
+When you add content, Supermemory:
+
+1. **Validates** your request
+2. **Stores** the document and queues for processing
+3. **Extracts** content (OCR, transcription, web scraping)
+4. **Chunks** into searchable memories
+5. **Embeds** for vector search
+6. **Indexes** for retrieval
+
+Track progress with `GET /v3/documents/{id}`:
+```typescript
+const doc = await client.documents.get("abc123");
+console.log(doc.status); // "queued" | "processing" | "done"
+```
+
+<AccordionGroup>
+ <Accordion title="Batch Upload">
+ Process multiple documents with rate limiting:
+
+ ```typescript
+ async function batchUpload(documents: Array<{id: string, content: string}>) {
+ const results = [];
+
+ for (const doc of documents) {
+ try {
+ const result = await client.add({
+ content: doc.content,
+ customId: doc.id,
+ containerTag: "batch_import"
+ });
+ results.push({ id: doc.id, success: true, docId: result.id });
+ } catch (error) {
+ results.push({ id: doc.id, success: false, error });
+ }
+
+ // Rate limit: 1 second between requests
+ await new Promise(r => setTimeout(r, 1000));
+ }
+
+ return results;
+ }
+ ```
+
+ **Tips:**
+ - Batch size: 3-5 documents at once
+ - Delay: 1-2 seconds between requests
+ - Use `customId` to track and deduplicate
+ </Accordion>
+
+ <Accordion title="Error Handling">
+ | Status | Error | Cause |
+ |--------|-------|-------|
+ | 400 | BadRequestError | Missing required fields, invalid parameters |
+ | 401 | AuthenticationError | Invalid or missing API key |
+ | 403 | PermissionDeniedError | Insufficient permissions |
+ | 429 | RateLimitError | Too many requests or quota exceeded |
+ | 500 | InternalServerError | Processing failure |
+
+ ```typescript
+ import { BadRequestError, RateLimitError } from 'supermemory';
+
+ try {
+ await client.add({ content: "..." });
+ } catch (error) {
+ if (error instanceof RateLimitError) {
+ // Wait and retry
+ await new Promise(r => setTimeout(r, 60000));
+ } else if (error instanceof BadRequestError) {
+ // Fix request parameters
+ console.error("Invalid request:", error.message);
+ }
+ }
+ ```
+ </Accordion>
+
+ <Accordion title="Delete Content">
+ **Single delete:**
+ ```typescript
+ await client.documents.delete("doc_id_123");
+ ```
+
+ **Bulk delete by IDs:**
+ ```typescript
+ await client.documents.deleteBulk({
+ ids: ["doc_1", "doc_2", "doc_3"]
+ });
+ ```
+
+ **Bulk delete by container tag:**
+ ```typescript
+ // Delete all content for a user
+ await client.documents.deleteBulk({
+ containerTags: ["user_123"]
+ });
+ ```
+
+ Deletes are permanent — no recovery.
+ </Accordion>
+</AccordionGroup>
+
+---
+
+## Next Steps
+
+- [Search Memories](/search) — Query your content
+- [User Profiles](/user-profiles) — Get user context
+- [Organizing & Filtering](/concepts/filtering) — Container tags and metadata
diff --git a/apps/docs/add-memories/examples/basic.mdx b/apps/docs/add-memories/examples/basic.mdx
index e87893fc..02cac11e 100644
--- a/apps/docs/add-memories/examples/basic.mdx
+++ b/apps/docs/add-memories/examples/basic.mdx
@@ -12,7 +12,7 @@ The most basic operation - adding plain text content.
<CodeGroup>
```typescript TypeScript
-const response = await client.memories.add({
+const response = await client.add({
content: "Artificial intelligence is transforming how we work and live"
});
@@ -21,7 +21,7 @@ console.log(response);
```
```python Python
-response = client.memories.add(
+response = client.add(
content="Artificial intelligence is transforming how we work and live"
)
@@ -47,7 +47,7 @@ Group related content using container tags.
<CodeGroup>
```typescript TypeScript
-const response = await client.memories.add({
+const response = await client.add({
content: "Q4 2024 revenue exceeded projections by 15%",
containerTag: "financial_reports"
});
@@ -57,7 +57,7 @@ console.log(response.id);
```
```python Python
-response = client.memories.add(
+response = client.add(
content="Q4 2024 revenue exceeded projections by 15%",
container_tag="financial_reports"
)
@@ -87,7 +87,7 @@ Attach metadata for better search and filtering.
<CodeGroup>
```typescript TypeScript
-await client.memories.add({
+await client.add({
content: "New onboarding flow reduces drop-off by 30%",
containerTag: "product_updates",
metadata: {
@@ -98,7 +98,7 @@ await client.memories.add({
```
```python Python
-client.memories.add(
+client.add(
content="New onboarding flow reduces drop-off by 30%",
container_tag="product_updates",
metadata={
@@ -136,7 +136,7 @@ const notes = [
const results = await Promise.all(
notes.map(note =>
- client.memories.add({
+ client.add({
content: note,
containerTag: "meeting_2024_01_15"
})
@@ -152,7 +152,7 @@ notes = [
]
for note in notes:
- client.memories.add(
+ client.add(
content=note,
container_tag="meeting_2024_01_15"
)
@@ -176,19 +176,19 @@ Process web pages, YouTube videos, and other URLs automatically.
```typescript TypeScript
// Web page
-await client.memories.add({
+await client.add({
content: "https://example.com/article",
containerTag: "articles"
});
// YouTube video (auto-transcribed)
-await client.memories.add({
+await client.add({
content: "https://youtube.com/watch?v=dQw4w9WgXcQ",
containerTag: "videos"
});
// Google Docs
-await client.memories.add({
+await client.add({
content: "https://docs.google.com/document/d/abc123/edit",
containerTag: "docs"
});
@@ -196,19 +196,19 @@ await client.memories.add({
```python Python
# Web page
-client.memories.add(
+client.add(
content="https://example.com/article",
container_tag="articles"
)
# YouTube video (auto-transcribed)
-client.memories.add(
+client.add(
content="https://youtube.com/watch?v=dQw4w9WgXcQ",
container_tag="videos"
)
# Google Docs
-client.memories.add(
+client.add(
content="https://docs.google.com/document/d/abc123/edit",
container_tag="docs"
)
@@ -246,7 +246,7 @@ const markdown = `
- **Enterprise security**
`;
-await client.memories.add({
+await client.add({
content: markdown,
containerTag: "docs"
});
@@ -262,7 +262,7 @@ markdown = """
- **Enterprise security**
"""
-client.memories.add(
+client.add(
content=markdown,
container_tag="docs"
)
diff --git a/apps/docs/add-memories/examples/file-upload.mdx b/apps/docs/add-memories/examples/file-upload.mdx
index b07a44b6..7d79d36e 100644
--- a/apps/docs/add-memories/examples/file-upload.mdx
+++ b/apps/docs/add-memories/examples/file-upload.mdx
@@ -14,7 +14,7 @@ Extract text from PDFs with OCR support.
```typescript TypeScript
const file = fs.createReadStream('document.pdf');
-const response = await client.memories.uploadFile({
+const response = await client.documents.uploadFile({
file: file,
containerTags: 'documents'
});
@@ -25,7 +25,7 @@ console.log(response.id);
```python Python
with open('document.pdf', 'rb') as file:
- response = client.memories.upload_file(
+ response = client.documents.upload_file(
file=file,
container_tags='documents'
)
@@ -54,7 +54,7 @@ Extract text from images.
```typescript TypeScript
const image = fs.createReadStream('screenshot.png');
-await client.memories.uploadFile({
+await client.documents.uploadFile({
file: image,
containerTags: 'images'
});
@@ -62,7 +62,7 @@ await client.memories.uploadFile({
```python Python
with open('screenshot.png', 'rb') as file:
- client.memories.upload_file(
+ client.documents.upload_file(
file=file,
container_tags='images'
)
@@ -134,7 +134,7 @@ Batch upload with rate limiting.
for (const file of files) {
const stream = fs.createReadStream(file);
- await client.memories.uploadFile({
+ await client.documents.uploadFile({
file: stream,
containerTags: 'batch'
});
@@ -149,7 +149,7 @@ import time
for file_path in files:
with open(file_path, 'rb') as file:
- client.memories.upload_file(
+ client.documents.upload_file(
file=file,
container_tags='batch'
)
diff --git a/apps/docs/add-memories/overview.mdx b/apps/docs/add-memories/overview.mdx
index 28778b0e..5b07381b 100644
--- a/apps/docs/add-memories/overview.mdx
+++ b/apps/docs/add-memories/overview.mdx
@@ -53,7 +53,7 @@ client = Supermemory(
```typescript TypeScript
// Add text content
-const result = await client.memories.add({
+const result = await client.add({
content: "Machine learning enables computers to learn from data",
containerTag: "ai-research",
metadata: { priority: "high" }
@@ -65,7 +65,7 @@ console.log(result);
```python Python
# Add text content
-result = client.memories.add(
+result = client.add(
content="Machine learning enables computers to learn from data",
container_tags=["ai-research"],
metadata={"priority": "high"}
@@ -125,14 +125,14 @@ Add text content, URLs, or any supported format.
<CodeGroup>
```typescript TypeScript
-await client.memories.add({
+await client.add({
content: "Your content here",
containerTag: "project"
});
```
```python Python
-client.memories.add(
+client.add(
content="Your content here",
container_tags=["project"]
)
@@ -156,14 +156,14 @@ Upload files directly for processing.
<CodeGroup>
```typescript TypeScript
-await client.memories.uploadFile({
+await client.documents.uploadFile({
file: fileStream,
containerTag: "project"
});
```
```python Python
-client.memories.upload_file(
+client.documents.upload_file(
file=open('file.pdf', 'rb'),
container_tags='project'
)
@@ -187,13 +187,13 @@ Update existing document content.
<CodeGroup>
```typescript TypeScript
-await client.memories.update("doc_id", {
+await client.documents.update("doc_id", {
content: "Updated content"
});
```
```python Python
-client.memories.update("doc_id", {
+client.documents.update("doc_id", {
"content": "Updated content"
})
```
@@ -245,7 +245,5 @@ curl -X PATCH "https://api.supermemory.ai/v3/documents/doc_id" \
## Next Steps
-- [Track Processing Status](/memory-api/track-progress) - Monitor document processing
-- [Search Memories](/search/overview) - Search your content
-- [List Memories](/list-memories/overview) - Browse stored memories
-- [Update & Delete](/update-delete-memories/overview) - Manage memories
+- [Memory Operations](/memory-operations) - Track status, list, update, and delete memories
+- [Search Memories](/search) - Search your content
diff --git a/apps/docs/ai-sdk/examples.mdx b/apps/docs/ai-sdk/examples.mdx
index 61ad8d50..9eabbae9 100644
--- a/apps/docs/ai-sdk/examples.mdx
+++ b/apps/docs/ai-sdk/examples.mdx
@@ -4,7 +4,7 @@ description: "Complete examples showing how to use Supermemory with Vercel AI SD
sidebarTitle: "Examples"
---
-This page provides comprehensive examples of using Supermemory with the Vercel AI SDK, covering both Memory Tools and Infinite Chat approaches.
+This page provides comprehensive examples of using Supermemory with the Vercel AI SDK, covering Memory Tools and User Profiles approaches.
## Personal Assistant with Memory Tools
@@ -114,62 +114,6 @@ export async function POST(request: Request) {
}
```
-## Infinite Chat for Documentation
-
-Create a documentation assistant with unlimited context:
-
-<CodeGroup>
-
-```typescript Documentation Chat
-import { streamText } from 'ai'
-
-const supermemoryInfiniteChat = createOpenAI({
- baseUrl: 'https://api.supermemory.ai/v3/https://api.openai.com/v1',
- apiKey: 'your-provider-api-key',
- headers: {
- 'x-supermemory-api-key': 'supermemory-api-key',
- 'x-sm-conversation-id': 'conversation-id'
- }
-})
-
-export async function POST(request: Request) {
- const { messages } = await request.json()
-
- const result = await streamText({
- model: supermemoryInfiniteChat('gpt-5'),
- messages,
- system: `You are a documentation assistant. You have access to all previous
- conversations and can reference earlier discussions. Help users understand
- the documentation by building on previous context.`
- })
-
- return result.toAIStreamResponse()
-}
-```
-
-```typescript Upload Documentation
-// Separate endpoint to upload documentation to memory
-import { addMemory } from '@supermemory/tools'
-
-export async function POST(request: Request) {
- const { content, title, url } = await request.json()
-
- const memory = await addMemory({
- apiKey: process.env.SUPERMEMORY_API_KEY!,
- content,
- title,
- url,
- headers: {
- 'x-sm-conversation-id': 'documentation'
- }
- })
-
- return Response.json({ success: true, memory })
-}
-```
-
-</CodeGroup>
-
## Multi-User Learning Assistant
Build an assistant that learns from multiple users but keeps data separate:
@@ -394,12 +338,6 @@ ANTHROPIC_API_KEY=your_anthropic_key
- Use project headers to separate different use cases
- Implement error handling for tool failures
-### Infinite Chat
-- Use conversation IDs to maintain separate chat contexts
-- Include user IDs for personalized experiences
-- Test with different providers to find the best fit for your use case
-- Monitor token usage for cost optimization
-
### General Tips
- Start with simple examples and gradually add complexity
- Use the search functionality to avoid duplicate memories
@@ -413,7 +351,7 @@ ANTHROPIC_API_KEY=your_anthropic_key
Advanced memory management with full API control
</Card>
- <Card title="Memory Router" icon="route" href="/memory-router/overview">
- Drop-in proxy for existing LLM applications
+ <Card title="User Profiles" icon="user" href="/user-profiles">
+ Automatic personalization with user profiles
</Card>
</CardGroup>
diff --git a/apps/docs/ai-sdk/infinite-chat.mdx b/apps/docs/ai-sdk/infinite-chat.mdx
index c382bcbf..4d67a86d 100644
--- a/apps/docs/ai-sdk/infinite-chat.mdx
+++ b/apps/docs/ai-sdk/infinite-chat.mdx
@@ -206,7 +206,7 @@ const infiniteChat = createOpenAI({
## Next Steps
<CardGroup cols={2}>
- <Card title="Memory Tools" icon="wrench" href="/ai-sdk/memory-tools">
+ <Card title="Memory Tools" icon="wrench" href="/integrations/ai-sdk">
Explore explicit memory control
</Card>
diff --git a/apps/docs/ai-sdk/memory-tools.mdx b/apps/docs/ai-sdk/memory-tools.mdx
index cc84097f..f48d04f7 100644
--- a/apps/docs/ai-sdk/memory-tools.mdx
+++ b/apps/docs/ai-sdk/memory-tools.mdx
@@ -137,8 +137,8 @@ Each tool returns a result object:
## Next Steps
<CardGroup cols={2}>
- <Card title="Infinite Chat" icon="infinity" href="/ai-sdk/infinite-chat">
- Try automatic memory management
+ <Card title="User Profiles" icon="user" href="/integrations/ai-sdk">
+ Automatic personalization with profiles
</Card>
<Card title="Examples" icon="code" href="/cookbook/ai-sdk-integration">
diff --git a/apps/docs/ai-sdk/overview.mdx b/apps/docs/ai-sdk/overview.mdx
index 07d70e29..0c9a48f4 100644
--- a/apps/docs/ai-sdk/overview.mdx
+++ b/apps/docs/ai-sdk/overview.mdx
@@ -4,7 +4,7 @@ description: "Use Supermemory with Vercel AI SDK for seamless memory management"
sidebarTitle: "Overview"
---
-The Supermemory AI SDK provides native integration with Vercel's AI SDK through three approaches: **User Profiles** for automatic personalization, **Memory Tools** for agent-based interactions, and **Infinite Chat** for automatic context management.
+The Supermemory AI SDK provides native integration with Vercel's AI SDK through two approaches: **User Profiles** for automatic personalization and **Memory Tools** for agent-based interactions.
<Card title="Supermemory tools on npm" icon="npm" href="https://www.npmjs.com/package/@supermemory/tools">
Check out the NPM page for more details
@@ -68,50 +68,21 @@ const result = await streamText({
})
```
-## Infinite Chat
-
-Automatic memory management for chat applications with unlimited context.
-
-```typescript
-import { streamText } from "ai"
-
-const infiniteChat = createAnthropic({
- baseUrl: 'https://api.supermemory.ai/v3/https://api.anthropic.com/v1',
- apiKey: 'your-provider-api-key',
- headers: {
- 'x-supermemory-api-key': 'supermemory-api-key',
- 'x-sm-conversation-id': 'conversation-id'
- }
-})
-
-const result = await streamText({
- model: infiniteChat("claude-3-sonnet"),
- messages: [
- { role: "user", content: "What's my name?" }
- ]
-})
-```
-
## When to Use
| Approach | Use Case |
|----------|----------|
| User Profiles | Personalized LLM responses with automatic user context |
| Memory Tools | AI agents that need explicit memory control |
-| Infinite Chat | Chat applications with automatic context |
## Next Steps
-<CardGroup cols={3}>
- <Card title="User Profiles" icon="user" href="/ai-sdk/user-profiles">
+<CardGroup cols={2}>
+ <Card title="User Profiles" icon="user" href="/integrations/ai-sdk">
Automatic personalization with profiles
</Card>
- <Card title="Memory Tools" icon="wrench" href="/ai-sdk/memory-tools">
+ <Card title="Memory Tools" icon="wrench" href="/integrations/ai-sdk">
Agent-based memory management
</Card>
-
- <Card title="Infinite Chat" icon="infinity" href="/ai-sdk/infinite-chat">
- Automatic context management
- </Card>
</CardGroup>
diff --git a/apps/docs/ai-sdk/user-profiles.mdx b/apps/docs/ai-sdk/user-profiles.mdx
index fcfe4d7a..3afc41e0 100644
--- a/apps/docs/ai-sdk/user-profiles.mdx
+++ b/apps/docs/ai-sdk/user-profiles.mdx
@@ -289,7 +289,7 @@ The AI SDK middleware abstracts away the complexity of manual profile management
Understand how profiles work conceptually
</Card>
- <Card title="Memory Tools" icon="wrench" href="/ai-sdk/memory-tools">
+ <Card title="Memory Tools" icon="wrench" href="/integrations/ai-sdk">
Add explicit memory operations to your agents
</Card>
diff --git a/apps/docs/concepts/content-types.mdx b/apps/docs/concepts/content-types.mdx
new file mode 100644
index 00000000..473fad67
--- /dev/null
+++ b/apps/docs/concepts/content-types.mdx
@@ -0,0 +1,227 @@
+---
+title: "Supported Content Types"
+sidebarTitle: "Content Types"
+description: "All the content formats Supermemory can ingest and process"
+icon: "file-stack"
+---
+
+Supermemory automatically extracts and indexes content from various formats. Just send it—we handle the rest. See [Add Memories](/add-memories) to learn how to ingest content via the API.
+
+## Text Content
+
+Raw text, conversations, notes, or any string content.
+
+```typescript
+await client.add({
+ content: "User prefers dark mode and uses vim keybindings",
+ containerTags: ["user_123"]
+});
+```
+
+**Best for:** Chat messages, user preferences, notes, logs, transcripts.
+
+---
+
+## URLs & Web Pages
+
+Send a URL and Supermemory fetches, extracts, and indexes the content.
+
+```typescript
+await client.add({
+ content: "https://docs.example.com/api-reference",
+ containerTags: ["documentation"]
+});
+```
+
+**Extracts:** Article text, headings, metadata. Strips navigation, ads, boilerplate.
+
+---
+
+## Documents
+
+### PDF
+
+```typescript
+await client.add({
+ content: pdfBase64,
+ contentType: "pdf",
+ title: "Q4 Financial Report"
+});
+```
+
+**Extracts:** Text, tables, headers. OCR for scanned documents.
+
+### Microsoft Office
+
+| Format | Extension | Content Type |
+|--------|-----------|--------------|
+| Word | `.docx` | `docx` |
+| Excel | `.xlsx` | `xlsx` |
+| PowerPoint | `.pptx` | `pptx` |
+
+```typescript
+await client.add({
+ content: docxBase64,
+ contentType: "docx",
+ title: "Product Roadmap"
+});
+```
+
+### Google Workspace
+
+Automatically handled via [Google Drive connector](/connectors/google-drive):
+- Google Docs
+- Google Sheets
+- Google Slides
+
+---
+
+## Code & Markdown
+
+```typescript
+// Markdown
+await client.add({
+ content: markdownContent,
+ contentType: "md",
+ title: "README.md"
+});
+
+// Code files (auto-detected language)
+await client.add({
+ content: codeContent,
+ contentType: "code",
+ metadata: { language: "typescript" }
+});
+```
+
+**Extracts:** Structure, headings, code blocks with syntax awareness.
+
+Code is chunked using [code-chunk](https://github.com/supermemoryai/code-chunk), which understands AST boundaries to keep functions, classes, and logical blocks intact. See [Super RAG](/concepts/super-rag) for how Supermemory optimizes chunking for each content type.
+
+---
+
+## Images
+
+```typescript
+await client.add({
+ content: imageBase64,
+ contentType: "image",
+ title: "Architecture Diagram"
+});
+```
+
+**Extracts:** OCR text, visual descriptions, diagram interpretations.
+
+**Supported:** PNG, JPG, JPEG, WebP, GIF
+
+---
+
+## Audio & Video
+
+```typescript
+// Audio
+await client.add({
+ content: audioBase64,
+ contentType: "audio",
+ title: "Customer Call Recording"
+});
+
+// Video
+await client.add({
+ content: videoBase64,
+ contentType: "video",
+ title: "Product Demo"
+});
+```
+
+**Extracts:** Transcription, speaker detection, topic segmentation.
+
+**Supported:** MP3, WAV, M4A, MP4, WebM
+
+---
+
+## Structured Data
+
+### JSON
+
+```typescript
+await client.add({
+ content: JSON.stringify(userData),
+ contentType: "json",
+ title: "User Profile Data"
+});
+```
+
+### CSV
+
+```typescript
+await client.add({
+ content: csvContent,
+ contentType: "csv",
+ title: "Sales Data Q4"
+});
+```
+
+---
+
+## File Upload
+
+For binary files, encode as base64:
+
+```typescript
+import { readFileSync } from 'fs';
+
+const file = readFileSync('./document.pdf');
+const base64 = file.toString('base64');
+
+await client.add({
+ content: base64,
+ contentType: "pdf",
+ title: "document.pdf"
+});
+```
+
+---
+
+## Auto-Detection
+
+If you don't specify `contentType`, Supermemory auto-detects:
+
+```typescript
+// URL detected automatically
+await client.add({ content: "https://example.com/page" });
+
+// Plain text detected automatically
+await client.add({ content: "User said they prefer email contact" });
+```
+
+<Note>
+For binary content (files), always specify `contentType` for reliable processing.
+</Note>
+
+---
+
+## Content Limits
+
+| Type | Max Size |
+|------|----------|
+| Text | 1MB |
+| Files | 50MB |
+| URLs | Fetched content up to 10MB |
+
+<Tip>
+For large files, consider chunking or using [connectors](/connectors/overview) for automatic sync.
+</Tip>
+
+---
+
+## Next Steps
+
+<CardGroup cols={2}>
+ <Card title="Add Memories" icon="plus" href="/add-memories">
+ Upload content via the API
+ </Card>
+ <Card title="Super RAG" icon="bolt" href="/concepts/super-rag">
+ How content is chunked and indexed
+ </Card>
+</CardGroup>
diff --git a/apps/docs/concepts/customization.mdx b/apps/docs/concepts/customization.mdx
new file mode 100644
index 00000000..6107dd60
--- /dev/null
+++ b/apps/docs/concepts/customization.mdx
@@ -0,0 +1,172 @@
+---
+title: "Customizing for Your Use Case"
+sidebarTitle: "Customization"
+description: "Configure Supermemory's behavior for your specific application"
+icon: "settings-2"
+---
+
+Configure how Supermemory processes and retrieves content for your specific use case.
+
+## Filter Prompts
+
+Tell Supermemory what content matters during ingestion. This helps filter and prioritize what gets indexed.
+
+```typescript
+// Example: Brand guidelines assistant
+await client.settings.update({
+ shouldLLMFilter: true,
+ filterPrompt: `You are ingesting content for Brand.ai's brand guidelines system.
+
+ Index:
+ - Official brand values and mission statements
+ - Approved tone of voice guidelines
+ - Logo usage and visual identity docs
+ - Approved messaging and taglines
+
+ Skip:
+ - Draft documents and work-in-progress
+ - Outdated brand materials (pre-2024)
+ - Internal discussions about brand changes
+ - Competitor analysis docs`
+});
+```
+
+<AccordionGroup>
+ <Accordion title="Personal Assistant">
+ ```typescript
+ filterPrompt: `Personal AI assistant. Prioritize recent content, action items,
+ and personal context. Exclude spam and duplicates.`
+ ```
+ </Accordion>
+ <Accordion title="Customer Support">
+ ```typescript
+ filterPrompt: `Customer support agent. Prioritize verified solutions, official docs,
+ and resolved tickets. Exclude internal discussions and PII.`
+ ```
+ </Accordion>
+ <Accordion title="Legal Assistant">
+ ```typescript
+ filterPrompt: `Legal research assistant. Prioritize precedents, current regulations,
+ and approved contract language. Exclude privileged communications.`
+ ```
+ </Accordion>
+ <Accordion title="Finance Agent">
+ ```typescript
+ filterPrompt: `Financial analysis assistant. Prioritize latest reports, verified data,
+ and regulatory filings. Exclude speculative data and MNPI.`
+ ```
+ </Accordion>
+ <Accordion title="Healthcare">
+ ```typescript
+ filterPrompt: `Healthcare information assistant. Prioritize evidence-based guidelines
+ and FDA-approved info. Exclude PHI and outdated recommendations.`
+ ```
+ </Accordion>
+ <Accordion title="Developer Docs">
+ ```typescript
+ filterPrompt: `Developer documentation assistant. Prioritize current APIs, working
+ examples, and best practices. Exclude deprecated APIs and test fixtures.`
+ ```
+ </Accordion>
+</AccordionGroup>
+
+---
+
+## Chunk Size
+
+Control how documents are split into searchable pieces. Smaller chunks = more precise retrieval but less context per result.
+
+```typescript
+await client.settings.update({
+ chunkSize: 512 // -1 for default
+});
+```
+
+| Use Case | Chunk Size | Why |
+|----------|------------|-----|
+| Citations & references | `256-512` | Precise source attribution |
+| Q&A / Support | `512-1024` | Balanced context |
+| Long-form analysis | `1024-2048` | More context per chunk |
+| Default | `-1` | Supermemory's optimized default |
+
+<Note>
+Smaller chunks generate more memories per document. Larger chunks provide more context but may reduce precision.
+</Note>
+
+---
+
+## Connector Branding
+
+Show "Log in to **YourApp**" instead of "Log in to Supermemory" when users connect external services. See [Connectors Overview](/connectors/overview) for the full list of supported integrations.
+
+<AccordionGroup>
+ <Accordion title="Google Drive">
+ 1. Create OAuth credentials in [Google Cloud Console](https://console.cloud.google.com/)
+ 2. Redirect URI: `https://api.supermemory.ai/v3/connections/google-drive/callback`
+
+ ```typescript
+ await client.settings.update({
+ googleDriveCustomKeyEnabled: true,
+ googleDriveClientId: "your-client-id.apps.googleusercontent.com",
+ googleDriveClientSecret: "your-client-secret"
+ });
+ ```
+ </Accordion>
+ <Accordion title="Notion">
+ 1. Create integration at [Notion Developers](https://developers.notion.com/)
+ 2. Redirect URI: `https://api.supermemory.ai/v3/connections/notion/callback`
+
+ ```typescript
+ await client.settings.update({
+ notionCustomKeyEnabled: true,
+ notionClientId: "your-notion-client-id",
+ notionClientSecret: "your-notion-client-secret"
+ });
+ ```
+ </Accordion>
+ <Accordion title="OneDrive">
+ 1. Register app in [Azure Portal](https://portal.azure.com/)
+ 2. Redirect URI: `https://api.supermemory.ai/v3/connections/onedrive/callback`
+
+ ```typescript
+ await client.settings.update({
+ onedriveCustomKeyEnabled: true,
+ onedriveClientId: "your-azure-app-id",
+ onedriveClientSecret: "your-azure-client-secret"
+ });
+ ```
+ </Accordion>
+</AccordionGroup>
+
+---
+
+## API Reference
+
+```typescript
+// Get current settings
+const settings = await client.settings.get();
+
+// Update settings
+await client.settings.update({
+ shouldLLMFilter: true,
+ filterPrompt: "...",
+ chunkSize: 512
+});
+```
+
+<Note>
+Settings are organization-wide. Changes apply to new content only—existing memories aren't reprocessed.
+</Note>
+
+---
+
+## Next Steps
+
+<CardGroup cols={2}>
+ <Card title="Add Memories" icon="plus" href="/add-memories">
+ See your custom settings in action
+ </Card>
+ <Card title="Connectors" icon="plug" href="/connectors/overview">
+ Set up automatic syncing from external platforms
+ </Card>
+</CardGroup>
diff --git a/apps/docs/concepts/filtering.mdx b/apps/docs/concepts/filtering.mdx
new file mode 100644
index 00000000..d222e849
--- /dev/null
+++ b/apps/docs/concepts/filtering.mdx
@@ -0,0 +1,359 @@
+---
+title: "Organizing & Filtering Memories"
+sidebarTitle: "Multi-Tenancy / Filtering"
+description: "Use container tags and metadata to organize and retrieve memories"
+icon: "users"
+---
+
+Supermemory provides two ways to organize your memories:
+
+<CardGroup cols={2}>
+ <Card title="Container Tags" icon="folder">
+ **Organize memories** into isolated spaces by user, project, or workspace
+ </Card>
+ <Card title="Metadata Filtering" icon="database">
+ **Query memories** by custom properties like category, status, or date
+ </Card>
+</CardGroup>
+
+Both can be used independently or together for precise filtering.
+
+---
+
+## Container Tags
+
+Container tags create isolated memory spaces. Use them to separate memories by user, project, or any logical boundary.
+
+### Adding Memories with Tags
+
+```typescript
+await client.add({
+ content: "Meeting notes from Q1 planning",
+ containerTags: ["user_123"]
+});
+```
+
+### Searching with Tags
+
+```typescript
+const results = await client.search.documents({
+ q: "planning notes",
+ containerTags: ["user_123"]
+});
+```
+
+<Note>
+Container tags use **exact array matching**. A memory tagged `["user_123", "project_a"]` won't match a search for just `["user_123"]`.
+</Note>
+
+### Recommended Patterns
+
+| Pattern | Example | Use Case |
+|---------|---------|----------|
+| User isolation | `user_{userId}` | Per-user memories |
+| Project grouping | `project_{projectId}` | Project-specific content |
+| Hierarchical | `org_{orgId}_team_{teamId}` | Multi-level organization |
+
+<AccordionGroup>
+ <Accordion title="More Container Tag Examples">
+ ```typescript
+ // Multi-tenant SaaS - isolate by organization and user
+ await client.add({
+ content: "Company policy document",
+ containerTags: ["org_acme_user_john"]
+ });
+
+ // Search only within that user's org context
+ const results = await client.search.documents({
+ q: "vacation policy",
+ containerTags: ["org_acme_user_john"]
+ });
+
+ // Project-based isolation
+ await client.add({
+ content: "Sprint 5 retrospective notes",
+ containerTags: ["project_mobile_app"]
+ });
+
+ // Time-based segmentation
+ await client.add({
+ content: "Q1 2024 financial report",
+ containerTags: ["user_cfo_2024_q1"]
+ });
+ ```
+
+ **API field differences:**
+ | Endpoint | Field | Type |
+ |----------|-------|------|
+ | `/v3/search` | `containerTags` | Array |
+ | `/v4/search` | `containerTag` | String |
+ | `/v3/documents/list` | `containerTags` | Array |
+ </Accordion>
+</AccordionGroup>
+
+---
+
+## Metadata
+
+Metadata lets you attach custom properties to memories and filter by them later.
+
+### Adding Memories with Metadata
+
+```typescript
+await client.add({
+ content: "Technical design document for auth system",
+ containerTags: ["user_123"],
+ metadata: {
+ category: "engineering",
+ priority: "high",
+ year: 2024
+ }
+});
+```
+
+### Searching with Metadata Filters
+
+Filters must be wrapped in `AND` or `OR` arrays:
+
+```typescript
+const results = await client.search.documents({
+ q: "design document",
+ containerTags: ["user_123"],
+ filters: {
+ AND: [
+ { key: "category", value: "engineering" },
+ { key: "priority", value: "high" }
+ ]
+ }
+});
+```
+
+### Filter Types
+
+| Type | Example | Description |
+|------|---------|-------------|
+| String equality | `{ key: "status", value: "published" }` | Exact match |
+| String contains | `{ filterType: "string_contains", key: "title", value: "react" }` | Substring match |
+| Numeric | `{ filterType: "numeric", key: "priority", value: "5", numericOperator: ">=" }` | Number comparison |
+| Array contains | `{ filterType: "array_contains", key: "tags", value: "important" }` | Check array membership |
+
+### Combining Filters
+
+Use `AND` and `OR` for complex queries:
+
+```typescript
+const results = await client.search.documents({
+ q: "meeting notes",
+ filters: {
+ AND: [
+ { key: "type", value: "meeting" },
+ {
+ OR: [
+ { key: "team", value: "engineering" },
+ { key: "team", value: "product" }
+ ]
+ }
+ ]
+ }
+});
+```
+
+### Excluding Results
+
+Use `negate: true` to exclude matches:
+
+```typescript
+const results = await client.search.documents({
+ q: "documentation",
+ filters: {
+ AND: [
+ { key: "status", value: "draft", negate: true }
+ ]
+ }
+});
+```
+
+<AccordionGroup>
+ <Accordion title="More Metadata Filter Examples">
+ **String contains (substring search):**
+ ```typescript
+ // Find documents with "machine learning" in the description
+ const results = await client.search.documents({
+ q: "AI research",
+ filters: {
+ AND: [
+ {
+ filterType: "string_contains",
+ key: "description",
+ value: "machine learning",
+ ignoreCase: true
+ }
+ ]
+ }
+ });
+ ```
+
+ **Numeric comparisons:**
+ ```typescript
+ // Find high-priority items created after a specific date
+ const results = await client.search.documents({
+ q: "tasks",
+ filters: {
+ AND: [
+ {
+ filterType: "numeric",
+ key: "priority",
+ value: "7",
+ numericOperator: ">="
+ },
+ {
+ filterType: "numeric",
+ key: "created_timestamp",
+ value: "1704067200", // Unix timestamp
+ numericOperator: ">="
+ }
+ ]
+ }
+ });
+ ```
+
+ **Array contains (check array membership):**
+ ```typescript
+ // Find documents where a specific user is a participant
+ const results = await client.search.documents({
+ q: "meeting notes",
+ filters: {
+ AND: [
+ {
+ filterType: "array_contains",
+ key: "participants",
+ value: "user_123"
+ }
+ ]
+ }
+ });
+ ```
+
+ **Complex nested filters:**
+ ```typescript
+ // (category = "tech" OR category = "science") AND status != "archived"
+ const results = await client.search.documents({
+ q: "research papers",
+ filters: {
+ AND: [
+ {
+ OR: [
+ { key: "category", value: "tech" },
+ { key: "category", value: "science" }
+ ]
+ },
+ { key: "status", value: "archived", negate: true }
+ ]
+ }
+ });
+ ```
+
+ **Numeric operator negation mapping:**
+ When using `negate: true`, operators flip:
+ - `<` becomes `>=`
+ - `<=` becomes `>`
+ - `>` becomes `<=`
+ - `>=` becomes `<`
+ - `=` becomes `!=`
+ </Accordion>
+
+ <Accordion title="Real-World Patterns">
+ **User's work documents from 2024:**
+ ```typescript
+ const results = await client.search.documents({
+ q: "quarterly report",
+ containerTags: ["user_123"],
+ filters: {
+ AND: [
+ { key: "category", value: "work" },
+ { key: "type", value: "report" },
+ { filterType: "numeric", key: "year", value: "2024", numericOperator: "=" }
+ ]
+ }
+ });
+ ```
+
+ **Team meeting notes with specific participants:**
+ ```typescript
+ const results = await client.search.documents({
+ q: "sprint planning",
+ containerTags: ["project_alpha"],
+ filters: {
+ AND: [
+ { key: "type", value: "meeting" },
+ {
+ OR: [
+ { filterType: "array_contains", key: "participants", value: "alice" },
+ { filterType: "array_contains", key: "participants", value: "bob" }
+ ]
+ }
+ ]
+ }
+ });
+ ```
+
+ **Exclude drafts and deprecated content:**
+ ```typescript
+ const results = await client.search.documents({
+ q: "documentation",
+ filters: {
+ AND: [
+ { key: "status", value: "draft", negate: true },
+ { filterType: "string_contains", key: "content", value: "deprecated", negate: true },
+ { filterType: "array_contains", key: "tags", value: "archived", negate: true }
+ ]
+ }
+ });
+ ```
+ </Accordion>
+</AccordionGroup>
+
+---
+
+## Quick Reference
+
+### When Adding Memories
+
+```typescript
+await client.add({
+ content: "Your content here",
+ containerTags: ["user_123"], // Isolation
+ metadata: { key: "value" } // Custom properties
+});
+```
+
+### When Searching
+
+```typescript
+const results = await client.search.documents({
+ q: "search query",
+ containerTags: ["user_123"], // Must match exactly
+ filters: { // Optional metadata filters
+ AND: [{ key: "status", value: "published" }]
+ }
+});
+```
+
+### Metadata Key Rules
+
+- Allowed characters: `a-z`, `A-Z`, `0-9`, `_`, `-`, `.`
+- Max length: 64 characters
+- No spaces or special characters
+
+---
+
+## Next Steps
+
+<CardGroup cols={2}>
+ <Card title="Search" icon="search" href="/search">
+ Apply filters in search queries
+ </Card>
+ <Card title="Add Memories" icon="plus" href="/add-memories">
+ Add content with container tags and metadata
+ </Card>
+</CardGroup>
diff --git a/apps/docs/concepts/graph-memory.mdx b/apps/docs/concepts/graph-memory.mdx
new file mode 100644
index 00000000..9080ce4f
--- /dev/null
+++ b/apps/docs/concepts/graph-memory.mdx
@@ -0,0 +1,146 @@
+---
+title: "How Graph Memory Works"
+sidebarTitle: "Graph Memory"
+description: "Automatic memory evolution, knowledge updates, and intelligent forgetting"
+icon: "vector-square"
+---
+
+Supermemory builds a living knowledge graph where memories connect to other memories. Unlike traditional knowledge graphs with entity-relation-entity triples, Supermemory's graph is **facts built on top of other facts**.
+
+## Memory Relationships
+
+When you add content, Supermemory extracts facts and automatically connects them to existing memories through three relationship types:
+
+### Updates: Information Changes
+
+When new information contradicts existing knowledge:
+
+```
+Memory 1: "Alex works at Google as a software engineer"
+Memory 2: "Alex just started at Stripe as a PM"
+ ↓
+Memory 2 UPDATES Memory 1
+```
+
+The system tracks which memory is latest with `isLatest`, so searches return current information while preserving history.
+
+### Extends: Information Enriches
+
+When new information adds detail without replacing:
+
+```
+Memory 1: "Alex works at Stripe as a PM"
+Memory 2: "Alex focuses on payments infrastructure and leads a team of 5"
+ ↓
+Memory 2 EXTENDS Memory 1
+```
+
+Both memories remain valid—searches get richer context.
+
+### Derives: Information Infers
+
+When Supermemory infers new facts from patterns:
+
+```
+Memory 1: "Alex is a PM at Stripe"
+Memory 2: "Alex frequently discusses payment APIs and fraud detection"
+ ↓
+Derived: "Alex likely works on Stripe's core payments product"
+```
+
+These inferences surface insights you didn't explicitly state.
+
+---
+
+## Automatic Memory Extraction
+
+From a single conversation, Supermemory extracts multiple connected memories:
+
+**Input:**
+> "Had a great call with Alex. He's enjoying the new PM role at Stripe, though the
+> payments infrastructure work is intense. He moved to Seattle for the job—got a
+> place in Capitol Hill. Wants to grab dinner next time I'm in town."
+
+**Extracted memories:**
+- Alex works at Stripe as a PM
+- Alex works on payments infrastructure *(extends role memory)*
+- Alex lives in Seattle, Capitol Hill *(new fact)*
+- Alex wants to meet for dinner *(episodic)*
+
+Each fact is connected to related memories automatically.
+
+---
+
+## Automatic Forgetting
+
+Supermemory knows when memories become irrelevant:
+
+**Time-based forgetting**: Temporary facts are automatically forgotten when they expire.
+
+```
+"I have an exam tomorrow"
+ ↓
+ After the exam date passes → automatically forgotten
+
+"Meeting with Alex at 3pm today"
+ ↓
+ After today → automatically forgotten
+```
+
+**Contradiction resolution**: When new facts contradict old ones, the Update relationship ensures searches return current information.
+
+**Noise filtering**: Casual, non-meaningful content doesn't become permanent memories.
+
+---
+
+## Memory Types
+
+Supermemory distinguishes memory types automatically:
+
+| Type | Example | Behavior |
+|------|---------|----------|
+| **Facts** | "Alex is a PM at Stripe" | Persists until updated |
+| **Preferences** | "Alex prefers morning meetings" | Strengthens with repetition |
+| **Episodes** | "Met Alex for coffee Tuesday" | Decays unless significant |
+
+---
+
+## What You Don't Do
+
+All of this is automatic. You don't:
+- Define relationships manually
+- Tag memory types
+- Clean up old memories
+- Resolve contradictions
+
+Just add content and search naturally:
+
+```typescript
+await client.add({
+ content: "Alex mentioned he just started at Stripe"
+});
+
+const results = await client.search({
+ q: "where does Alex work?"
+});
+// → Stripe (latest), previously Google (historical)
+```
+
+---
+
+## Learn More
+
+<CardGroup cols={2}>
+ <Card title="How It Works" icon="cpu" href="/concepts/how-it-works">
+ Deep dive into the architecture
+ </Card>
+ <Card title="Memory vs RAG" icon="scale" href="/concepts/memory-vs-rag">
+ When to use memory vs document retrieval
+ </Card>
+ <Card title="User Profiles" icon="user" href="/user-profiles">
+ Automatic summaries from the graph
+ </Card>
+ <Card title="Add Memories" icon="plus" href="/add-memories">
+ Start building your knowledge graph
+ </Card>
+</CardGroup>
diff --git a/apps/docs/how-it-works.mdx b/apps/docs/concepts/how-it-works.mdx
index 318324b1..9404347d 100644
--- a/apps/docs/how-it-works.mdx
+++ b/apps/docs/concepts/how-it-works.mdx
@@ -1,7 +1,7 @@
---
title: "How Supermemory Works"
description: "Understanding the knowledge graph architecture that powers intelligent memory"
-icon: "brain"
+icon: "cpu"
---
@@ -42,7 +42,7 @@ Documents are what you provide - the raw materials:
- Images with text
- Videos to transcribe
-Think of documents as books you hand to Supermemory.
+Think of documents as books you hand to Supermemory. See [Content Types](/concepts/content-types) for the full list of supported formats.
### Memories: Intelligent Knowledge Units
@@ -63,7 +63,7 @@ Think of memories as the insights and connections your brain makes after reading
![](/images/memories-inferred.png)
-The graph connects memories through three types of relationships:
+The graph connects memories through three types of relationships. For a deeper dive into how these relationships work, see [Graph Memory](/concepts/graph-memory).
### Updates: Information Changes
@@ -142,11 +142,11 @@ Understanding the pipeline helps you optimize your usage:
Now that you understand how Supermemory works:
<CardGroup cols={2}>
- <Card title="Add Memories" icon="plus" href="/add-memories/overview">
+ <Card title="Add Memories" icon="plus" href="/add-memories">
Start adding content to your knowledge graph
</Card>
- <Card title="Search Memories" icon="search" href="/search/overview">
+ <Card title="Search Memories" icon="search" href="/search">
Learn to query your knowledge effectively
</Card>
</CardGroup>
diff --git a/apps/docs/memory-vs-rag.mdx b/apps/docs/concepts/memory-vs-rag.mdx
index 699b906d..bc08e94f 100644
--- a/apps/docs/memory-vs-rag.mdx
+++ b/apps/docs/concepts/memory-vs-rag.mdx
@@ -2,6 +2,7 @@
title: "Memory vs RAG: Understanding the Difference"
description: "Learn why agent memory and RAG are fundamentally different, and when to use each approach"
sidebarTitle: "Memory vs RAG"
+icon: "scale"
---
Most developers confuse RAG (Retrieval-Augmented Generation) with agent memory. They're not the same thing, and using RAG for memory is why your agents keep forgetting important context. Let's understand the fundamental difference.
@@ -193,7 +194,7 @@ Supermemory provides a unified platform that correctly handles both patterns:
### 1. Document Storage (RAG)
```python
# Add a document for RAG-style retrieval
-client.memories.add(
+client.add(
content="iPhone 15 has a 48MP camera and A17 Pro chip",
# No user association - universal knowledge
)
@@ -202,7 +203,7 @@ client.memories.add(
### 2. Memory Creation
```python
# Add a user-specific memory
-client.memories.add(
+client.add(
content="User prefers Android over iOS",
container_tags=["user_123"], # User-specific
metadata={
@@ -215,7 +216,7 @@ client.memories.add(
### 3. Hybrid Retrieval
```python
# Search combines both approaches
-results = client.memories.search(
+results = client.documents.search(
query="What phone should I recommend?",
container_tags=["user_123"], # Gets user memories
# Also searches general knowledge
@@ -237,3 +238,22 @@ Stop treating memory like a retrieval problem. Your agents need both:
- **Memory** for understanding users
Supermemory provides both capabilities in a unified platform, ensuring your agents have the right context at the right time.
+
+---
+
+## Next Steps
+
+<CardGroup cols={2}>
+ <Card title="Graph Memory" icon="network" href="/concepts/graph-memory">
+ How memory relationships work
+ </Card>
+ <Card title="Super RAG" icon="bolt" href="/concepts/super-rag">
+ Our managed RAG solution
+ </Card>
+ <Card title="Add Memories" icon="plus" href="/add-memories">
+ Start ingesting content
+ </Card>
+ <Card title="Search" icon="search" href="/search">
+ Query your memories and documents
+ </Card>
+</CardGroup>
diff --git a/apps/docs/concepts/super-rag.mdx b/apps/docs/concepts/super-rag.mdx
new file mode 100644
index 00000000..ebe538bf
--- /dev/null
+++ b/apps/docs/concepts/super-rag.mdx
@@ -0,0 +1,176 @@
+---
+title: "SuperRAG (Managed RAG as a service)"
+sidebarTitle: "SuperRAG"
+description: "Supermemory provides a managed RAG solution - extraction, indexing, storing, and retrieval."
+icon: "bolt"
+---
+
+Supermemory doesn't just store your content—it transforms it into optimized, searchable knowledge. Every upload goes through an intelligent pipeline that extracts, chunks, and indexes content in the ideal way for its type.
+
+## Automatic Content Intelligence
+
+When you add content, Supermemory:
+
+1. **Detects the content type** — PDF, code, markdown, images, video, etc.
+2. **Extracts content optimally** — Uses type-specific extraction (OCR for images, transcription for audio)
+3. **Chunks intelligently** — Applies the right chunking strategy for the content type
+4. **Generates embeddings** — Creates vector representations for semantic search
+5. **Builds relationships** — Connects new knowledge to existing memories
+
+```typescript
+// Just add content — Supermemory handles the rest
+await client.add({
+ content: pdfBase64,
+ contentType: "pdf",
+ title: "Technical Documentation"
+});
+```
+
+No chunking strategies to configure. No embedding models to choose. It just works.
+
+---
+
+## Smart Chunking by Content Type
+
+Different content types need different chunking strategies. Supermemory applies the optimal approach automatically:
+
+### Documents (PDF, DOCX)
+
+PDFs and documents are chunked by **semantic sections** — headers, paragraphs, and logical boundaries. This preserves context better than arbitrary character splits.
+
+```
+├── Executive Summary (chunk 1)
+├── Introduction (chunk 2)
+├── Section 1: Architecture
+│ ├── Overview (chunk 3)
+│ └── Components (chunk 4)
+└── Conclusion (chunk 5)
+```
+
+### Code
+
+Code is chunked using [code-chunk](https://github.com/supermemoryai/code-chunk), our open-source library that understands AST (Abstract Syntax Tree) boundaries:
+
+- Functions and methods stay intact
+- Classes are chunked by method
+- Import statements grouped separately
+- Comments attached to their code blocks
+
+```typescript
+// A 500-line file becomes meaningful chunks:
+// - Imports + type definitions
+// - Each function as a separate chunk
+// - Class methods individually indexed
+```
+
+This means searching for "authentication middleware" finds the actual function, not a random slice of code.
+
+### Web Pages
+
+URLs are fetched, cleaned of navigation/ads, and chunked by article structure — headings, paragraphs, lists.
+
+### Markdown
+
+Chunked by heading hierarchy, preserving the document structure.
+
+See [Content Types](/concepts/content-types) for the full list of supported formats.
+
+---
+
+## Hybrid Memory + RAG
+
+Supermemory combines the best of both approaches in every search:
+
+<CardGroup cols={2}>
+ <Card title="Traditional RAG" icon="magnifying-glass">
+ - Finds similar document chunks
+ - Great for knowledge retrieval
+ - Stateless — same results for everyone
+ </Card>
+
+ <Card title="Memory System" icon="brain">
+ - Extracts and tracks user facts
+ - Understands temporal context
+ - Personalizes results per user
+ </Card>
+</CardGroup>
+
+With `searchMode: "hybrid"` (the default), you get both:
+
+```typescript
+const results = await client.search({
+ q: "how do I deploy the app?",
+ containerTag: "user_123",
+ searchMode: "hybrid"
+});
+
+// Returns:
+// - Deployment docs from your knowledge base (RAG)
+// - User's previous deployment preferences (Memory)
+// - Their specific environment configs (Memory)
+```
+
+---
+
+## Search Optimization
+
+Two flags give you fine-grained control over result quality:
+
+### Reranking
+
+Re-scores results using a cross-encoder model for better relevance:
+
+```typescript
+const results = await client.search({
+ q: "complex technical question",
+ rerank: true // +~100ms, significantly better ranking
+});
+```
+
+**When to use:** Complex queries, technical documentation, when precision matters more than speed.
+
+### Query Rewriting
+
+Expands your query to capture more relevant results:
+
+```typescript
+const results = await client.search({
+ q: "how to auth",
+ rewriteQuery: true // Expands to "authentication login oauth jwt..."
+});
+```
+
+**When to use:** Short queries, user-facing search, when recall matters.
+
+---
+
+## Why It's "Super"
+
+| Traditional RAG | SuperRAG |
+|-----------------|-----------|
+| Manual chunking config | Automatic per content type |
+| One-size-fits-all splits | AST-aware code chunking |
+| Just document retrieval | Hybrid memory + documents |
+| Static embeddings | Relationship-aware graph |
+| Generic search | Rerank + query rewriting |
+
+You focus on building your product. Supermemory handles the RAG complexity.
+
+---
+
+## Next Steps
+
+<CardGroup cols={2}>
+ <Card title="Content Types" icon="file-stack" href="/concepts/content-types">
+ All supported formats and how they're processed
+ </Card>
+ <Card title="How It Works" icon="cpu" href="/concepts/how-it-works">
+ The full processing pipeline
+ </Card>
+ <Card title="Memory vs RAG" icon="scale" href="/concepts/memory-vs-rag">
+ When to use each approach
+ </Card>
+ <Card title="Search" icon="search" href="/search">
+ Search parameters and optimization
+ </Card>
+</CardGroup>
diff --git a/apps/docs/concepts/user-profiles.mdx b/apps/docs/concepts/user-profiles.mdx
new file mode 100644
index 00000000..211b4dda
--- /dev/null
+++ b/apps/docs/concepts/user-profiles.mdx
@@ -0,0 +1,141 @@
+---
+title: "User Profiles"
+sidebarTitle: "User Profiles"
+description: "Automatically maintained context about your users"
+icon: "circle-user"
+---
+
+User profiles are **automatically maintained collections of facts about your users** that Supermemory builds from all their interactions. Think of it as a persistent "about me" document that's always up-to-date.
+
+<CardGroup cols={2}>
+ <Card title="Instant Context" icon="bolt">
+ No search needed — comprehensive user info always ready
+ </Card>
+ <Card title="Auto-Updated" icon="rotate">
+ Profiles update as users interact with your system
+ </Card>
+</CardGroup>
+
+## Why Profiles?
+
+Traditional memory systems rely entirely on search:
+
+| Problem | Search Only | With Profiles |
+|---------|------------|---------------|
+| Context retrieval | 3-5 queries | 1 call |
+| Response time | 200-500ms | 50-100ms |
+| Basic user info | Requires specific queries | Always available |
+
+**Search is too narrow**: When you search for "project updates", you miss that the user prefers bullet points, works in PST, and uses specific terminology.
+
+**Profiles provide the foundation**: Instead of searching for basic context, profiles give your LLM a complete picture of who the user is.
+
+---
+
+## Static vs Dynamic
+
+Profiles separate two types of information:
+
+### Static Profile
+
+Long-term, stable facts:
+
+- "Sarah is a senior software engineer at TechCorp"
+- "Sarah specializes in distributed systems"
+- "Sarah prefers technical docs over video tutorials"
+
+### Dynamic Profile
+
+Recent context and temporary states:
+
+- "Sarah is migrating the payment service to microservices"
+- "Sarah is preparing for a conference talk next month"
+- "Sarah is debugging a memory leak in auth service"
+
+---
+
+## How It Works
+
+Profiles are built automatically through ingestion:
+
+1. **Ingest content** — Users [add documents](/add-memories), chat, or any content
+2. **Extract facts** — AI analyzes content for facts about the user
+3. **Update profile** — System adds, updates, or removes facts
+4. **Always current** — Profiles reflect the latest information
+
+<Note>
+You don't manually manage profiles — they build themselves as users interact. Start by [adding content](/add-memories) to see profiles in action.
+</Note>
+
+---
+
+## Profiles + Search
+
+Profiles don't replace search — they complement it:
+
+- **Profile** = broad foundation (who the user is, preferences, background)
+- **Search** = specific details (exact memories matching a query)
+
+### Example
+
+User asks: **"Can you help me debug this?"**
+
+**Without profiles**: LLM has no context about expertise, projects, or preferences.
+
+**With profiles**: LLM knows:
+- Senior engineer (adjust technical level)
+- Working on payment service (likely context)
+- Prefers CLI tools (tool suggestions)
+- Recent memory leak issues (possible connection)
+
+---
+
+## Use Cases
+
+### Personalized AI Assistants
+
+Profiles provide: expertise level, communication preferences, tools used, current projects.
+
+```typescript
+const systemPrompt = `You are assisting ${userName}.
+
+Background: ${profile.static.join('\n')}
+Current focus: ${profile.dynamic.join('\n')}
+
+Adjust responses to their expertise and preferences.`;
+```
+
+### Customer Support
+
+Profiles provide: product usage, previous issues, tech proficiency.
+
+- No more "let me look up your account"
+- Agents immediately understand context
+- AI support references past interactions naturally
+
+### Educational Platforms
+
+Profiles provide: learning style, completed courses, strengths/weaknesses.
+
+### Development Tools
+
+Profiles provide: preferred languages, coding style, current project context.
+
+---
+
+## Next Steps
+
+<CardGroup cols={2}>
+ <Card title="User Profiles API" icon="code" href="/user-profiles">
+ Fetch and use profiles via the API
+ </Card>
+ <Card title="Graph Memory" icon="network" href="/concepts/graph-memory">
+ How the underlying knowledge graph works
+ </Card>
+ <Card title="AI SDK Integration" icon="triangle" href="/integrations/ai-sdk">
+ Automatic profile injection with AI SDK
+ </Card>
+ <Card title="Add Memories" icon="plus" href="/add-memories">
+ Build profiles by adding content
+ </Card>
+</CardGroup>
diff --git a/apps/docs/connectors/overview.mdx b/apps/docs/connectors/overview.mdx
index 046cc305..cb4ca1eb 100644
--- a/apps/docs/connectors/overview.mdx
+++ b/apps/docs/connectors/overview.mdx
@@ -2,6 +2,7 @@
title: "Connectors Overview"
description: "Integrate Google Drive, Notion, OneDrive, GitHub and Web Crawler to automatically sync documents into your knowledge base"
sidebarTitle: "Overview"
+icon: "layers"
---
Connect external platforms to automatically sync documents into Supermemory. Supported connectors include Google Drive, Notion, OneDrive, GitHub and Web Crawler with real-time synchronization and intelligent content processing.
@@ -138,7 +139,7 @@ connections.forEach(conn => {
});
// List synced documents (memories) using SDK
-const memories = await client.memories.list({
+const memories = await client.documents.list({
containerTags: ['user-123', 'workspace-alpha']
});
@@ -164,7 +165,7 @@ for conn in connections:
print(f'Created: {conn.created_at}')
# List synced documents (memories) using SDK
-memories = client.memories.list(container_tags=['user-123', 'workspace-alpha'])
+memories = client.documents.list(container_tags=['user-123', 'workspace-alpha'])
print(f'Synced {len(memories.memories)} documents')
# Output: Synced 45 documents
diff --git a/apps/docs/connectors/troubleshooting.mdx b/apps/docs/connectors/troubleshooting.mdx
index 847a7987..ec62fab7 100644
--- a/apps/docs/connectors/troubleshooting.mdx
+++ b/apps/docs/connectors/troubleshooting.mdx
@@ -1,6 +1,8 @@
---
title: "Connector Troubleshooting"
+sidebarTitle: "Troubleshooting"
description: "Diagnose and resolve common issues with Google Drive, Notion, and OneDrive connectors"
+icon: "wrench"
---
Quick guide to resolve common connector issues with authentication, syncing, and permissions.
diff --git a/apps/docs/cookbook/ai-sdk-integration.mdx b/apps/docs/cookbook/ai-sdk-integration.mdx
index 8bdafef7..d6853210 100644
--- a/apps/docs/cookbook/ai-sdk-integration.mdx
+++ b/apps/docs/cookbook/ai-sdk-integration.mdx
@@ -3,7 +3,7 @@ title: "AI SDK Integration"
description: "Complete examples showing how to use Supermemory with Vercel AI SDK for building intelligent applications"
---
-This page provides comprehensive examples of using Supermemory with the Vercel AI SDK, covering both Memory Tools and Infinite Chat approaches.
+This page provides comprehensive examples of using Supermemory with the Vercel AI SDK, covering Memory Tools and User Profiles approaches.
## Personal Assistant with Memory Tools
@@ -113,62 +113,6 @@ export async function POST(request: Request) {
}
```
-## Infinite Chat for Documentation
-
-Create a documentation assistant with unlimited context:
-
-<CodeGroup>
-
-```typescript Documentation Chat
-import { streamText } from 'ai'
-
-const infiniteChat = createOpenAI({
- baseUrl: 'https://api.supermemory.ai/v3/https://api.openai.com/v1',
- apiKey: 'your-provider-api-key',
- headers: {
- 'x-supermemory-api-key': 'supermemory-api-key',
- 'x-sm-conversation-id': 'conversation-id'
- }
-})
-
-export async function POST(request: Request) {
- const { messages } = await request.json()
-
- const result = await streamText({
- model: infiniteChat('gpt-5'),
- messages,
- system: `You are a documentation assistant. You have access to all previous
- conversations and can reference earlier discussions. Help users understand
- the documentation by building on previous context.`
- })
-
- return result.toAIStreamResponse()
-}
-```
-
-```typescript Upload Documentation
-// Separate endpoint to upload documentation to memory
-import { addMemory } from '@supermemory/tools'
-
-export async function POST(request: Request) {
- const { content, title, url } = await request.json()
-
- const memory = await addMemory({
- apiKey: process.env.SUPERMEMORY_API_KEY!,
- content,
- title,
- url,
- headers: {
- 'x-sm-conversation-id': 'documentation'
- }
- })
-
- return Response.json({ success: true, memory })
-}
-```
-
-</CodeGroup>
-
## Multi-User Learning Assistant
Build an assistant that learns from multiple users but keeps data separate:
@@ -391,12 +335,6 @@ ANTHROPIC_API_KEY=your_anthropic_key
- Use project headers to separate different use cases
- Implement error handling for tool failures
-### Infinite Chat
-- Use conversation IDs to maintain separate chat contexts
-- Include user IDs for personalized experiences
-- Test with different providers to find the best fit for your use case
-- Monitor token usage for cost optimization
-
### General Tips
- Start with simple examples and gradually add complexity
- Use the search functionality to avoid duplicate memories
@@ -410,7 +348,7 @@ ANTHROPIC_API_KEY=your_anthropic_key
Advanced memory management with full API control
</Card>
- <Card title="Memory Router" icon="route" href="/memory-router/overview">
- Drop-in proxy for existing LLM applications
+ <Card title="User Profiles" icon="user" href="/user-profiles">
+ Automatic personalization with user profiles
</Card>
</CardGroup>
diff --git a/apps/docs/cookbook/customer-support.mdx b/apps/docs/cookbook/customer-support.mdx
index 32e0417e..01ad0e8e 100644
--- a/apps/docs/cookbook/customer-support.mdx
+++ b/apps/docs/cookbook/customer-support.mdx
@@ -70,7 +70,7 @@ A customer support bot that:
metadata?: Record<string, any>
}) {
try {
- const result = await client.memories.add({
+ const result = await client.add({
content: `${interaction.type.toUpperCase()}: ${interaction.content}`,
containerTag: this.getContainerTag(customerId),
metadata: {
@@ -93,7 +93,7 @@ A customer support bot that:
async getCustomerHistory(customerId: string, limit: number = 10) {
try {
- const memories = await client.memories.list({
+ const memories = await client.documents.list({
containerTags: [this.getContainerTag(customerId)],
limit,
sort: 'updatedAt',
@@ -146,7 +146,7 @@ A customer support bot that:
try {
const issueContent = `ISSUE: ${issue.subject}\n\nDescription: ${issue.description}\nCategory: ${issue.category}\nPriority: ${issue.priority}\nStatus: ${issue.status}`
- const result = await client.memories.add({
+ const result = await client.add({
content: issueContent,
containerTag: this.getContainerTag(customerId),
metadata: {
@@ -170,8 +170,8 @@ A customer support bot that:
try {
// Note: In a real implementation, you'd update the memory
// For now, we'll add a status update
- const memory = await client.memories.get(issueId)
- const customerId = memory.containerTag.replace('customer_', '')
+ const memory = await client.documents.get(issueId)
+ const customerId = memory.containerTags?.[0]?.replace('customer_', '') || ''
const updateContent = `ISSUE UPDATE: ${memory.metadata?.subject}\nStatus changed to: ${status}${resolution ? `\nResolution: ${resolution}` : ''}`
@@ -232,7 +232,7 @@ A customer support bot that:
try:
content = f"{interaction['type'].upper()}: {interaction['content']}"
- result = self.client.memories.add(
+ result = self.client.add(
content=content,
container_tag=self._get_container_tag(customer_id),
metadata={
@@ -253,7 +253,7 @@ A customer support bot that:
def get_customer_history(self, customer_id: str, limit: int = 10) -> List[Dict]:
"""Get customer interaction history"""
try:
- memories = self.client.memories.list(
+ memories = self.client.documents.list(
container_tags=[self._get_container_tag(customer_id)],
limit=limit,
sort='updatedAt',
@@ -309,7 +309,7 @@ Category: {issue['category']}
Priority: {issue['priority']}
Status: {issue['status']}"""
- result = self.client.memories.add(
+ result = self.client.add(
content=issue_content,
container_tag=self._get_container_tag(customer_id),
metadata={
@@ -330,8 +330,8 @@ Status: {issue['status']}"""
"""Update the status of a support issue"""
try:
# Get original issue
- memory = self.client.memories.get(issue_id)
- customer_id = memory.container_tag.replace('customer_', '')
+ memory = self.client.documents.get(issue_id)
+ customer_id = (memory.container_tags[0] if memory.container_tags else '').replace('customer_', '')
update_content = f"ISSUE UPDATE: {memory.metadata.get('subject', 'Unknown')}\nStatus changed to: {status}"
if resolution:
diff --git a/apps/docs/cookbook/document-qa.mdx b/apps/docs/cookbook/document-qa.mdx
index d11e947b..5aa071eb 100644
--- a/apps/docs/cookbook/document-qa.mdx
+++ b/apps/docs/cookbook/document-qa.mdx
@@ -71,7 +71,7 @@ A document Q&A system that:
async uploadURL({ url, collection, metadata = {} }: { url: string, collection: string, metadata?: Record<string, any> }) {
try {
- const result = await client.memories.add({
+ const result = await client.add({
content: url,
containerTag: collection,
metadata: {
@@ -91,7 +91,7 @@ A document Q&A system that:
async getDocumentStatus(documentId: string) {
try {
- const memory = await client.memories.get(documentId)
+ const memory = await client.documents.get(documentId)
return {
id: memory.id,
status: memory.status,
@@ -106,7 +106,7 @@ A document Q&A system that:
async listDocuments(collection: string) {
try {
- const memories = await client.memories.list({
+ const memories = await client.documents.list({
containerTags: [collection],
limit: 50,
sort: 'updatedAt',
@@ -148,15 +148,10 @@ A document Q&A system that:
return NextResponse.json({ error: 'No file provided' }, { status: 400 })
}
- // Convert File to Buffer for Supermemory
- const bytes = await file.arrayBuffer()
- const buffer = Buffer.from(bytes)
-
- const result = await client.memories.uploadFile({
- file: buffer,
- filename: file.name,
- containerTags,
- metadata
+ const result = await client.documents.uploadFile({
+ file: file,
+ containerTags: JSON.stringify(containerTags),
+ metadata: JSON.stringify(metadata)
})
return NextResponse.json({
@@ -180,6 +175,7 @@ A document Q&A system that:
```python document_processor.py
from supermemory import Supermemory
import os
+ import json
from typing import Dict, List, Any, Optional
import requests
from datetime import datetime
@@ -195,15 +191,15 @@ A document Q&A system that:
try:
with open(file_path, 'rb') as file:
- result = self.client.memories.upload_file(
+ result = self.client.documents.upload_file(
file=file,
- container_tags=[collection],
- metadata={
+ container_tags=collection,
+ metadata=json.dumps({
'originalName': os.path.basename(file_path),
'fileType': os.path.splitext(file_path)[1],
'uploadedAt': datetime.now().isoformat(),
**metadata
- }
+ })
)
return result
except Exception as e:
@@ -216,7 +212,7 @@ A document Q&A system that:
metadata = {}
try:
- result = self.client.memories.add(
+ result = self.client.add(
content=url,
container_tag=collection,
metadata={
@@ -234,7 +230,7 @@ A document Q&A system that:
def get_document_status(self, document_id: str) -> Dict:
"""Check document processing status"""
try:
- memory = self.client.memories.get(document_id)
+ memory = self.client.documents.get(document_id)
return {
'id': memory.id,
'status': memory.status,
@@ -248,7 +244,7 @@ A document Q&A system that:
def list_documents(self, collection: str) -> List[Dict]:
"""List all documents in a collection"""
try:
- memories = self.client.memories.list(
+ memories = self.client.documents.list(
container_tags=[collection],
limit=50,
sort='updatedAt',
@@ -307,7 +303,6 @@ A document Q&A system that:
includeFullDocs: false,
includeSummary: true,
onlyMatchingChunks: false,
- documentThreshold: 0.6,
chunkThreshold: 0.7
})
@@ -432,7 +427,6 @@ If the question cannot be answered from the provided documents, respond with: "I
include_full_docs=False,
include_summary=True,
only_matching_chunks=False,
- document_threshold=0.6,
chunk_threshold=0.7
)
diff --git a/apps/docs/cookbook/overview.mdx b/apps/docs/cookbook/overview.mdx
index 80ec57a1..a36dcfb6 100644
--- a/apps/docs/cookbook/overview.mdx
+++ b/apps/docs/cookbook/overview.mdx
@@ -53,7 +53,7 @@ We're working on more comprehensive recipes. Have a suggestion? [Let us know!](m
Can't find what you're looking for?
-- Browse [Search Examples](/search/examples/document-search) for specific feature usage
+- Browse [Search](/search) for specific feature usage
- Check the [AI SDK Examples](/cookbook/ai-sdk-integration) for complete implementations
- Reach out to [support](mailto:[email protected]) for help
diff --git a/apps/docs/cookbook/personal-assistant.mdx b/apps/docs/cookbook/personal-assistant.mdx
index 7d5f256b..59c3e057 100644
--- a/apps/docs/cookbook/personal-assistant.mdx
+++ b/apps/docs/cookbook/personal-assistant.mdx
@@ -9,7 +9,7 @@ Build a personal AI assistant that learns and remembers everything about the use
A personal AI assistant that:
- **Remembers user preferences** (dietary restrictions, work schedule, communication style)
-- **Maintains context** across multiple chat sessions
+- **Maintains context** across multiple chat sessions
- **Provides personalized recommendations** based on user history
- **Handles multiple conversation topics** while maintaining context
@@ -169,7 +169,7 @@ This searches the user's memory store for context relevant to their current mess
```python
async def add_user_memory(content: str, container_tag: str, email: str = None):
try:
- supermemory_client.memories.add(
+ supermemory_client.add(
content=content,
container_tag=container_tag,
metadata={"type": "personal_info", "email": normalize_email(email) if email else None}
@@ -196,7 +196,7 @@ Stores new information about the user.
async def chat_endpoint(data: dict):
messages = data.get("messages", [])
email = data.get("email")
-
+
if not messages:
raise HTTPException(status_code=400, detail="No messages provided")
if not email:
@@ -216,7 +216,7 @@ This endpoint receives the chat request. It expects:
user_id = stable_user_id_from_email(email)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
-
+
container_tag = f"user_{user_id}"
```
@@ -229,7 +229,7 @@ The container tag (`user_abc123`) isolates this user's memories from everyone el
```python
user_message = messages[-1]["content"]
memory_context = await search_user_memories(user_message, container_tag)
-
+
enhanced_messages = [
{"role": "system", "content": f"{SYSTEM_PROMPT}\n\n{memory_context}"}
] + messages
@@ -257,7 +257,7 @@ Now the AI can answer: "Try overnight oats with plant-based protein—perfect fo
```python
try:
response = await openai_client.chat.completions.create(
- model="gpt-4o",
+ model="gpt-5",
messages=enhanced_messages,
temperature=0.7,
stream=True
@@ -265,7 +265,7 @@ Now the AI can answer: "Try overnight oats with plant-based protein—perfect fo
```
**Key parameters:**
-- `model="gpt-4o"`: Fast, capable model
+- `model="gpt-5"`: Fast, capable model
- `messages`: Full conversation + memory context
- `temperature=0.7`: Balanced creativity (0=deterministic, 1=creative)
- `stream=True`: Enables word-by-word streaming
@@ -308,7 +308,7 @@ After streaming completes, check if the user explicitly asked to remember someth
```python
return StreamingResponse(generate(), media_type="text/plain")
-
+
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
```
@@ -360,11 +360,11 @@ st.markdown("*Your AI that learns and remembers*")
with st.sidebar:
st.header("👤 User Profile")
-
+
if not st.session_state.user_name or not st.session_state.email:
name = st.text_input("What should I call you?")
email = st.text_input("Email", placeholder="[email protected]")
-
+
if st.button("Get Started"):
if name and email:
st.session_state.user_name = name
@@ -388,12 +388,12 @@ if st.session_state.user_name and st.session_state.email:
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
-
+
if prompt := st.chat_input("Message..."):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
-
+
with st.chat_message("assistant"):
try:
response = requests.post(
@@ -405,7 +405,7 @@ if st.session_state.user_name and st.session_state.email:
stream=True,
timeout=30
)
-
+
if response.status_code == 200:
full_response = ""
for line in response.iter_lines():
@@ -416,7 +416,7 @@ if st.session_state.user_name and st.session_state.email:
full_response += data['content']
except:
continue
-
+
st.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
else:
@@ -544,7 +544,7 @@ Without email, we can't maintain personalization across sessions.
const containerTag = `user_${email.toLowerCase().trim()}`
```
-Convert email to a container tag for memory isolation.
+Convert email to a container tag for memory isolation.
**Simpler than Python**: We skip UUID generation here for simplicity. In production, you might want to hash the email for privacy:
@@ -558,7 +558,7 @@ const containerTag = `user_${crypto.createHash('sha256').update(email).digest('h
```typescript
const result = streamText({
- model: openai('gpt-4o'),
+ model: openai('gpt-5'),
messages,
tools: supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
containerTags: [containerTag]
@@ -569,7 +569,7 @@ const containerTag = `user_${crypto.createHash('sha256').update(email).digest('h
This is where the magic happens! Let's break down each parameter:
-**`model: openai('gpt-4o')`**
+**`model: openai('gpt-5')`**
- Specifies which AI model to use
- The AI SDK handles the API calls
@@ -646,10 +646,10 @@ Catches any errors (API failures, tool errors, etc.) and returns a clean error r
| **Streaming** | Manual SSE formatting | `toAIStreamResponse()` handles it |
| **Error Handling** | Try/catch in each function | AI SDK handles tool errors |
-**Python = Manual Control**
+**Python = Manual Control**
You explicitly search and add memories. More control, more code.
-**TypeScript = AI-Driven**
+**TypeScript = AI-Driven**
The AI decides when to use tools. Less code, more "magic."
### Step 3: Chat UI
@@ -668,12 +668,12 @@ export default function ChatPage() {
const [userName, setUserName] = useState('')
const [tempEmail, setTempEmail] = useState('')
const [tempName, setTempName] = useState('')
-
+
const { messages, input, handleInputChange, handleSubmit } = useChat({
api: '/api/chat',
body: { email }
})
-
+
if (!email) {
return (
<div className="flex items-center justify-center min-h-screen p-4">
@@ -708,7 +708,7 @@ export default function ChatPage() {
</div>
)
}
-
+
return (
<div className="flex flex-col h-screen max-w-4xl mx-auto p-4">
<div className="flex-1 overflow-y-auto space-y-4 mb-4">
@@ -716,8 +716,8 @@ export default function ChatPage() {
<div
key={message.id}
className={`p-4 rounded-lg ${
- message.role === 'user'
- ? 'bg-blue-100 ml-auto max-w-[80%]'
+ message.role === 'user'
+ ? 'bg-blue-100 ml-auto max-w-[80%]'
: 'bg-gray-100 mr-auto max-w-[80%]'
}`}
>
@@ -725,7 +725,7 @@ export default function ChatPage() {
</div>
))}
</div>
-
+
<form onSubmit={handleSubmit} className="flex gap-2">
<input
value={input}
@@ -733,7 +733,7 @@ export default function ChatPage() {
placeholder="Tell me about yourself..."
className="flex-1 p-3 border rounded-lg"
/>
- <button
+ <button
type="submit"
className="px-6 py-3 bg-blue-600 text-white rounded-lg hover:bg-blue-700"
>
@@ -803,7 +803,7 @@ client = Supermemory(api_key=os.getenv("SUPERMEMORY_API_KEY"))
user_id = "your_user_id_here"
container_tag = f"user_{user_id}"
-memories = client.memories.list(
+memories = client.documents.list(
container_tags=[container_tag],
limit=20,
sort="updatedAt",
@@ -812,7 +812,7 @@ memories = client.memories.list(
print(f"Found {len(memories.memories)} memories:")
for i, memory in enumerate(memories.memories):
- full = client.memories.get(id=memory.id)
+ full = client.documents.get(id=memory.id)
print(f"\n{i + 1}. {full.content}")
```
diff --git a/apps/docs/docs.json b/apps/docs/docs.json
index 26aff108..bf38012a 100644
--- a/apps/docs/docs.json
+++ b/apps/docs/docs.json
@@ -42,7 +42,7 @@
"navbar": {
"links": [
{
- "href": "mailto:[email protected]",
+ "href": "mailto:[email protected]",
"label": "Support"
}
],
@@ -55,10 +55,6 @@
"navigation": {
"tabs": [
{
- "pages": ["introduction"],
- "tab": "Welcome"
- },
- {
"icon": "code",
"anchors": [
{
@@ -71,187 +67,111 @@
"pages": [
{
"group": "Getting Started",
- "pages": ["intro", "vibe-coding", "quickstart", "memory-vs-rag"]
+ "icon": "rocket",
+ "pages": ["intro", "quickstart", "vibe-coding"]
},
{
- "group": "Memory API",
+ "group": "Concepts",
+ "icon": "lightbulb",
"pages": [
- "how-it-works",
- {
- "group": "Add Memories",
- "icon": "plus",
- "pages": [
- "add-memories/overview",
- "add-memories/parameters",
- "memory-api/ingesting",
- {
- "group": "Examples",
- "pages": [
- "add-memories/examples/basic",
- "add-memories/examples/file-upload"
- ]
- }
- ]
- },
- {
- "group": "Search Memories",
- "icon": "search",
- "pages": [
- "search/overview",
- "search/parameters",
- "search/response-schema",
- "search/query-rewriting",
- "search/reranking",
- {
- "group": "Examples",
- "pages": [
- "search/examples/document-search",
- "search/examples/memory-search"
- ]
- }
- ]
- },
- "search/filtering",
- "memory-api/track-progress",
+ "concepts/how-it-works",
+ "concepts/graph-memory",
+ "concepts/content-types",
+ "concepts/super-rag",
+ "concepts/memory-vs-rag",
+ "concepts/filtering",
+ "concepts/user-profiles",
+ "concepts/customization"
+ ]
+ },
+ {
+ "group": "Using supermemory",
+ "icon": "brain",
+ "pages": [
+ "add-memories",
+ "search",
+ "user-profiles",
{
- "group": "List Memories",
- "icon": "list",
- "pages": [
- "list-memories/overview",
- {
- "group": "Examples",
- "pages": [
- "list-memories/examples/basic",
- "list-memories/examples/filtering",
- "list-memories/examples/pagination",
- "list-memories/examples/monitoring"
- ]
- }
- ]
+ "group": "Manage Content",
+ "icon": "folder-cog",
+ "pages": ["document-operations", "memory-operations"]
},
- "update-delete-memories/overview",
+ "overview/use-cases"
+ ]
+ },
+ {
+ "group": "Connectors and sync",
+ "icon": "plug",
+ "pages": [
+ "connectors/overview",
{
"group": "Connectors",
- "icon": "link",
+ "icon": "plug",
"pages": [
- "connectors/overview",
"connectors/notion",
"connectors/google-drive",
"connectors/onedrive",
"connectors/s3",
"connectors/github",
- "connectors/web-crawler",
- "connectors/troubleshooting",
- "memory-api/connectors/managing-resources"
+ "connectors/web-crawler"
]
},
- "/org-settings",
- "/analytics",
- "overview/use-cases"
- ]
- },
- {
- "group": "User Profiles",
- "icon": "user",
- "pages": [
- "user-profiles/overview",
- "user-profiles/api",
- "user-profiles/examples",
- "user-profiles/use-cases"
+ "connectors/troubleshooting",
+ "memory-api/connectors/managing-resources"
]
},
{
- "group": "Memory Router",
- "icon": "route",
+ "group": "Migration Guides",
+ "icon": "arrow-right-left",
"pages": [
- "memory-router/overview",
- "memory-router/usage",
- "memory-router/with-memory-api"
+ {
+ "group": "From another provider",
+ "icon": "truck",
+ "pages": ["migration/from-mem0", "migration/from-zep"]
+ }
]
- },
- {
- "group": "Integrations with no-code tools",
- "pages": ["n8n", "zapier"]
- },
- {
- "group": "Supermemory MCP",
- "pages": ["supermemory-mcp/mcp", "supermemory-mcp/setup"]
- },
- {
- "group": "Migration Guides",
- "pages": ["migration/from-mem0", "migration/from-zep"]
- },
- {
- "group": "Deployment",
- "pages": ["deployment/self-hosting"]
}
]
+ },
+ {
+ "anchor": "Supermemory MCP",
+ "icon": "terminal",
+ "pages": ["supermemory-mcp/mcp", "supermemory-mcp/setup"]
}
],
"tab": "Developer Platform"
},
{
- "icon": "book-open",
+ "icon": "plug",
"anchors": [
{
- "anchor": "API Reference",
- "icon": "unplug",
- "openapi": "https://api.supermemory.ai/v3/openapi"
+ "anchor": "Integrations",
+ "pages": [
+ "integrations/supermemory-sdk",
+ "integrations/ai-sdk",
+ "integrations/openai",
+ "integrations/memory-graph",
+ "integrations/claude-memory",
+ "integrations/pipecat",
+ "integrations/n8n",
+ "integrations/zapier"
+ ]
}
],
- "tab": "API Reference"
+ "tab": "Integrations"
},
{
- "icon": "plug",
+ "icon": "book-open",
"anchors": [
{
- "anchor": "SDKs",
- "pages": [
- "memory-api/sdks/overview",
- {
- "group": "Supermemory SDKs",
- "pages": [
- "memory-api/sdks/native"
- ]
- },
- {
- "group": "OpenAI SDK",
- "icon": "sparkles",
- "pages": ["memory-api/sdks/openai-plugins"]
- },
- {
- "group": "AI SDK",
- "icon": "triangle",
- "pages": [
- "ai-sdk/overview",
- "ai-sdk/user-profiles",
- "ai-sdk/memory-tools",
- "ai-sdk/infinite-chat"
- ]
- },
- {
- "group": "Memory Graph",
- "icon": "network",
- "pages": [
- "memory-graph/overview",
- "memory-graph/installation",
- "memory-graph/quickstart",
- "memory-graph/api-reference",
- "memory-graph/examples"
- ]
- },
- {
- "group": "Voice & Realtime",
- "icon": "mic",
- "pages": [
- "voice-realtime/pipecat"
- ]
- }
- ]
+ "anchor": "API Reference",
+ "icon": "unplug",
+ "openapi": "https://api.supermemory.ai/v3/openapi"
}
],
- "tab": "SDKs"
+ "tab": "API Reference"
},
+
{
"icon": "flask-conical",
"anchors": [
@@ -299,8 +219,7 @@
"cookbook/customer-support",
"cookbook/ai-sdk-integration",
"cookbook/perplexity-supermemory",
- "cookbook/chat-with-gdrive",
- "cookbook/inf-chat-blog"
+ "cookbook/chat-with-gdrive"
]
}
]
@@ -322,9 +241,224 @@
},
"redirects": [
{
- "destination": "/introduction",
+ "destination": "/intro",
"permanent": false,
"source": "/"
+ },
+ {
+ "destination": "/concepts/how-it-works",
+ "permanent": true,
+ "source": "/how-it-works"
+ },
+ {
+ "destination": "/concepts/memory-vs-rag",
+ "permanent": true,
+ "source": "/memory-vs-rag"
+ },
+ {
+ "destination": "/integrations/supermemory-sdk",
+ "permanent": true,
+ "source": "/memory-api/sdks/overview"
+ },
+ {
+ "destination": "/integrations/supermemory-sdk",
+ "permanent": true,
+ "source": "/memory-api/sdks/native"
+ },
+ {
+ "destination": "/integrations/openai",
+ "permanent": true,
+ "source": "/memory-api/sdks/openai-plugins"
+ },
+ {
+ "destination": "/integrations/ai-sdk",
+ "permanent": true,
+ "source": "/ai-sdk/overview"
+ },
+ {
+ "destination": "/integrations/ai-sdk",
+ "permanent": true,
+ "source": "/ai-sdk/user-profiles"
+ },
+ {
+ "destination": "/integrations/ai-sdk",
+ "permanent": true,
+ "source": "/ai-sdk/memory-tools"
+ },
+ {
+ "destination": "/integrations/ai-sdk",
+ "permanent": true,
+ "source": "/ai-sdk/infinite-chat"
+ },
+ {
+ "destination": "/integrations/memory-graph",
+ "permanent": true,
+ "source": "/memory-graph/overview"
+ },
+ {
+ "destination": "/integrations/memory-graph",
+ "permanent": true,
+ "source": "/memory-graph/installation"
+ },
+ {
+ "destination": "/integrations/memory-graph",
+ "permanent": true,
+ "source": "/memory-graph/quickstart"
+ },
+ {
+ "destination": "/integrations/memory-graph",
+ "permanent": true,
+ "source": "/memory-graph/api-reference"
+ },
+ {
+ "destination": "/integrations/memory-graph",
+ "permanent": true,
+ "source": "/memory-graph/examples"
+ },
+ {
+ "destination": "/integrations/pipecat",
+ "permanent": true,
+ "source": "/voice-realtime/pipecat"
+ },
+ {
+ "destination": "/integrations/n8n",
+ "permanent": true,
+ "source": "/n8n"
+ },
+ {
+ "destination": "/integrations/zapier",
+ "permanent": true,
+ "source": "/zapier"
+ },
+ {
+ "destination": "/concepts/filtering",
+ "permanent": true,
+ "source": "/search/filtering"
+ },
+ {
+ "destination": "/add-memories",
+ "permanent": true,
+ "source": "/add-memories/overview"
+ },
+ {
+ "destination": "/add-memories",
+ "permanent": true,
+ "source": "/add-memories/parameters"
+ },
+ {
+ "destination": "/add-memories",
+ "permanent": true,
+ "source": "/memory-api/ingesting"
+ },
+ {
+ "destination": "/add-memories",
+ "permanent": true,
+ "source": "/add-memories/examples/basic"
+ },
+ {
+ "destination": "/add-memories",
+ "permanent": true,
+ "source": "/add-memories/examples/file-upload"
+ },
+ {
+ "destination": "/search",
+ "permanent": true,
+ "source": "/search/overview"
+ },
+ {
+ "destination": "/search",
+ "permanent": true,
+ "source": "/search/parameters"
+ },
+ {
+ "destination": "/search",
+ "permanent": true,
+ "source": "/search/response-schema"
+ },
+ {
+ "destination": "/search",
+ "permanent": true,
+ "source": "/search/query-rewriting"
+ },
+ {
+ "destination": "/search",
+ "permanent": true,
+ "source": "/search/reranking"
+ },
+ {
+ "destination": "/search",
+ "permanent": true,
+ "source": "/search/examples/document-search"
+ },
+ {
+ "destination": "/search",
+ "permanent": true,
+ "source": "/search/examples/memory-search"
+ },
+ {
+ "destination": "/concepts/user-profiles",
+ "permanent": true,
+ "source": "/user-profiles/overview"
+ },
+ {
+ "destination": "/user-profiles",
+ "permanent": true,
+ "source": "/user-profiles/api"
+ },
+ {
+ "destination": "/user-profiles",
+ "permanent": true,
+ "source": "/user-profiles/examples"
+ },
+ {
+ "destination": "/concepts/user-profiles",
+ "permanent": true,
+ "source": "/user-profiles/use-cases"
+ },
+ {
+ "destination": "/add-memories",
+ "permanent": true,
+ "source": "/update-delete-memories/overview"
+ },
+ {
+ "destination": "/document-operations",
+ "permanent": true,
+ "source": "/memory-api/track-progress"
+ },
+ {
+ "destination": "/document-operations",
+ "permanent": true,
+ "source": "/list-memories/overview"
+ },
+ {
+ "destination": "/document-operations",
+ "permanent": true,
+ "source": "/list-memories/examples/basic"
+ },
+ {
+ "destination": "/document-operations",
+ "permanent": true,
+ "source": "/list-memories/examples/filtering"
+ },
+ {
+ "destination": "/document-operations",
+ "permanent": true,
+ "source": "/list-memories/examples/pagination"
+ },
+ {
+ "destination": "/document-operations",
+ "permanent": true,
+ "source": "/list-memories/examples/monitoring"
+ },
+ {
+ "destination": "/concepts/customization",
+ "permanent": true,
+ "source": "/org-settings"
+ },
+ {
+ "destination": "/add-memories",
+ "permanent": true,
+ "source": "/memory-api/overview"
}
],
"styling": { "eyebrows": "breadcrumbs" },
diff --git a/apps/docs/document-operations.mdx b/apps/docs/document-operations.mdx
new file mode 100644
index 00000000..2161d696
--- /dev/null
+++ b/apps/docs/document-operations.mdx
@@ -0,0 +1,295 @@
+---
+title: "Document Operations"
+sidebarTitle: "Documents"
+description: "List, get, update, and delete your ingested documents"
+icon: "files"
+---
+
+Manage documents after ingestion using the SDK.
+
+## List Documents
+
+Retrieve paginated documents with filtering.
+
+<Tabs>
+ <Tab title="TypeScript">
+ ```typescript
+ const documents = await client.documents.list({
+ limit: 10,
+ containerTags: ["user_123"]
+ });
+
+ documents.memories.forEach(d => {
+ console.log(d.id, d.title, d.status);
+ });
+ ```
+ </Tab>
+ <Tab title="Python">
+ ```python
+ documents = client.documents.list(
+ limit=10,
+ container_tags=["user_123"]
+ )
+
+ for doc in documents.memories:
+ print(doc.id, doc.title, doc.status)
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ curl -X POST "https://api.supermemory.ai/v3/documents/list" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{"limit": 10, "containerTags": ["user_123"]}'
+ ```
+ </Tab>
+</Tabs>
+
+**Response:**
+```json
+{
+ "memories": [
+ {
+ "id": "doc_abc123",
+ "title": "Meeting notes",
+ "status": "done",
+ "type": "text",
+ "createdAt": "2024-01-15T10:30:00Z",
+ "containerTags": ["user_123"],
+ "metadata": { "source": "slack" }
+ }
+ ],
+ "pagination": {
+ "currentPage": 1,
+ "totalPages": 3,
+ "totalItems": 25
+ }
+}
+```
+
+### Parameters
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|-------------|
+| `limit` | number | 50 | Items per page (max 200) |
+| `page` | number | 1 | Page number |
+| `containerTags` | string[] | — | Filter by tags |
+| `sort` | string | `createdAt` | Sort by `createdAt` or `updatedAt` |
+| `order` | string | `desc` | `desc` (newest) or `asc` (oldest) |
+
+<Accordion title="Pagination Example">
+ ```typescript
+ async function getAllDocuments(containerTag: string) {
+ const all = [];
+ let page = 1;
+
+ while (true) {
+ const { memories, pagination } = await client.documents.list({
+ containerTags: [containerTag],
+ limit: 100,
+ page
+ });
+
+ all.push(...memories);
+ if (page >= pagination.totalPages) break;
+ page++;
+ }
+
+ return all;
+ }
+ ```
+</Accordion>
+
+<Accordion title="Filter by Metadata">
+ ```typescript
+ const documents = await client.documents.list({
+ containerTags: ["user_123"],
+ filters: {
+ AND: [
+ { key: "status", value: "reviewed", negate: false },
+ { key: "priority", value: "high", negate: false }
+ ]
+ }
+ });
+ ```
+</Accordion>
+
+---
+
+## Get Document
+
+Get a specific document with its processing status.
+
+<Tabs>
+ <Tab title="TypeScript">
+ ```typescript
+ const doc = await client.documents.get("doc_abc123");
+
+    console.log(doc.status); // "queued" | "extracting" | "chunking" | "embedding" | "done" | "failed"
+ console.log(doc.content);
+ ```
+ </Tab>
+ <Tab title="Python">
+ ```python
+ doc = client.documents.get("doc_abc123")
+
+ print(doc.status)
+ print(doc.content)
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ curl "https://api.supermemory.ai/v3/documents/doc_abc123" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY"
+ ```
+ </Tab>
+</Tabs>
+
+### Processing Status
+
+| Status | Description |
+|--------|-------------|
+| `queued` | Waiting to process |
+| `extracting` | Extracting content (OCR, transcription) |
+| `chunking` | Breaking into searchable pieces |
+| `embedding` | Creating vector representations |
+| `done` | Ready for search |
+| `failed` | Processing failed |
+
+<Accordion title="Poll for Completion">
+ ```typescript
+ async function waitForProcessing(docId: string) {
+ while (true) {
+ const doc = await client.documents.get(docId);
+
+ if (doc.status === "done") return doc;
+ if (doc.status === "failed") throw new Error("Processing failed");
+
+ await new Promise(r => setTimeout(r, 2000));
+ }
+ }
+ ```
+</Accordion>
+
+---
+
+## Update Document
+
+Update a document's content or metadata. Triggers reprocessing.
+
+<Tabs>
+ <Tab title="TypeScript">
+ ```typescript
+ await client.documents.update("doc_abc123", {
+ content: "Updated content here",
+ metadata: { version: 2, reviewed: true }
+ });
+ ```
+ </Tab>
+ <Tab title="Python">
+ ```python
+ client.documents.update(
+ "doc_abc123",
+ content="Updated content here",
+ metadata={"version": 2, "reviewed": True}
+ )
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ curl -X PATCH "https://api.supermemory.ai/v3/documents/doc_abc123" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{"content": "Updated content here", "metadata": {"version": 2}}'
+ ```
+ </Tab>
+</Tabs>
+
+---
+
+## Delete Documents
+
+Permanently remove documents.
+
+<Tabs>
+ <Tab title="TypeScript">
+ ```typescript
+ // Single delete
+ await client.documents.delete("doc_abc123");
+
+ // Bulk delete by IDs
+ await client.documents.deleteBulk({
+ ids: ["doc_1", "doc_2", "doc_3"]
+ });
+
+ // Bulk delete by container tag (delete all for a user)
+ await client.documents.deleteBulk({
+ containerTags: ["user_123"]
+ });
+ ```
+ </Tab>
+ <Tab title="Python">
+ ```python
+ # Single delete
+ client.documents.delete("doc_abc123")
+
+ # Bulk delete by IDs
+ client.documents.delete_bulk(ids=["doc_1", "doc_2", "doc_3"])
+
+ # Bulk delete by container tag
+ client.documents.delete_bulk(container_tags=["user_123"])
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ # Single delete
+ curl -X DELETE "https://api.supermemory.ai/v3/documents/doc_abc123" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY"
+
+ # Bulk delete by IDs
+ curl -X DELETE "https://api.supermemory.ai/v3/documents/bulk" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{"ids": ["doc_1", "doc_2", "doc_3"]}'
+ ```
+ </Tab>
+</Tabs>
+
+<Warning>
+Deletes are permanent — no recovery.
+</Warning>
+
+---
+
+## Processing Queue
+
+Check documents currently being processed.
+
+<Tabs>
+ <Tab title="TypeScript">
+ ```typescript
+ const response = await client.documents.listProcessing();
+ console.log(`${response.documents.length} documents processing`);
+ ```
+ </Tab>
+ <Tab title="Python">
+ ```python
+ response = client.documents.list_processing()
+ print(f"{len(response.documents)} documents processing")
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ curl "https://api.supermemory.ai/v3/documents/processing" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY"
+ ```
+ </Tab>
+</Tabs>
+
+---
+
+## Next Steps
+
+- [Memory Operations](/memory-operations) — Advanced v4 memory operations
+- [Search](/search) — Query your memories
+- [Ingesting Content](/add-memories) — Add new content
diff --git a/apps/docs/images/anthropic-1.svg b/apps/docs/images/anthropic-1.svg
new file mode 100644
index 00000000..acfa2297
--- /dev/null
+++ b/apps/docs/images/anthropic-1.svg
@@ -0,0 +1 @@
+<svg height="2500" viewBox="0 6.603 1192.672 1193.397" width="2500" xmlns="http://www.w3.org/2000/svg" fill="#1E293B" fill-rule="evenodd"><path d="m233.96 800.215 234.684-131.678 3.947-11.436-3.947-6.363h-11.436l-39.221-2.416-134.094-3.624-116.296-4.832-112.67-6.04-28.35-6.04-26.577-35.035 2.738-17.477 23.84-16.027 34.147 2.98 75.463 5.155 113.235 7.812 82.147 4.832 121.692 12.644h19.329l2.738-7.812-6.604-4.832-5.154-4.832-117.182-79.41-126.845-83.92-66.443-48.321-35.92-24.484-18.12-22.953-7.813-50.093 32.618-35.92 43.812 2.98 11.195 2.98 44.375 34.147 94.792 73.37 123.786 91.167 18.12 15.06 7.249-5.154.886-3.624-8.135-13.61-67.329-121.692-71.838-123.785-31.974-51.302-8.456-30.765c-2.98-12.645-5.154-23.275-5.154-36.242l37.127-50.416 20.537-6.604 49.53 6.604 20.86 18.121 30.765 70.39 49.852 110.818 77.315 150.684 22.631 44.698 12.08 41.396 4.51 12.645h7.813v-7.248l6.362-84.886 11.759-104.215 11.436-134.094 3.946-37.772 18.685-45.262 37.127-24.482 28.994 13.852 23.839 34.148-3.303 22.067-14.174 92.134-27.785 144.323-18.121 96.644h10.55l12.08-12.08 48.887-64.913 82.147-102.685 36.242-40.752 42.282-45.02 27.14-21.423h51.303l37.772 56.135-16.913 57.986-52.832 67.007-43.812 56.779-62.82 84.563-39.22 67.651 3.623 5.396 9.343-.886 141.906-30.201 76.671-13.852 91.49-15.705 41.396 19.329 4.51 19.65-16.269 40.189-97.852 24.16-114.764 22.954-170.9 40.43-2.093 1.53 2.416 2.98 76.993 7.248 32.94 1.771h80.617l150.12 11.195 39.222 25.933 23.517 31.732-3.946 24.16-60.403 30.766-81.503-19.33-190.228-45.26-65.235-16.27h-9.02v5.397l54.362 53.154 99.624 89.96 124.752 115.973 6.362 28.671-16.027 22.63-16.912-2.415-109.611-82.47-42.282-37.127-95.758-80.618h-6.363v8.456l22.067 32.296 116.537 175.167 6.04 53.719-8.456 17.476-30.201 10.55-33.181-6.04-68.215-95.758-70.39-107.84-56.778-96.644-6.926 3.947-33.503 360.886-15.705 18.443-36.243 13.852-30.201-22.953-16.027-37.127 16.027-73.37 19.329-95.758 15.704-76.107 14.175-94.55 8.456-31.41-.563-2.094-6.927.886-71.275 97.852-108.402 
146.497-85.772 91.812-20.537 8.134-35.597-18.443 3.301-32.94 19.893-29.315 118.712-151.007 71.597-93.583 46.228-54.04-.322-7.813h-2.738l-315.302 204.725-56.135 7.248-24.16-22.63 2.98-37.128 11.435-12.08 94.792-65.236-.322.323z"/></svg>
diff --git a/apps/docs/images/openai.svg b/apps/docs/images/openai.svg
new file mode 100644
index 00000000..cba1bd1c
--- /dev/null
+++ b/apps/docs/images/openai.svg
@@ -0,0 +1 @@
+<svg fill="#1E293B" fill-rule="evenodd" height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>OpenAI</title><path d="M21.55 10.004a5.416 5.416 0 00-.478-4.501c-1.217-2.09-3.662-3.166-6.05-2.66A5.59 5.59 0 0010.831 1C8.39.995 6.224 2.546 5.473 4.838A5.553 5.553 0 001.76 7.496a5.487 5.487 0 00.691 6.5 5.416 5.416 0 00.477 4.502c1.217 2.09 3.662 3.165 6.05 2.66A5.586 5.586 0 0013.168 23c2.443.006 4.61-1.546 5.361-3.84a5.553 5.553 0 003.715-2.66 5.488 5.488 0 00-.693-6.497v.001zm-8.381 11.558a4.199 4.199 0 01-2.675-.954c.034-.018.093-.05.132-.074l4.44-2.53a.71.71 0 00.364-.623v-6.176l1.877 1.069c.02.01.033.029.036.05v5.115c-.003 2.274-1.87 4.118-4.174 4.123zM4.192 17.78a4.059 4.059 0 01-.498-2.763c.032.02.09.055.131.078l4.44 2.53c.225.13.504.13.73 0l5.42-3.088v2.138a.068.068 0 01-.027.057L9.9 19.288c-1.999 1.136-4.552.46-5.707-1.51h-.001zM3.023 8.216A4.15 4.15 0 015.198 6.41l-.002.151v5.06a.711.711 0 00.364.624l5.42 3.087-1.876 1.07a.067.067 0 01-.063.005l-4.489-2.559c-1.995-1.14-2.679-3.658-1.53-5.63h.001zm15.417 3.54l-5.42-3.088L14.896 7.6a.067.067 0 01.063-.006l4.489 2.557c1.998 1.14 2.683 3.662 1.529 5.633a4.163 4.163 0 01-2.174 1.807V12.38a.71.71 0 00-.363-.623zm1.867-2.773a6.04 6.04 0 00-.132-.078l-4.44-2.53a.731.731 0 00-.729 0l-5.42 3.088V7.325a.068.068 0 01.027-.057L14.1 4.713c2-1.137 4.555-.46 5.707 1.513.487.833.664 1.809.499 2.757h.001zm-11.741 3.81l-1.877-1.068a.065.065 0 01-.036-.051V6.559c.001-2.277 1.873-4.122 4.181-4.12.976 0 1.92.338 2.671.954-.034.018-.092.05-.131.073l-4.44 2.53a.71.71 0 00-.365.623l-.003 6.173v.002zm1.02-2.168L12 9.25l2.414 1.375v2.75L12 14.75l-2.415-1.375v-2.75z"></path></svg>
diff --git a/apps/docs/images/pipecat.svg b/apps/docs/images/pipecat.svg
new file mode 100644
index 00000000..4f7fd4f0
--- /dev/null
+++ b/apps/docs/images/pipecat.svg
@@ -0,0 +1 @@
+<svg width="16" height="16" viewBox="0 0 332 192" fill="none" xmlns="http://www.w3.org/2000/svg" class="logo"><path d="M45.7718 0.770123C50.4477 -0.990356 55.7252 0.330677 59.0204 4.08644L101.936 53.0005H230.064L272.98 4.08644C276.275 0.330677 281.552 -0.990356 286.228 0.770123C290.904 2.5306 294 7.00416 294 12.0005V120H332V144H270V43.8733L244.52 72.9146C242.242 75.5116 238.955 77.0005 235.5 77.0005H96.5C93.0452 77.0005 89.7581 75.5116 87.4796 72.9146L62 43.8733V144H0V120H38V12.0005C38 7.00416 41.0958 2.5306 45.7718 0.770123Z" fill="#1E293B"></path><path d="M270 168.001H332V192.001H270V168.001Z" fill="#1E293B"></path><path d="M0 168.001H62V192.001H0V168.001Z" fill="#1E293B"></path><path d="M128 128.001C128 136.837 120.837 144.001 112 144.001C103.163 144.001 96 136.837 96 128.001C96 119.164 103.163 112.001 112 112.001C120.837 112.001 128 119.164 128 128.001Z" fill="#1E293B"></path><path d="M236 128.001C236 136.837 228.837 144.001 220 144.001C211.163 144.001 204 136.837 204 128.001C204 119.164 211.163 112.001 220 112.001C228.837 112.001 236 119.164 236 128.001Z" fill="#1E293B"></path></svg>
diff --git a/apps/docs/images/quickstart-icon.svg b/apps/docs/images/quickstart-icon.svg
new file mode 100644
index 00000000..eac75e8b
--- /dev/null
+++ b/apps/docs/images/quickstart-icon.svg
@@ -0,0 +1,4 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="16" height="16" color="currentColor" fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round">
+ <path d="M17.5 17.5L21.5 21.5" />
+ <path d="M5.97185 3.79282L9.31786 4.92059C9.78049 5.08073 10.4825 4.97602 10.88 4.69283L13.2273 3.01883C14.729 1.949 15.972 2.58706 15.9733 4.44162L15.9862 7.58112C15.9884 8.11281 16.3582 8.7726 16.8087 9.05928L19.207 10.5629C21.104 11.7544 20.8884 13.1649 18.7263 13.713L15.7172 14.4729C15.1735 14.6099 14.6126 15.1709 14.4693 15.721L13.7096 18.7307C13.1679 20.8868 11.7449 21.1025 10.5601 19.2114L9.0567 16.8127C8.77007 16.3621 8.1104 15.9923 7.57881 15.9901L4.43989 15.9772C2.59198 15.9696 1.94773 14.7327 3.01737 13.2306L4.69105 10.8829C4.96789 10.4917 5.07258 9.78951 4.91247 9.3268L3.78491 5.98017C3.17623 4.16109 4.15941 3.17773 5.97185 3.79282Z" />
+</svg>
diff --git a/apps/docs/images/supermemory.svg b/apps/docs/images/supermemory.svg
new file mode 100644
index 00000000..2086fcaf
--- /dev/null
+++ b/apps/docs/images/supermemory.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 206 168"><path fill="#1E293B" d="M205.864 66.263h-76.401V0h-24.684v71.897c0 7.636 3.021 14.97 8.391 20.373l62.383 62.777 17.454-17.564-46.076-46.365h58.948v-24.84l-.015-.015ZM12.872 30.517l46.075 46.365H0v24.84h76.4v66.264h24.685V96.089c0-7.637-3.021-14.97-8.39-20.374l-62.37-62.762-17.453 17.564Z"/></svg>
diff --git a/apps/docs/install.md b/apps/docs/install.md
index d0d4b1b8..86b26b94 100644
--- a/apps/docs/install.md
+++ b/apps/docs/install.md
@@ -71,9 +71,17 @@ containerTag: orgId // Org members share memories
```
**BOTH (ask which):**
-- Option A: `containerTag: \`\${userId}-\${orgId}\``
-- Option B: `containerTag: orgId, metadata: { userId }`
-- Option C: `containerTag: userId, metadata: { orgId }`
+
+```typescript
+// Option A: Unique per user-org combination
+containerTag: `${userId}-${orgId}`
+
+// Option B: Org-scoped with user metadata
+containerTag: orgId, metadata: { userId }
+
+// Option C: User-scoped with org metadata
+containerTag: userId, metadata: { orgId }
+```
## STEP 5: INTEGRATION CODE
diff --git a/apps/docs/integrations/ai-sdk.mdx b/apps/docs/integrations/ai-sdk.mdx
new file mode 100644
index 00000000..4f70d916
--- /dev/null
+++ b/apps/docs/integrations/ai-sdk.mdx
@@ -0,0 +1,182 @@
+---
+title: "Vercel AI SDK"
+sidebarTitle: "Vercel AI SDK"
+description: "Use Supermemory with Vercel AI SDK for seamless memory management"
+icon: "triangle"
+---
+
+The Supermemory AI SDK provides native integration with Vercel's AI SDK through two approaches: **User Profiles** for automatic personalization and **Memory Tools** for agent-based interactions.
+
+<Card title="@supermemory/tools on npm" icon="npm" href="https://www.npmjs.com/package/@supermemory/tools">
+ Check out the NPM page for more details
+</Card>
+
+## Installation
+
+```bash
+npm install @supermemory/tools
+```
+
+## Quick Comparison
+
+| Approach | Use Case | Setup |
+|----------|----------|-------|
+| User Profiles | Personalized LLM responses with automatic user context | Simple middleware |
+| Memory Tools | AI agents that need explicit memory control | Tool definitions |
+
+---
+
+## User Profiles with Middleware
+
+Automatically inject user profiles into every LLM call for instant personalization.
+
+```typescript
+import { generateText } from "ai"
+import { withSupermemory } from "@supermemory/tools/ai-sdk"
+import { openai } from "@ai-sdk/openai"
+
+const modelWithMemory = withSupermemory(openai("gpt-5"), "user-123")
+
+const result = await generateText({
+ model: modelWithMemory,
+ messages: [{ role: "user", content: "What do you know about me?" }]
+})
+```
+
+<Note>
+ **Memory saving is disabled by default.** The middleware only retrieves existing memories. To automatically save new memories:
+
+ ```typescript
+ const modelWithMemory = withSupermemory(openai("gpt-5"), "user-123", {
+ addMemory: "always"
+ })
+ ```
+</Note>
+
+### Memory Search Modes
+
+**Profile Mode (Default)** - Retrieves the user's complete profile:
+
+```typescript
+const model = withSupermemory(openai("gpt-4"), "user-123", { mode: "profile" })
+```
+
+**Query Mode** - Searches memories based on the user's message:
+
+```typescript
+const model = withSupermemory(openai("gpt-4"), "user-123", { mode: "query" })
+```
+
+**Full Mode** - Combines profile AND query-based search:
+
+```typescript
+const model = withSupermemory(openai("gpt-4"), "user-123", { mode: "full" })
+```
+
+### Custom Prompt Templates
+
+Customize how memories are formatted:
+
+```typescript
+import { withSupermemory, type MemoryPromptData } from "@supermemory/tools/ai-sdk"
+
+const claudePrompt = (data: MemoryPromptData) => `
+<context>
+ <user_profile>
+ ${data.userMemories}
+ </user_profile>
+ <relevant_memories>
+ ${data.generalSearchMemories}
+ </relevant_memories>
+</context>
+`.trim()
+
+const model = withSupermemory(anthropic("claude-3-sonnet"), "user-123", {
+ mode: "full",
+ promptTemplate: claudePrompt
+})
+```
+
+### Verbose Logging
+
+```typescript
+const model = withSupermemory(openai("gpt-4"), "user-123", {
+ verbose: true
+})
+// Console output shows memory retrieval details
+```
+
+---
+
+## Memory Tools
+
+Add memory capabilities to AI agents with search, add, and fetch operations.
+
+```typescript
+import { streamText } from "ai"
+import { createAnthropic } from "@ai-sdk/anthropic"
+import { supermemoryTools } from "@supermemory/tools/ai-sdk"
+
+const anthropic = createAnthropic({ apiKey: "YOUR_ANTHROPIC_KEY" })
+
+const result = await streamText({
+ model: anthropic("claude-3-sonnet"),
+ prompt: "Remember that my name is Alice",
+ tools: supermemoryTools("YOUR_SUPERMEMORY_KEY")
+})
+```
+
+### Available Tools
+
+**Search Memories** - Semantic search through user memories:
+
+```typescript
+const result = await streamText({
+ model: openai("gpt-5"),
+ prompt: "What are my dietary preferences?",
+ tools: supermemoryTools("API_KEY")
+})
+// AI will call: searchMemories({ informationToGet: "dietary preferences" })
+```
+
+**Add Memory** - Store new information:
+
+```typescript
+const result = await streamText({
+ model: anthropic("claude-3-sonnet"),
+ prompt: "Remember that I'm allergic to peanuts",
+ tools: supermemoryTools("API_KEY")
+})
+// AI will call: addMemory({ memory: "User is allergic to peanuts" })
+```
+
+### Using Individual Tools
+
+For more control, import tools separately:
+
+```typescript
+import {
+ searchMemoriesTool,
+ addMemoryTool
+} from "@supermemory/tools/ai-sdk"
+
+const result = await streamText({
+  model: openai("gpt-5"),
+  prompt: "What do you know about me?",
+  tools: {
+    searchMemories: searchMemoriesTool("API_KEY", { projectId: "personal" }),
+    addMemory: addMemoryTool("API_KEY", { projectId: "personal" }),
+    createEvent: yourCustomTool,
+  }
+})
+
+### Tool Results
+
+```typescript
+// searchMemories result
+{ success: true, results: [...], count: 5 }
+
+// addMemory result
+{ success: true, memory: { id: "mem_123", ... } }
+```
+
diff --git a/apps/docs/integrations/claude-memory.mdx b/apps/docs/integrations/claude-memory.mdx
new file mode 100644
index 00000000..4487cb77
--- /dev/null
+++ b/apps/docs/integrations/claude-memory.mdx
@@ -0,0 +1,269 @@
+---
+title: "Claude Memory Tool"
+sidebarTitle: "Claude Memory Tool"
+description: "Use Claude's native memory tool with Supermemory as the backend"
+icon: "/images/anthropic-1.svg"
+---
+
+Claude has a native memory tool that allows it to store and retrieve information across conversations. Supermemory provides a backend implementation that maps Claude's memory commands to persistent storage.
+
+<Info>
+This integration works with Claude's built-in `memory` tool type, introduced in the Anthropic API. It requires the `context-management` beta flag.
+</Info>
+
+## Installation
+
+```bash
+npm install @supermemory/tools @anthropic-ai/sdk
+```
+
+## Quick Start
+
+```typescript
+import Anthropic from "@anthropic-ai/sdk"
+import { createClaudeMemoryTool } from "@supermemory/tools/claude-memory"
+
+const anthropic = new Anthropic()
+
+const memoryTool = createClaudeMemoryTool(process.env.SUPERMEMORY_API_KEY!, {
+ projectId: "my-app",
+})
+
+async function chatWithMemory(userMessage: string) {
+ // Send message to Claude with memory tool
+ const response = await anthropic.beta.messages.create({
+ model: "claude-sonnet-4-5",
+ max_tokens: 2048,
+ messages: [{ role: "user", content: userMessage }],
+ tools: [{ type: "memory_20250818", name: "memory" }],
+ betas: ["context-management-2025-06-27"],
+ })
+
+ // Handle any memory tool calls
+ const toolResults = []
+ for (const block of response.content) {
+ if (block.type === "tool_use" && block.name === "memory") {
+ const toolResult = await memoryTool.handleCommandForToolResult(
+ block.input as any,
+ block.id
+ )
+ toolResults.push(toolResult)
+ }
+ }
+
+ // Send tool results back to Claude if needed
+ if (toolResults.length > 0) {
+ const finalResponse = await anthropic.beta.messages.create({
+ model: "claude-sonnet-4-5",
+ max_tokens: 2048,
+ messages: [
+ { role: "user", content: userMessage },
+ { role: "assistant", content: response.content },
+ { role: "user", content: toolResults },
+ ],
+ tools: [{ type: "memory_20250818", name: "memory" }],
+ betas: ["context-management-2025-06-27"],
+ })
+
+ return finalResponse
+ }
+
+ return response
+}
+
+// Example usage
+const response = await chatWithMemory(
+ "Remember that I prefer React with TypeScript for my projects"
+)
+console.log(response.content[0])
+```
+
+## Configuration
+
+```typescript
+import { createClaudeMemoryTool } from "@supermemory/tools/claude-memory"
+
+const memoryTool = createClaudeMemoryTool(process.env.SUPERMEMORY_API_KEY!, {
+ // Scope memories to a project or user
+ projectId: "my-app",
+
+ // Or use container tags for more flexibility
+ containerTags: ["user-123", "project-alpha"],
+
+ // Custom memory container prefix (default: "claude_memory")
+ memoryContainerTag: "my_memory_prefix",
+
+ // Custom API endpoint
+ baseUrl: "https://custom.api.com",
+})
+```
+
+## How It Works
+
+Claude's memory tool uses a file-system metaphor. Supermemory maps these operations to document storage:
+
+| Claude Command | Supermemory Action |
+|----------------|-------------------|
+| `view` | Search/retrieve documents |
+| `create` | Add new document |
+| `str_replace` | Update document content |
+| `insert` | Insert content at line |
+| `delete` | Delete document |
+| `rename` | Move document to new path |
+
+### Memory Path Structure
+
+All memory paths must start with `/memories/`:
+
+```
+/memories/preferences.txt # User preferences
+/memories/projects/react.txt # Project-specific notes
+/memories/context/current.txt # Current context
+```
+
+## Commands Reference
+
+### View (Read/List)
+
+```typescript
+// List directory contents
+{ command: "view", path: "/memories/" }
+
+// Read file contents
+{ command: "view", path: "/memories/preferences.txt" }
+
+// Read specific lines
+{ command: "view", path: "/memories/notes.txt", view_range: [1, 10] }
+```
+
+### Create
+
+```typescript
+{
+ command: "create",
+ path: "/memories/preferences.txt",
+ file_text: "User prefers dark mode\nFavorite language: TypeScript"
+}
+```
+
+### String Replace
+
+```typescript
+{
+ command: "str_replace",
+ path: "/memories/preferences.txt",
+ old_str: "dark mode",
+ new_str: "light mode"
+}
+```
+
+### Insert
+
+```typescript
+{
+ command: "insert",
+ path: "/memories/notes.txt",
+ insert_line: 5,
+ insert_text: "New note added here"
+}
+```
+
+### Delete
+
+```typescript
+{ command: "delete", path: "/memories/old-notes.txt" }
+```
+
+### Rename
+
+```typescript
+{
+ command: "rename",
+ path: "/memories/old-name.txt",
+ new_path: "/memories/new-name.txt"
+}
+```
+
+## Complete Example
+
+```typescript
+import Anthropic from "@anthropic-ai/sdk"
+import { createClaudeMemoryTool } from "@supermemory/tools/claude-memory"
+
+const anthropic = new Anthropic()
+const memoryTool = createClaudeMemoryTool(process.env.SUPERMEMORY_API_KEY!, {
+ projectId: "assistant",
+})
+
+async function runConversation() {
+ const messages: Anthropic.MessageParam[] = []
+
+ // Helper to chat with memory
+ async function chat(userMessage: string) {
+ messages.push({ role: "user", content: userMessage })
+
+ let response = await anthropic.beta.messages.create({
+ model: "claude-sonnet-4-5",
+ max_tokens: 2048,
+ messages,
+ tools: [{ type: "memory_20250818", name: "memory" }],
+ betas: ["context-management-2025-06-27"],
+ })
+
+ // Handle tool calls
+ while (response.stop_reason === "tool_use") {
+ const toolResults = []
+
+ for (const block of response.content) {
+ if (block.type === "tool_use" && block.name === "memory") {
+ const result = await memoryTool.handleCommandForToolResult(
+ block.input as any,
+ block.id
+ )
+ toolResults.push(result)
+ }
+ }
+
+ messages.push({ role: "assistant", content: response.content })
+ messages.push({ role: "user", content: toolResults })
+
+ response = await anthropic.beta.messages.create({
+ model: "claude-sonnet-4-5",
+ max_tokens: 2048,
+ messages,
+ tools: [{ type: "memory_20250818", name: "memory" }],
+ betas: ["context-management-2025-06-27"],
+ })
+ }
+
+ messages.push({ role: "assistant", content: response.content })
+ return response
+ }
+
+ // Have a conversation with persistent memory
+ await chat("My name is Alex and I'm a backend developer")
+ await chat("I prefer Go for systems programming")
+ await chat("What do you remember about me?")
+}
+
+runConversation()
+```
+
+## Environment Variables
+
+```bash
+SUPERMEMORY_API_KEY=your_supermemory_key
+ANTHROPIC_API_KEY=your_anthropic_key
+```
+
+## Next Steps
+
+<CardGroup cols={2}>
+ <Card title="AI SDK Integration" icon="triangle" href="/integrations/ai-sdk">
+ Use with Vercel AI SDK for streamlined development
+ </Card>
+
+ <Card title="OpenAI SDK" icon="bolt" href="/integrations/openai">
+ Memory tools for OpenAI function calling
+ </Card>
+</CardGroup>
diff --git a/apps/docs/integrations/memory-graph.mdx b/apps/docs/integrations/memory-graph.mdx
new file mode 100644
index 00000000..decd5f52
--- /dev/null
+++ b/apps/docs/integrations/memory-graph.mdx
@@ -0,0 +1,363 @@
+---
+title: 'Memory Graph'
+sidebarTitle: "Memory Graph"
+description: 'Interactive visualization for documents, memories and connections'
+icon: "network"
+---
+
+Memory Graph is a React component that visualizes your Supermemory documents and memories as an interactive network. Documents appear as rectangular nodes, memories as hexagonal nodes, and connections between them show relationships and similarity.
+
+<Card title="@supermemory/memory-graph on npm" icon="npm" href="https://www.npmjs.com/package/@supermemory/memory-graph">
+ Check out the NPM page for more details
+</Card>
+
+## Installation
+
+```bash
+npm install @supermemory/memory-graph
+```
+
+**Requirements:** React 18.0.0 or higher
+
+## Quick Start
+
+```tsx
+'use client'; // For Next.js App Router
+
+import { MemoryGraph } from '@supermemory/memory-graph';
+import type { DocumentWithMemories } from '@supermemory/memory-graph';
+import { useEffect, useState } from 'react';
+
+export default function GraphPage() {
+ const [documents, setDocuments] = useState<DocumentWithMemories[]>([]);
+ const [isLoading, setIsLoading] = useState(true);
+ const [error, setError] = useState<Error | null>(null);
+
+ useEffect(() => {
+ fetch('/api/graph')
+ .then(res => res.json())
+ .then(data => {
+ setDocuments(data.documents);
+ setIsLoading(false);
+ })
+ .catch(err => {
+ setError(err);
+ setIsLoading(false);
+ });
+ }, []);
+
+ return (
+ <div style={{ height: '100vh' }}>
+ <MemoryGraph
+ documents={documents}
+ isLoading={isLoading}
+ error={error}
+ variant="console"
+ />
+ </div>
+ );
+}
+```
+
+## Backend API Route
+
+Create an API route to fetch documents from Supermemory:
+
+<CodeGroup>
+
+```typescript Next.js App Router
+// app/api/graph/route.ts
+import { NextResponse } from 'next/server';
+
+export async function GET() {
+ const response = await fetch('https://api.supermemory.ai/v3/documents/documents', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${process.env.SUPERMEMORY_API_KEY}`,
+ },
+ body: JSON.stringify({
+ page: 1,
+ limit: 500,
+ sort: 'createdAt',
+ order: 'desc',
+ }),
+ });
+
+ const data = await response.json();
+ return NextResponse.json(data);
+}
+```
+
+```typescript Next.js Pages Router
+// pages/api/graph.ts
+import type { NextApiRequest, NextApiResponse } from 'next';
+
+export default async function handler(req: NextApiRequest, res: NextApiResponse) {
+ const response = await fetch('https://api.supermemory.ai/v3/documents/documents', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${process.env.SUPERMEMORY_API_KEY}`,
+ },
+ body: JSON.stringify({ page: 1, limit: 500, sort: 'createdAt', order: 'desc' }),
+ });
+
+ const data = await response.json();
+ res.json(data);
+}
+```
+
+```javascript Express
+app.get('/api/graph', async (req, res) => {
+ const response = await fetch('https://api.supermemory.ai/v3/documents/documents', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${process.env.SUPERMEMORY_API_KEY}`,
+ },
+ body: JSON.stringify({ page: 1, limit: 500, sort: 'createdAt', order: 'desc' }),
+ });
+
+ const data = await response.json();
+ res.json(data);
+});
+```
+
+</CodeGroup>
+
+<Warning>
+ Never expose your Supermemory API key to the client. Always fetch data through your backend.
+</Warning>
+
+---
+
+## Variants
+
+**Console Variant** - Full-featured dashboard view (0.8x zoom, space selector visible):
+
+```tsx
+<MemoryGraph documents={documents} variant="console" />
+```
+
+**Consumer Variant** - Embedded widget view (0.5x zoom, space selector hidden):
+
+```tsx
+<MemoryGraph documents={documents} variant="consumer" />
+```
+
+---
+
+## Examples
+
+### With Pagination
+
+```tsx
+'use client';
+
+import { MemoryGraph } from '@supermemory/memory-graph';
+import { useCallback, useEffect, useState } from 'react';
+
+export default function PaginatedGraph() {
+ const [documents, setDocuments] = useState([]);
+ const [page, setPage] = useState(1);
+ const [hasMore, setHasMore] = useState(true);
+ const [isLoading, setIsLoading] = useState(true);
+ const [isLoadingMore, setIsLoadingMore] = useState(false);
+
+ useEffect(() => { fetchPage(1, false); }, []);
+
+ const fetchPage = async (pageNum, append) => {
+ pageNum === 1 ? setIsLoading(true) : setIsLoadingMore(true);
+
+ const res = await fetch(`/api/graph?page=${pageNum}&limit=100`);
+ const data = await res.json();
+
+ append ? setDocuments(prev => [...prev, ...data.documents]) : setDocuments(data.documents);
+ setHasMore(data.pagination.currentPage < data.pagination.totalPages);
+ setIsLoading(false);
+ setIsLoadingMore(false);
+ };
+
+ const loadMore = useCallback(async () => {
+ if (!isLoadingMore && hasMore) {
+ const nextPage = page + 1;
+ setPage(nextPage);
+ await fetchPage(nextPage, true);
+ }
+ }, [page, hasMore, isLoadingMore]);
+
+ return (
+ <MemoryGraph
+ documents={documents}
+ isLoading={isLoading}
+ isLoadingMore={isLoadingMore}
+ hasMore={hasMore}
+ totalLoaded={documents.length}
+ loadMoreDocuments={loadMore}
+ />
+ );
+}
+```
+
+### Highlighting Search Results
+
+```tsx
+<MemoryGraph
+ documents={documents}
+ highlightDocumentIds={searchResults}
+ highlightsVisible={searchResults.length > 0}
+/>
+```
+
+### Controlled Space Selection
+
+```tsx
+<MemoryGraph
+ documents={documents}
+ selectedSpace={selectedSpace}
+ onSpaceChange={setSelectedSpace}
+ showSpacesSelector={false}
+/>
+```
+
+### Custom Empty State
+
+```tsx
+<MemoryGraph documents={documents} isLoading={isLoading}>
+ <div style={{ textAlign: 'center', padding: '2rem' }}>
+ <h2>No memories yet</h2>
+ <p>Add content to see your knowledge graph</p>
+ </div>
+</MemoryGraph>
+```
+
+---
+
+## Props Reference
+
+### Core Props
+
+| Prop | Type | Default | Description |
+|------|------|---------|-------------|
+| `documents` | `DocumentWithMemories[]` | required | Array of documents to display |
+| `isLoading` | `boolean` | `false` | Shows loading indicator |
+| `error` | `Error \| null` | `null` | Error to display |
+| `variant` | `"console" \| "consumer"` | `"console"` | Visual variant |
+| `children` | `ReactNode` | - | Custom empty state content |
+
+### Pagination Props
+
+| Prop | Type | Default | Description |
+|------|------|---------|-------------|
+| `isLoadingMore` | `boolean` | `false` | Shows indicator when loading more |
+| `hasMore` | `boolean` | `false` | Whether more documents available |
+| `totalLoaded` | `number` | - | Total documents currently loaded |
+| `loadMoreDocuments` | `() => Promise<void>` | - | Callback to load more |
+| `autoLoadOnViewport` | `boolean` | `true` | Auto-load when 80% visible |
+
+### Display Props
+
+| Prop | Type | Default | Description |
+|------|------|---------|-------------|
+| `showSpacesSelector` | `boolean` | variant-based | Show space filter dropdown |
+| `highlightDocumentIds` | `string[]` | `[]` | Document IDs to highlight |
+| `highlightsVisible` | `boolean` | `true` | Whether highlights shown |
+| `occludedRightPx` | `number` | `0` | Pixels occluded on right |
+
+### Controlled State Props
+
+| Prop | Type | Description |
+|------|------|-------------|
+| `selectedSpace` | `string` | Currently selected space (use `"all"` for all) |
+| `onSpaceChange` | `(spaceId: string) => void` | Callback when space changes |
+| `memoryLimit` | `number` | Max memories per document when space selected |
+
+---
+
+## Data Types
+
+### DocumentWithMemories
+
+```typescript
+interface DocumentWithMemories {
+ id: string;
+ customId?: string | null;
+ title?: string | null;
+ content?: string | null;
+ summary?: string | null;
+ url?: string | null;
+ source?: string | null;
+ type?: string | null;
+ status: 'pending' | 'processing' | 'done' | 'failed';
+ metadata?: Record<string, string | number | boolean> | null;
+ createdAt: string | Date;
+ updatedAt: string | Date;
+ memoryEntries: MemoryEntry[];
+}
+```
+
+### MemoryEntry
+
+```typescript
+interface MemoryEntry {
+ id: string;
+ documentId: string;
+ content: string | null;
+ summary?: string | null;
+ title?: string | null;
+ type?: string | null;
+ metadata?: Record<string, string | number | boolean> | null;
+ createdAt: string | Date;
+ updatedAt: string | Date;
+ spaceContainerTag?: string | null;
+ relation?: 'updates' | 'extends' | 'derives' | null;
+ isLatest?: boolean;
+ spaceId?: string | null;
+}
+```
+
+---
+
+## Exports
+
+### Components
+
+```typescript
+import {
+ MemoryGraph,
+ GraphCanvas,
+ Legend,
+ LoadingIndicator,
+ NodeDetailPanel,
+ SpacesDropdown
+} from '@supermemory/memory-graph';
+```
+
+### Hooks
+
+```typescript
+import { useGraphData, useGraphInteractions } from '@supermemory/memory-graph';
+```
+
+### Constants
+
+```typescript
+import { colors, GRAPH_SETTINGS, LAYOUT_CONSTANTS } from '@supermemory/memory-graph';
+```
+
+---
+
+## Performance
+
+The graph handles hundreds of nodes efficiently through:
+- Canvas-based rendering (not DOM elements)
+- Viewport culling (only draws visible nodes)
+- Level-of-detail optimization (simplifies when zoomed out)
+- Change-based rendering (only redraws when state changes)
+
+For very large datasets (1000+ documents), use pagination to load data in chunks.
+
+## Browser Support
+
+Works in all modern browsers supporting Canvas 2D API, ES2020, and CSS custom properties. Tested on Chrome, Firefox, Safari, and Edge.
diff --git a/apps/docs/integrations/n8n.mdx b/apps/docs/integrations/n8n.mdx
new file mode 100644
index 00000000..9fbf88e3
--- /dev/null
+++ b/apps/docs/integrations/n8n.mdx
@@ -0,0 +1,93 @@
+---
+title: "n8n"
+sidebarTitle: "n8n"
+description: "Automate knowledge management with Supermemory in n8n workflows"
+icon: "workflow"
+---
+
+Connect Supermemory to n8n to build intelligent automations and agents that leverage your full knowledge base.
+
+## Quick Start
+
+### Prerequisites
+
+- n8n instance (self-hosted or cloud)
+- Supermemory API key ([get one here](https://console.supermemory.com/settings))
+- Basic understanding of n8n workflows
+
+### Setting Up the HTTP Request Node
+
+The Supermemory integration in n8n uses the HTTP Request node to interact with the Supermemory API. Here's how to configure it:
+
+1. Add an **HTTP Request** node to your workflow (Core > HTTP Request)
+![](/images/core-http-req.png)
+2. Set the **Method** to `POST`
+3. Set the **URL** to the appropriate Supermemory API endpoint:
+ - Add memory: `https://api.supermemory.ai/v3/documents`
+ - Search memories: `https://api.supermemory.ai/v4/search`
+4. For authentication, select **Generic Credential Type** and then **Bearer Auth**
+5. Click on **Create New Credential** and paste the Supermemory API Key in the Bearer Token field.
+![](/images/bearer-auth-add-n8n.png)
+6. Check **Send Body** and select **JSON** as the Body Content Type. The fields depend on what API endpoint you're sending the request to. You can find detailed step-by-step examples below.
+
+## Step-by-Step Tutorial
+
+In this tutorial, we'll create a workflow that automatically adds every email from Gmail to your Supermemory knowledge base. We'll use the HTTP Request node to send email data to Supermemory's API, creating a searchable archive of all your communications.
+
+### Adding Gmail Emails to Supermemory
+
+Follow these steps to build a workflow that captures and stores your Gmail messages:
+
+#### Step 1: Set Up Gmail Trigger
+
+![](/images/gmail-trigger.png)
+
+1. **Add a Gmail Trigger node** to your workflow
+2. Configure your Gmail credentials (OAuth2 recommended)
+3. Set the trigger to **Message Received**
+4. Optional: Add labels or filters to process specific emails only
+
+#### Step 2: Configure HTTP Request Node
+
+1. **Add an HTTP Request node** after the Gmail Trigger
+2. **Method**: `POST`
+3. **URL**: `https://api.supermemory.ai/v3/documents`
+4. Select the auth credentials you created earlier with your Supermemory API key.
+
+#### Step 3: Format Email Data for Supermemory
+
+In the HTTP Request node's **Body**, select **JSON** and **Using Fields Below**.
+
+Then create two fields:
+
+1. name: `content`, value: `{{ $json.snippet }}`
+2. name: `containerTag`, value: `gmail`
+
+
+![](/images/gmail-content.png)
+
+#### Step 4: Handle Attachments (Optional)
+
+If you want to process attachments:
+
+1. **Add a Loop node** after the Gmail Trigger
+2. Loop through `{{$json.attachments}}`
+3. **Add a Gmail node** to download each attachment
+4. **Add another HTTP Request node** to store attachment metadata
+
+
+#### Step 5: Add Error Handling
+
+1. **Add an Error Trigger node** connected to your workflow
+2. Configure it to catch errors from the HTTP Request node
+3. **Add a notification node** (Email, Slack, etc.) to alert you of failures
+4. Optional: Add a **Wait node** with retry logic
+
+#### Step 6: Test Your Workflow
+
+1. **Activate the workflow** in test mode
+2. Send a test email to your Gmail account
+3. Check the execution to ensure the email was captured
+4. Verify in Supermemory that the email appears in search results
+
+Refer to the API Reference tab to learn more about other Supermemory API endpoints. \ No newline at end of file
diff --git a/apps/docs/integrations/openai.mdx b/apps/docs/integrations/openai.mdx
new file mode 100644
index 00000000..66b797c1
--- /dev/null
+++ b/apps/docs/integrations/openai.mdx
@@ -0,0 +1,655 @@
+---
+title: "OpenAI SDK"
+sidebarTitle: "OpenAI SDK"
+description: "Memory tools for OpenAI function calling with Supermemory integration"
+icon: "/images/openai.svg"
+---
+
+Add memory capabilities to the official OpenAI SDKs using Supermemory. Two approaches available:
+
+1. **`withSupermemory` wrapper** - Automatic memory injection into system prompts (zero-config)
+2. **Function calling tools** - Explicit tool calls for search/add memory operations
+
+<Tip>
+**New to Supermemory?** Start with `withSupermemory` for the simplest integration. It automatically injects relevant memories into your prompts.
+</Tip>
+
+<CardGroup>
+<Card title="Supermemory tools on npm" icon="npm" href="https://www.npmjs.com/package/@supermemory/tools">
+ Check out the NPM page for more details
+</Card>
+<Card title="Supermemory OpenAI SDK on PyPI" icon="python" href="https://pypi.org/project/supermemory-openai-sdk/">
+ Check out the PyPI page for more details
+</Card>
+</CardGroup>
+
+---
+
+## withSupermemory Wrapper
+
+The simplest way to add memory to your OpenAI client. Wraps your client to automatically inject relevant memories into system prompts.
+
+### Installation
+
+```bash
+npm install @supermemory/tools openai
+```
+
+### Quick Start
+
+```typescript
+import OpenAI from "openai"
+import { withSupermemory } from "@supermemory/tools/openai"
+
+const openai = new OpenAI()
+
+// Wrap client with memory - memories auto-injected into system prompts
+const client = withSupermemory(openai, "user-123", {
+ mode: "full", // "profile" | "query" | "full"
+ addMemory: "always", // "always" | "never"
+})
+
+// Use normally - memories are automatically included
+const response = await client.chat.completions.create({
+ model: "gpt-5",
+ messages: [
+ { role: "system", content: "You are a helpful assistant." },
+ { role: "user", content: "What's my favorite programming language?" }
+ ]
+})
+```
+
+### Configuration Options
+
+```typescript
+const client = withSupermemory(openai, "user-123", {
+ // Memory search mode
+ mode: "full", // "profile" (user profile only), "query" (search only), "full" (both)
+
+ // Auto-save conversations as memories
+ addMemory: "always", // "always" | "never"
+
+ // Group messages into conversations
+ conversationId: "conv-456",
+
+ // Enable debug logging
+ verbose: true,
+
+ // Custom API endpoint
+ baseUrl: "https://custom.api.com"
+})
+```
+
+### Modes Explained
+
+| Mode | Description | Use Case |
+|------|-------------|----------|
+| `profile` | Injects user profile (static + dynamic facts) | General personalization |
+| `query` | Searches memories based on user message | Question answering |
+| `full` | Both profile and query-based search | Best for chatbots |
+
+### Works with Responses API Too
+
+```typescript
+const client = withSupermemory(openai, "user-123", { mode: "full" })
+
+// Memories injected into instructions
+const response = await client.responses.create({
+ model: "gpt-5",
+ instructions: "You are a helpful assistant.",
+ input: "What do you know about me?"
+})
+```
+
+### Environment Variables
+
+```bash
+SUPERMEMORY_API_KEY=your_supermemory_key
+OPENAI_API_KEY=your_openai_key
+```
+
+---
+
+## Function Calling Tools
+
+For explicit control over memory operations, use function calling tools. The model decides when to search or add memories.
+
+## Installation
+
+<CodeGroup>
+
+```bash Python
+# Using uv (recommended)
+uv add supermemory-openai-sdk
+
+# Or with pip
+pip install supermemory-openai-sdk
+```
+
+```bash JavaScript/TypeScript
+npm install @supermemory/tools
+```
+
+</CodeGroup>
+
+## Quick Start
+
+<CodeGroup>
+
+```python Python SDK
+import asyncio
+import openai
+from supermemory_openai import SupermemoryTools, execute_memory_tool_calls
+
+async def main():
+ # Initialize OpenAI client
+ client = openai.AsyncOpenAI(api_key="your-openai-api-key")
+
+ # Initialize Supermemory tools
+ tools = SupermemoryTools(
+ api_key="your-supermemory-api-key",
+ config={"project_id": "my-project"}
+ )
+
+ # Chat with memory tools
+ response = await client.chat.completions.create(
+ model="gpt-5",
+ messages=[
+ {
+ "role": "system",
+ "content": "You are a helpful assistant with access to user memories."
+ },
+ {
+ "role": "user",
+ "content": "Remember that I prefer tea over coffee"
+ }
+ ],
+ tools=tools.get_tool_definitions()
+ )
+
+ # Handle tool calls if present
+ if response.choices[0].message.tool_calls:
+ tool_results = await execute_memory_tool_calls(
+ api_key="your-supermemory-api-key",
+ tool_calls=response.choices[0].message.tool_calls,
+ config={"project_id": "my-project"}
+ )
+ print("Tool results:", tool_results)
+
+ print(response.choices[0].message.content)
+
+asyncio.run(main())
+```
+
+```typescript JavaScript/TypeScript SDK
+import { supermemoryTools, getToolDefinitions, createToolCallExecutor } from "@supermemory/tools/openai"
+import OpenAI from "openai"
+
+const client = new OpenAI({
+ apiKey: process.env.OPENAI_API_KEY!,
+})
+
+// Get tool definitions for OpenAI
+const toolDefinitions = getToolDefinitions()
+
+// Create tool executor
+const executeToolCall = createToolCallExecutor(process.env.SUPERMEMORY_API_KEY!, {
+ projectId: "your-project-id",
+})
+
+// Use with OpenAI Chat Completions
+const completion = await client.chat.completions.create({
+ model: "gpt-5",
+ messages: [
+ {
+ role: "user",
+ content: "What do you remember about my preferences?",
+ },
+ ],
+ tools: toolDefinitions,
+})
+
+// Execute tool calls if any
+if (completion.choices[0]?.message.tool_calls) {
+ for (const toolCall of completion.choices[0].message.tool_calls) {
+ const result = await executeToolCall(toolCall)
+ console.log(result)
+ }
+}
+```
+
+</CodeGroup>
+
+## Configuration
+
+### Memory Tools Configuration
+
+<CodeGroup>
+
+```python Python Configuration
+from supermemory_openai import SupermemoryTools
+
+tools = SupermemoryTools(
+ api_key="your-supermemory-api-key",
+ config={
+ "project_id": "my-project", # or use container_tags
+ "base_url": "https://custom-endpoint.com", # optional
+ }
+)
+```
+
+```typescript JavaScript Configuration
+import { supermemoryTools } from "@supermemory/tools/openai"
+
+const tools = supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
+ containerTags: ["your-user-id"],
+ baseUrl: "https://custom-endpoint.com", // optional
+})
+```
+
+</CodeGroup>
+
+## Available Tools
+
+### Search Memories
+
+Search through user memories using semantic search:
+
+<CodeGroup>
+
+```python Python
+# Search memories
+result = await tools.search_memories(
+ information_to_get="user preferences",
+ limit=10,
+ include_full_docs=True
+)
+print(f"Found {len(result.memories)} memories")
+```
+
+```typescript JavaScript
+// Search memories
+const searchResult = await tools.searchMemories({
+ informationToGet: "user preferences",
+ limit: 10,
+})
+console.log(`Found ${searchResult.memories.length} memories`)
+```
+
+</CodeGroup>
+
+### Add Memory
+
+Store new information in memory:
+
+<CodeGroup>
+
+```python Python
+# Add memory
+result = await tools.add_memory(
+ memory="User prefers tea over coffee"
+)
+print(f"Added memory with ID: {result.memory.id}")
+```
+
+```typescript JavaScript
+// Add memory
+const addResult = await tools.addMemory({
+ memory: "User prefers dark roast coffee",
+})
+console.log(`Added memory with ID: ${addResult.memory.id}`)
+```
+
+</CodeGroup>
+
+## Individual Tools
+
+Use tools separately for more granular control:
+
+<CodeGroup>
+
+```python Python Individual Tools
+from supermemory_openai import (
+ create_search_memories_tool,
+ create_add_memory_tool
+)
+
+search_tool = create_search_memories_tool("your-api-key")
+add_tool = create_add_memory_tool("your-api-key")
+
+# Use individual tools in OpenAI function calling
+tools_list = [search_tool, add_tool]
+```
+
+```typescript JavaScript Individual Tools
+import {
+ createSearchMemoriesTool,
+ createAddMemoryTool
+} from "@supermemory/tools/openai"
+
+const searchTool = createSearchMemoriesTool(process.env.SUPERMEMORY_API_KEY!)
+const addTool = createAddMemoryTool(process.env.SUPERMEMORY_API_KEY!)
+
+// Use individual tools
+const toolDefinitions = [searchTool.definition, addTool.definition]
+```
+
+</CodeGroup>
+
+## Complete Chat Example
+
+Here's a complete example showing a multi-turn conversation with memory:
+
+<CodeGroup>
+
+```python Complete Python Example
+import asyncio
+import openai
+from supermemory_openai import SupermemoryTools, execute_memory_tool_calls
+
+async def chat_with_memory():
+ client = openai.AsyncOpenAI()
+ tools = SupermemoryTools(
+ api_key="your-supermemory-api-key",
+ config={"project_id": "chat-example"}
+ )
+
+ messages = [
+ {
+ "role": "system",
+ "content": """You are a helpful assistant with memory capabilities.
+ When users share personal information, remember it using addMemory.
+ When they ask questions, search your memories to provide personalized responses."""
+ }
+ ]
+
+ while True:
+ user_input = input("You: ")
+ if user_input.lower() == 'quit':
+ break
+
+ messages.append({"role": "user", "content": user_input})
+
+ # Get AI response with tools
+ response = await client.chat.completions.create(
+ model="gpt-5",
+ messages=messages,
+ tools=tools.get_tool_definitions()
+ )
+
+ # Handle tool calls
+ if response.choices[0].message.tool_calls:
+ messages.append(response.choices[0].message)
+
+ tool_results = await execute_memory_tool_calls(
+ api_key="your-supermemory-api-key",
+ tool_calls=response.choices[0].message.tool_calls,
+ config={"project_id": "chat-example"}
+ )
+
+ messages.extend(tool_results)
+
+ # Get final response after tool execution
+ final_response = await client.chat.completions.create(
+ model="gpt-5",
+ messages=messages
+ )
+
+ assistant_message = final_response.choices[0].message.content
+ else:
+ assistant_message = response.choices[0].message.content
+ messages.append({"role": "assistant", "content": assistant_message})
+
+ print(f"Assistant: {assistant_message}")
+
+# Run the chat
+asyncio.run(chat_with_memory())
+```
+
+```typescript Complete JavaScript Example
+import OpenAI from "openai"
+import { getToolDefinitions, createToolCallExecutor } from "@supermemory/tools/openai"
+import readline from 'readline'
+
+const client = new OpenAI()
+const executeToolCall = createToolCallExecutor(process.env.SUPERMEMORY_API_KEY!, {
+ projectId: "chat-example",
+})
+
+const rl = readline.createInterface({
+ input: process.stdin,
+ output: process.stdout,
+})
+
+async function chatWithMemory() {
+ const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [
+ {
+ role: "system",
+ content: `You are a helpful assistant with memory capabilities.
+ When users share personal information, remember it using addMemory.
+ When they ask questions, search your memories to provide personalized responses.`
+ }
+ ]
+
+ const askQuestion = () => {
+ rl.question("You: ", async (userInput) => {
+ if (userInput.toLowerCase() === 'quit') {
+ rl.close()
+ return
+ }
+
+ messages.push({ role: "user", content: userInput })
+
+ // Get AI response with tools
+ const response = await client.chat.completions.create({
+ model: "gpt-5",
+ messages,
+ tools: getToolDefinitions(),
+ })
+
+ const choice = response.choices[0]
+ if (choice?.message.tool_calls) {
+ messages.push(choice.message)
+
+ // Execute tool calls
+ for (const toolCall of choice.message.tool_calls) {
+ const result = await executeToolCall(toolCall)
+ messages.push({
+ role: "tool",
+ tool_call_id: toolCall.id,
+ content: JSON.stringify(result),
+ })
+ }
+
+ // Get final response after tool execution
+ const finalResponse = await client.chat.completions.create({
+ model: "gpt-5",
+ messages,
+ })
+
+ const assistantMessage = finalResponse.choices[0]?.message.content || "No response"
+ console.log(`Assistant: ${assistantMessage}`)
+ messages.push({ role: "assistant", content: assistantMessage })
+ } else {
+ const assistantMessage = choice?.message.content || "No response"
+ console.log(`Assistant: ${assistantMessage}`)
+ messages.push({ role: "assistant", content: assistantMessage })
+ }
+
+ askQuestion()
+ })
+ }
+
+ console.log("Chat with memory started. Type 'quit' to exit.")
+ askQuestion()
+}
+
+chatWithMemory()
+```
+
+</CodeGroup>
+
+## Error Handling
+
+Handle errors gracefully in your applications:
+
+<CodeGroup>
+
+```python Python Error Handling
+from supermemory_openai import SupermemoryTools
+import openai
+
+async def safe_chat():
+ try:
+ client = openai.AsyncOpenAI()
+ tools = SupermemoryTools(api_key="your-api-key")
+
+ response = await client.chat.completions.create(
+ model="gpt-5",
+ messages=[{"role": "user", "content": "Hello"}],
+ tools=tools.get_tool_definitions()
+ )
+
+ except openai.APIError as e:
+ print(f"OpenAI API error: {e}")
+ except Exception as e:
+ print(f"Unexpected error: {e}")
+```
+
+```typescript JavaScript Error Handling
+import OpenAI from "openai"
+import { getToolDefinitions } from "@supermemory/tools/openai"
+
+async function safeChat() {
+ try {
+ const client = new OpenAI()
+
+ const response = await client.chat.completions.create({
+ model: "gpt-5",
+ messages: [{ role: "user", content: "Hello" }],
+ tools: getToolDefinitions(),
+ })
+
+ } catch (error) {
+ if (error instanceof OpenAI.APIError) {
+ console.error("OpenAI API error:", error.message)
+ } else {
+ console.error("Unexpected error:", error)
+ }
+ }
+}
+```
+
+</CodeGroup>
+
+## API Reference
+
+### Python SDK
+
+#### `SupermemoryTools`
+
+**Constructor**
+```python
+SupermemoryTools(
+ api_key: str,
+ config: Optional[SupermemoryToolsConfig] = None
+)
+```
+
+**Methods**
+- `get_tool_definitions()` - Get OpenAI function definitions
+- `search_memories(information_to_get, limit, include_full_docs)` - Search user memories
+- `add_memory(memory)` - Add new memory
+- `execute_tool_call(tool_call)` - Execute individual tool call
+
+#### `execute_memory_tool_calls`
+
+```python
+execute_memory_tool_calls(
+ api_key: str,
+ tool_calls: List[ToolCall],
+ config: Optional[SupermemoryToolsConfig] = None
+) -> List[dict]
+```
+
+### JavaScript SDK
+
+#### `supermemoryTools`
+
+```typescript
+supermemoryTools(
+ apiKey: string,
+ config?: { projectId?: string; baseUrl?: string }
+)
+```
+
+#### `createToolCallExecutor`
+
+```typescript
+createToolCallExecutor(
+ apiKey: string,
+ config?: { projectId?: string; baseUrl?: string }
+): (toolCall: OpenAI.Chat.ChatCompletionMessageToolCall) => Promise<any>
+```
+
+## Environment Variables
+
+Set these environment variables:
+
+```bash
+SUPERMEMORY_API_KEY=your_supermemory_key
+OPENAI_API_KEY=your_openai_key
+SUPERMEMORY_BASE_URL=https://custom-endpoint.com # optional
+```
+
+## Development
+
+### Python Setup
+
+```bash
+# Install uv
+curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Setup project
+git clone <repository-url>
+cd packages/openai-sdk-python
+uv sync --dev
+
+# Run tests
+uv run pytest
+
+# Type checking
+uv run mypy src/supermemory_openai
+
+# Formatting
+uv run black src/ tests/
+uv run isort src/ tests/
+```
+
+### JavaScript Setup
+
+```bash
+# Install dependencies
+npm install
+
+# Run tests
+npm test
+
+# Type checking
+npm run type-check
+
+# Linting
+npm run lint
+```
+
+## Next Steps
+
+<CardGroup cols={2}>
+ <Card title="AI SDK Integration" icon="triangle" href="/integrations/ai-sdk">
+ Use with Vercel AI SDK for streamlined development
+ </Card>
+
+ <Card title="Memory API" icon="database" href="/memory-api/overview">
+ Direct API access for advanced memory management
+ </Card>
+</CardGroup>
diff --git a/apps/docs/integrations/pipecat.mdx b/apps/docs/integrations/pipecat.mdx
new file mode 100644
index 00000000..c96f27e1
--- /dev/null
+++ b/apps/docs/integrations/pipecat.mdx
@@ -0,0 +1,203 @@
+---
+title: "Pipecat"
+sidebarTitle: "Pipecat (Voice)"
+description: "Integrate Supermemory with Pipecat for conversational memory in voice AI agents"
+icon: "/images/pipecat.svg"
+---
+
+Supermemory integrates with [Pipecat](https://github.com/pipecat-ai/pipecat), providing long-term memory capabilities for voice AI agents. Your Pipecat applications will remember past conversations and provide personalized responses based on user history.
+
+## Installation
+
+To use Supermemory with Pipecat, install the required dependencies:
+
+```bash
+pip install supermemory-pipecat
+```
+
+Set up your API key as an environment variable:
+
+```bash
+export SUPERMEMORY_API_KEY=your_supermemory_api_key
+```
+
+You can obtain an API key from [console.supermemory.ai](https://console.supermemory.ai).
+
+## Configuration
+
+Supermemory integration is provided through the `SupermemoryPipecatService` class in Pipecat:
+
+```python
+from supermemory_pipecat import SupermemoryPipecatService, InputParams
+
+memory = SupermemoryPipecatService(
+ api_key=os.getenv("SUPERMEMORY_API_KEY"),
+ user_id="unique_user_id",
+ session_id="session_123",
+ params=InputParams(
+ mode="full", # "profile" | "query" | "full"
+ search_limit=10, # Max memories to retrieve
+ search_threshold=0.1, # Relevance threshold (0.0-1.0)
+ system_prompt="Based on previous conversations:\n\n",
+ ),
+)
+```
+
+## Pipeline Integration
+
+The `SupermemoryPipecatService` should be positioned between your context aggregator and LLM service in the Pipecat pipeline:
+
+```python
+pipeline = Pipeline([
+ transport.input(),
+ stt, # Speech-to-text
+ context_aggregator.user(),
+ memory, # <- Supermemory memory service
+ llm,
+ tts, # Text-to-speech
+ transport.output(),
+ context_aggregator.assistant(),
+])
+```
+
+## How It Works
+
+When integrated with Pipecat, Supermemory provides two key functionalities:
+
+### 1. Memory Retrieval
+
+When a user message is detected, Supermemory retrieves relevant memories:
+
+- **Static Profile**: Persistent facts about the user
+- **Dynamic Profile**: Recent context and preferences
+- **Search Results**: Semantically relevant past memories
+
+### 2. Context Enhancement
+
+Retrieved memories are formatted and injected into the LLM context before generation, giving the model awareness of past conversations.
+
+## Memory Modes
+
+| Mode | Static Profile | Dynamic Profile | Search Results | Use Case |
+|------|----------------|-----------------|----------------|----------|
+| `"profile"` | Yes | Yes | No | Personalization without search |
+| `"query"` | No | No | Yes | Finding relevant past context |
+| `"full"` | Yes | Yes | Yes | Complete memory (default) |
+
+## Configuration Options
+
+You can customize how memories are retrieved and used:
+
+### InputParams
+
+```python
+InputParams(
+ mode="full", # Memory mode (default: "full")
+ search_limit=10, # Max memories to retrieve (default: 10)
+ search_threshold=0.1, # Similarity threshold 0.0-1.0 (default: 0.1)
+ system_prompt="Based on previous conversations:\n\n",
+)
+```
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|-------------|
+| `search_limit` | int | 10 | Maximum number of memories to retrieve per query |
+| `search_threshold` | float | 0.1 | Minimum similarity threshold for memory retrieval |
+| `mode` | str | "full" | Memory retrieval mode: `"profile"`, `"query"`, or `"full"` |
+| `system_prompt` | str | "Based on previous conversations:\n\n" | Prefix text for memory context |
+
+## Example: Voice Agent with Memory
+
+Here's a complete example of a Pipecat voice agent with Supermemory integration:
+
+```python
+import os
+from fastapi import FastAPI, WebSocket
+from fastapi.middleware.cors import CORSMiddleware
+
+from pipecat.audio.vad.silero import SileroVADAnalyzer
+from pipecat.frames.frames import LLMMessagesFrame
+from pipecat.pipeline.pipeline import Pipeline
+from pipecat.pipeline.runner import PipelineRunner
+from pipecat.pipeline.task import PipelineParams, PipelineTask
+from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
+from pipecat.serializers.protobuf import ProtobufFrameSerializer
+from pipecat.services.openai.llm import OpenAILLMService
+from pipecat.services.openai.tts import OpenAITTSService
+from pipecat.services.openai.stt import OpenAISTTService
+from pipecat.transports.websocket.fastapi import (
+ FastAPIWebsocketParams,
+ FastAPIWebsocketTransport,
+)
+
+from supermemory_pipecat import SupermemoryPipecatService, InputParams
+
+app = FastAPI()
+
+SYSTEM_PROMPT = """You are a helpful voice assistant with memory capabilities.
+You remember information from past conversations and use it to provide personalized responses.
+Keep responses brief and conversational."""
+
+
+async def run_bot(websocket_client, user_id: str, session_id: str):
+ transport = FastAPIWebsocketTransport(
+ websocket=websocket_client,
+ params=FastAPIWebsocketParams(
+ audio_in_enabled=True,
+ audio_out_enabled=True,
+ vad_enabled=True,
+ vad_analyzer=SileroVADAnalyzer(),
+ vad_audio_passthrough=True,
+ serializer=ProtobufFrameSerializer(),
+ ),
+ )
+
+ stt = OpenAISTTService(api_key=os.getenv("OPENAI_API_KEY"))
+ llm = OpenAILLMService(api_key=os.getenv("OPENAI_API_KEY"), model="gpt-5-mini")
+ tts = OpenAITTSService(api_key=os.getenv("OPENAI_API_KEY"), voice="alloy")
+
+ # Supermemory memory service
+ memory = SupermemoryPipecatService(
+ user_id=user_id,
+ session_id=session_id,
+ params=InputParams(
+ mode="full",
+ search_limit=10,
+ search_threshold=0.1,
+ ),
+ )
+
+ context = OpenAILLMContext([{"role": "system", "content": SYSTEM_PROMPT}])
+ context_aggregator = llm.create_context_aggregator(context)
+
+ pipeline = Pipeline([
+ transport.input(),
+ stt,
+ context_aggregator.user(),
+ memory,
+ llm,
+ tts,
+ transport.output(),
+ context_aggregator.assistant(),
+ ])
+
+ task = PipelineTask(pipeline, params=PipelineParams(allow_interruptions=True))
+
+ @transport.event_handler("on_client_disconnected")
+ async def on_client_disconnected(transport, client):
+ await task.cancel()
+
+ runner = PipelineRunner(handle_sigint=False)
+ await runner.run(task)
+
+
+async def websocket_endpoint(websocket: WebSocket):
+ await websocket.accept()
+ await run_bot(websocket, user_id="alice", session_id="session-123")
+
+
+if __name__ == "__main__":
+ import uvicorn
+ uvicorn.run(app, host="0.0.0.0", port=8000)
+```
diff --git a/apps/docs/integrations/supermemory-sdk.mdx b/apps/docs/integrations/supermemory-sdk.mdx
new file mode 100644
index 00000000..33c80517
--- /dev/null
+++ b/apps/docs/integrations/supermemory-sdk.mdx
@@ -0,0 +1,140 @@
+---
+title: 'Supermemory SDK'
+sidebarTitle: "Supermemory SDK"
+description: 'Official Python and JavaScript SDKs for Supermemory'
+icon: "/images/supermemory.svg"
+---
+
+<CardGroup cols={2}>
+ <Card title="Python SDK" icon="python" href="https://pypi.org/project/supermemory/">
+ pip install supermemory
+ </Card>
+ <Card title="JavaScript SDK" icon="js" href="https://www.npmjs.com/package/supermemory">
+ npm install supermemory
+ </Card>
+</CardGroup>
+
+<Tabs>
+ <Tab title="TypeScript">
+ ## Installation
+
+ ```bash
+ npm install supermemory
+ ```
+
+ ## Quick Start
+
+ ```typescript
+ import Supermemory from 'supermemory';
+
+ const client = new Supermemory({
+ apiKey: process.env.SUPERMEMORY_API_KEY, // Default, can be omitted
+ });
+
+ // Add a memory
+ await client.add({ content: "Meeting notes from Q1 planning", containerTags: ["user_123"] });
+
+ // Search memories
+ const response = await client.search.documents({
+ q: "planning notes",
+ containerTags: ["user_123"]
+ });
+ console.log(response.results);
+
+ // Get user profile
+ const profile = await client.profile({ containerTag: "user_123" });
+ console.log(profile.profile.static);
+ console.log(profile.profile.dynamic);
+ ```
+
+ ## Common Operations
+
+ ```typescript
+ // Add with metadata
+ await client.add({
+ content: "Technical design doc",
+ containerTags: ["user_123"],
+ metadata: { category: "engineering", priority: "high" }
+ });
+
+ // Search with filters
+ const results = await client.search.documents({
+ q: "design document",
+ containerTags: ["user_123"],
+ filters: {
+ AND: [
+ { key: "category", value: "engineering" }
+ ]
+ }
+ });
+
+ // List documents
+ const docs = await client.documents.list({ containerTags: ["user_123"], limit: 10 });
+
+ // Delete a document
+ await client.documents.delete({ docId: "doc_123" });
+ ```
+ </Tab>
+
+ <Tab title="Python">
+ ## Installation
+
+ ```bash
+ pip install supermemory
+ ```
+
+ ## Quick Start
+
+ ```python
+ import os
+ from supermemory import Supermemory
+
+ client = Supermemory(
+ api_key=os.environ.get("SUPERMEMORY_API_KEY"), # Default, can be omitted
+ )
+
+ # Add a memory
+ client.add(content="Meeting notes from Q1 planning", container_tags=["user_123"])
+
+ # Search memories
+ response = client.search.documents(
+ q="planning notes",
+ container_tags=["user_123"]
+ )
+ print(response.results)
+
+ # Get user profile
+ profile = client.profile(container_tag="user_123")
+ print(profile.profile.static)
+ print(profile.profile.dynamic)
+ ```
+
+ ## Common Operations
+
+ ```python
+ # Add with metadata
+ client.add(
+ content="Technical design doc",
+ container_tags=["user_123"],
+ metadata={"category": "engineering", "priority": "high"}
+ )
+
+ # Search with filters
+ results = client.search.documents(
+ q="design document",
+ container_tags=["user_123"],
+ filters={
+ "AND": [
+ {"key": "category", "value": "engineering"}
+ ]
+ }
+ )
+
+ # List documents
+ docs = client.documents.list(container_tags=["user_123"], limit=10)
+
+ # Delete a document
+ client.documents.delete(doc_id="doc_123")
+ ```
+ </Tab>
+</Tabs>
diff --git a/apps/docs/integrations/zapier.mdx b/apps/docs/integrations/zapier.mdx
new file mode 100644
index 00000000..f00cc6d5
--- /dev/null
+++ b/apps/docs/integrations/zapier.mdx
@@ -0,0 +1,65 @@
+---
+title: "Zapier"
+sidebarTitle: "Zapier"
+description: "Automate memory management with Supermemory in Zapier workflows"
+icon: "bolt"
+---
+
+With Supermemory you can now easily add memory to your Zapier workflow steps. Here's how:
+
+## Prerequisites
+- A Supermemory API Key. Get yours [here](https://console.supermemory.ai)
+
+## Step-by-step tutorial
+
+For this tutorial, we're building a simple flow that adds incoming emails in Gmail to Supermemory.
+
+<Steps>
+ <Step title="Make a flow">
+ Open your Zapier account and click on 'Zap' to make a new automation.
+ ![make a zap - annotated](/images/make-zap.png)
+ </Step>
+ <Step title="Add Gmail node">
+ Add a new Gmail node that gets triggered on every new email. Connect to your Google account.
+ ![add gmail](/images/add-gmail-node-zapier.png)
+ </Step>
+ <Step title="Add code block">
+ Now, add a new 'Code by Zapier' block. Set it up to run Python.
+
+ In the **Input Data** section, map the content field to the Gmail raw snippet.
+
+ ![](/images/map-content-to-gmail.png)
+ </Step>
+ <Step title="Integrate Supermemory">
+ Since we're ingesting data here, we'll use the add documents endpoint.
+
+ Add the following code block:
+
+ ```python
+ import requests
+
+ url = "https://api.supermemory.ai/v3/documents"
+
+ payload = { "content": inputData['content'], "containerTag": "gmail" }
+ headers = {
+ "Authorization": "Bearer YOUR_SM_API_KEY",
+ "Content-Type": "application/json"
+ }
+
+ response = requests.post(url, json=payload, headers=headers)
+
+ print(response.json())
+ ```
+
+ The `inputData['content']` field maps to the Gmail content fetched from Zapier.
+
+ ![](/images/zapier-output.png)
+ </Step>
+</Steps>
+
+<Note>
+  Zapier may show an error on the first test run; rerunning the test usually succeeds. This is a known quirk on Zapier's side.
+</Note>
+
+
+You can perform other operations — such as searching, filtering, and retrieving user profiles — by using other Supermemory API endpoints, which can be found in our API Reference tab. \ No newline at end of file
diff --git a/apps/docs/intro.mdx b/apps/docs/intro.mdx
index 3f6d64a9..ccbfe892 100644
--- a/apps/docs/intro.mdx
+++ b/apps/docs/intro.mdx
@@ -1,25 +1,32 @@
---
title: "Overview — What is Supermemory?"
sidebarTitle: "Overview"
-description = "Add long-term memory to your LLMs with three integration paths: AI SDK, Memory API, or Memory Router."
+icon: "book-open"
---
-Supermemory gives your LLMs long-term memory. Instead of stateless text generation, they recall the right facts from your files, chats, and tools, so responses stay consistent, contextual, and personal.
+Supermemory is the long-term and short-term memory and context infrastructure for AI agents. It is the [state of the art](https://supermemory.ai/research) across multiple benchmarks, such as LongMemEval and LoCoMo.
+With supermemory, developers can provide perfect recall about their users to build AI agents that are more intelligent, more personalized, and more consistent. Additionally, *supermemory* has all the pieces of the context stack built in:
+- [Agent memory](/concepts/graph-memory)
+- [Content extraction](/concepts/content-types)
+- [Connectors and syncing](/connectors/overview)
+- [Managed RAG platform](/concepts/super-rag)
+
+Together, these capabilities make supermemory the best context abstraction to provide to your agents.
## How does it work? (at a glance)
![](/images/232.png)
- You send Supermemory text, files, and chats.
-- Supermemory [intelligently indexes them](/how-it-works) and builds a semantic understanding graph on top of an entity (e.g., a user, a document, a project, an organization).
+- Supermemory [intelligently indexes them](/concepts/how-it-works) and builds a semantic understanding graph on top of an entity (e.g., a user, a document, a project, an organization).
- At query time, we fetch only the most relevant context and pass it to your models.
## Supermemory is context engineering.
#### Ingestion and Extraction
-Supermemory handles all the extraction, for any data type that you have.
+Supermemory handles all the extraction for [any data type you have](/concepts/content-types).
- Text
- Conversations
- Files (PDF, Images, Docs)
@@ -34,7 +41,7 @@ We offer three ways to add context to your LLMs:
![memory graph](/images/memory-graph.png)
Supermemory learns and builds the memory for the user. These are extracted facts about the user, that:
-- Evolve on top of existing context about the user, **in real time**
+- [Evolve on top of existing context about the user](/concepts/graph-memory), **in real time**
- Handle **knowledge updates, temporal changes, forgetfulness**
- Creates a **user profile** as the default context provider for the LLM.
@@ -42,7 +49,7 @@ _This can then be provided to the LLM, to give more contextual, personalized res
#### User profiles
-Having the latest, evolving context about the user allows us to also create a **User Profile**. This is a combination of static and dynamic facts about the user, that the agent should **always know**
+Having the latest, evolving context about the user allows us to also create a [**User Profile**](/concepts/user-profiles). This is a combination of static and dynamic facts about the user that the agent should **always know**.
Developers can configure supermemory with what static and dynamic contents are, depending on their use case.
- Static: Information that the agent should **always** know.
@@ -58,7 +65,7 @@ Along with the user context, developers can also choose to do a search on the ra
- Works well with the memory engine
<Info>
- You can reference the full API reference for the Memory API in the API Reference tab.
+ See the full [API Reference](/api-reference) for detailed endpoint documentation.
</Info>
@@ -68,4 +75,11 @@ All three approaches share the **same context pool** when using the same user ID
## Next steps
-Head to the [**How it works**](/how-it-works) guide to understand the underlying way of how supermemory represents and learns in data.
+<CardGroup cols={2}>
+ <Card title="Quickstart" icon="play" href="/quickstart">
+ Make your first API call in minutes
+ </Card>
+ <Card title="How it Works" icon="cpu" href="/concepts/how-it-works">
+ Understand the knowledge graph architecture
+ </Card>
+</CardGroup>
diff --git a/apps/docs/introduction.mdx b/apps/docs/introduction.mdx
index 197586d6..89f2437d 100644
--- a/apps/docs/introduction.mdx
+++ b/apps/docs/introduction.mdx
@@ -45,14 +45,14 @@ export const HeroCard = ({ imageUrl, title, description, href }) => {
imageUrl="https://imagedelivery.net/_Zs8NCbSWCQ8-iurXrWjBg/9af9572c-9f8d-42d8-f7d0-503a5f87a300/public"
title="User profiles"
description="One line to add memory to your app. Supercharge your LLM with supermemory's intelligent context management."
- href="/memory-router/overview"
+ href="/concepts/user-profiles"
/>
<HeroCard
imageUrl="https://imagedelivery.net/_Zs8NCbSWCQ8-iurXrWjBg/abcc107d-7271-4acf-a9a2-90ad7a6f2000/public"
title="SDKs"
description="Use supermemory with your favorite tools and platforms seamlessly."
- href="/memory-api/sdks/overview"
+ href="/integrations/supermemory-sdk"
/>
<HeroCard
diff --git a/apps/docs/list-memories/examples/basic.mdx b/apps/docs/list-memories/examples/basic.mdx
index c4358c44..e0fa40dc 100644
--- a/apps/docs/list-memories/examples/basic.mdx
+++ b/apps/docs/list-memories/examples/basic.mdx
@@ -16,7 +16,7 @@ Simple memory retrieval examples for getting started with the list memories endp
apiKey: process.env.SUPERMEMORY_API_KEY!
});
- const response = await client.memories.list({ limit: 10 });
+ const response = await client.documents.list({ limit: 10 });
console.log(response);
```
</Tab>
@@ -26,7 +26,7 @@ Simple memory retrieval examples for getting started with the list memories endp
import os
client = Supermemory(api_key=os.environ.get("SUPERMEMORY_API_KEY"))
- response = client.memories.list(limit=10)
+ response = client.documents.list(limit=10)
print(response)
```
</Tab>
@@ -45,7 +45,7 @@ Simple memory retrieval examples for getting started with the list memories endp
<Tabs>
<Tab title="TypeScript">
```typescript
- const response = await client.memories.list({
+ const response = await client.documents.list({
containerTags: ["user_123"],
limit: 20,
sort: "updatedAt",
@@ -57,7 +57,7 @@ Simple memory retrieval examples for getting started with the list memories endp
</Tab>
<Tab title="Python">
```python
- response = client.memories.list(
+ response = client.documents.list(
container_tags=["user_123"],
limit=20,
sort="updatedAt",
diff --git a/apps/docs/list-memories/examples/filtering.mdx b/apps/docs/list-memories/examples/filtering.mdx
index 97394fa9..d159fb55 100644
--- a/apps/docs/list-memories/examples/filtering.mdx
+++ b/apps/docs/list-memories/examples/filtering.mdx
@@ -13,12 +13,12 @@ Container tags use exact array matching - memories must have the exact same tags
<Tab title="TypeScript">
```typescript
// Single tag - matches memories with exactly ["user_123"]
- const userMemories = await client.memories.list({
+ const userMemories = await client.documents.list({
containerTags: ["user_123"]
});
// Multiple tags - matches memories with exactly ["user_123", "project_ai"]
- const projectMemories = await client.memories.list({
+ const projectMemories = await client.documents.list({
containerTags: ["user_123", "project_ai"]
});
```
@@ -26,10 +26,10 @@ Container tags use exact array matching - memories must have the exact same tags
<Tab title="Python">
```python
# Single tag
- user_memories = client.memories.list(container_tags=["user_123"])
+ user_memories = client.documents.list(container_tags=["user_123"])
# Multiple tags (exact match)
- project_memories = client.memories.list(
+ project_memories = client.documents.list(
container_tags=["user_123", "project_ai"]
)
```
@@ -72,7 +72,7 @@ The JSON structure forces explicit grouping to prevent unexpected results.
<Info>
**Filter Structure Rules:**
- Always wrap conditions in `AND` or `OR` arrays (even single conditions)
-- Use `JSON.stringify()` to convert the filter object to a string
+- Pass the filter as an object (TypeScript/Python) or JSON string (cURL)
- Each condition needs `key`, `value`, and `negate` properties
- `negate: false` for normal matching, `negate: true` for exclusion
</Info>
@@ -83,26 +83,24 @@ The JSON structure forces explicit grouping to prevent unexpected results.
<Tab title="TypeScript">
```typescript
// Filter by single metadata field
- const programmingMemories = await client.memories.list({
- filters: JSON.stringify({
+ const programmingMemories = await client.documents.list({
+ filters: {
AND: [
{ key: "category", value: "programming", negate: false }
]
- })
+ }
});
```
</Tab>
<Tab title="Python">
```python
- import json
-
# Filter by single metadata field
- programming_memories = client.memories.list(
- filters=json.dumps({
+ programming_memories = client.documents.list(
+ filters={
"AND": [
{"key": "category", "value": "programming", "negate": False}
]
- })
+ }
)
```
</Tab>
@@ -124,28 +122,28 @@ The JSON structure forces explicit grouping to prevent unexpected results.
<Tab title="TypeScript">
```typescript
// All conditions must match
- const reactTutorials = await client.memories.list({
- filters: JSON.stringify({
+ const reactTutorials = await client.documents.list({
+ filters: {
AND: [
{ key: "category", value: "tutorial", negate: false },
{ key: "framework", value: "react", negate: false },
{ key: "difficulty", value: "beginner", negate: false }
]
- })
+ }
});
```
</Tab>
<Tab title="Python">
```python
# All conditions must match
- react_tutorials = client.memories.list(
- filters=json.dumps({
+ react_tutorials = client.documents.list(
+ filters={
"AND": [
{"key": "category", "value": "tutorial", "negate": False},
{"key": "framework", "value": "react", "negate": False},
{"key": "difficulty", "value": "beginner", "negate": False}
]
- })
+ }
)
```
</Tab>
@@ -167,28 +165,28 @@ The JSON structure forces explicit grouping to prevent unexpected results.
<Tab title="TypeScript">
```typescript
// Any condition can match
- const frontendMemories = await client.memories.list({
- filters: JSON.stringify({
+ const frontendMemories = await client.documents.list({
+ filters: {
OR: [
{ key: "framework", value: "react", negate: false },
{ key: "framework", value: "vue", negate: false },
{ key: "framework", value: "angular", negate: false }
]
- })
+ }
});
```
</Tab>
<Tab title="Python">
```python
# Any condition can match
- frontend_memories = client.memories.list(
- filters=json.dumps({
+ frontend_memories = client.documents.list(
+ filters={
"OR": [
{"key": "framework", "value": "react", "negate": False},
{"key": "framework", "value": "vue", "negate": False},
{"key": "framework", "value": "angular", "negate": False}
]
- })
+ }
)
```
</Tab>
@@ -210,8 +208,8 @@ The JSON structure forces explicit grouping to prevent unexpected results.
<Tab title="TypeScript">
```typescript
// Complex logic: programming AND (react OR advanced difficulty)
- const advancedContent = await client.memories.list({
- filters: JSON.stringify({
+ const advancedContent = await client.documents.list({
+ filters: {
AND: [
{ key: "category", value: "programming", negate: false },
{
@@ -221,15 +219,15 @@ The JSON structure forces explicit grouping to prevent unexpected results.
]
}
]
- })
+ }
});
```
</Tab>
<Tab title="Python">
```python
# Complex logic: programming AND (react OR advanced difficulty)
- advanced_content = client.memories.list(
- filters=json.dumps({
+ advanced_content = client.documents.list(
+ filters={
"AND": [
{"key": "category", "value": "programming", "negate": False},
{
@@ -239,7 +237,7 @@ The JSON structure forces explicit grouping to prevent unexpected results.
]
}
]
- })
+ }
)
```
</Tab>
@@ -265,8 +263,8 @@ Filter memories that contain specific values in array fields like participants,
<Tab title="TypeScript">
```typescript
// Find memories where john.doe participated
- const meetingMemories = await client.memories.list({
- filters: JSON.stringify({
+ const meetingMemories = await client.documents.list({
+ filters: {
AND: [
{
key: "participants",
@@ -275,15 +273,15 @@ Filter memories that contain specific values in array fields like participants,
negate: false
}
]
- })
+ }
});
```
</Tab>
<Tab title="Python">
```python
# Find memories where john.doe participated
- meeting_memories = client.memories.list(
- filters=json.dumps({
+ meeting_memories = client.documents.list(
+ filters={
"AND": [
{
"key": "participants",
@@ -292,7 +290,7 @@ Filter memories that contain specific values in array fields like participants,
"negate": False
}
]
- })
+ }
)
```
</Tab>
@@ -314,8 +312,8 @@ Filter memories that contain specific values in array fields like participants,
<Tab title="TypeScript">
```typescript
// Find memories that don't include a specific team member
- const filteredMemories = await client.memories.list({
- filters: JSON.stringify({
+ const filteredMemories = await client.documents.list({
+ filters: {
AND: [
{
key: "reviewers",
@@ -330,15 +328,15 @@ Filter memories that contain specific values in array fields like participants,
negate: false
}
]
- })
+ }
});
```
</Tab>
<Tab title="Python">
```python
# Find memories that don't include a specific team member
- filtered_memories = client.memories.list(
- filters=json.dumps({
+ filtered_memories = client.documents.list(
+ filters={
"AND": [
{
"key": "reviewers",
@@ -353,7 +351,7 @@ Filter memories that contain specific values in array fields like participants,
"negate": False
}
]
- })
+ }
)
```
</Tab>
@@ -375,8 +373,8 @@ Filter memories that contain specific values in array fields like participants,
<Tab title="TypeScript">
```typescript
// Find memories involving any of several team leads
- const leadershipMemories = await client.memories.list({
- filters: JSON.stringify({
+ const leadershipMemories = await client.documents.list({
+ filters: {
OR: [
{
key: "attendees",
@@ -394,7 +392,7 @@ Filter memories that contain specific values in array fields like participants,
filterType: "array_contains"
}
]
- }),
+ },
sort: "updatedAt",
order: "desc"
});
@@ -403,8 +401,8 @@ Filter memories that contain specific values in array fields like participants,
<Tab title="Python">
```python
# Find memories involving any of several team leads
- leadership_memories = client.memories.list(
- filters=json.dumps({
+ leadership_memories = client.documents.list(
+ filters={
"OR": [
{
"key": "attendees",
@@ -422,7 +420,7 @@ Filter memories that contain specific values in array fields like participants,
"filterType": "array_contains"
}
]
- }),
+ },
sort="updatedAt",
order="desc"
)
@@ -447,14 +445,14 @@ Filter memories that contain specific values in array fields like participants,
<Tabs>
<Tab title="TypeScript">
```typescript
- const filteredMemories = await client.memories.list({
+ const filteredMemories = await client.documents.list({
containerTags: ["user_123"],
- filters: JSON.stringify({
+ filters: {
AND: [
{ key: "category", value: "tutorial", negate: false },
{ key: "framework", value: "react", negate: false }
]
- }),
+ },
sort: "updatedAt",
order: "desc",
limit: 50
@@ -463,14 +461,14 @@ Filter memories that contain specific values in array fields like participants,
</Tab>
<Tab title="Python">
```python
- filtered_memories = client.memories.list(
+ filtered_memories = client.documents.list(
container_tags=["user_123"],
- filters=json.dumps({
+ filters={
"AND": [
{"key": "category", "value": "tutorial", "negate": False},
{"key": "framework", "value": "react", "negate": False}
]
- }),
+ },
sort="updatedAt",
order="desc",
limit=50
@@ -495,9 +493,9 @@ Filter memories that contain specific values in array fields like participants,
<Warning>
**Common Mistakes:**
-- Using bare condition objects: `{"key": "category", "value": "programming"}`
-- Forgetting JSON.stringify: passing objects instead of strings
+- Using bare condition objects: `{"key": "category", "value": "programming"}` without wrapping in `AND` or `OR`
- Missing negate property: always include `"negate": false` or `"negate": true`
+- For cURL requests: forgetting to properly escape the JSON string
</Warning>
<Note>
diff --git a/apps/docs/list-memories/examples/monitoring.mdx b/apps/docs/list-memories/examples/monitoring.mdx
index 248d945e..7c373d3c 100644
--- a/apps/docs/list-memories/examples/monitoring.mdx
+++ b/apps/docs/list-memories/examples/monitoring.mdx
@@ -10,7 +10,7 @@ Monitor memory processing status and track completion rates using the list endpo
<Tabs>
<Tab title="TypeScript">
```typescript
- const response = await client.memories.list({ limit: 100 });
+ const response = await client.documents.list({ limit: 100 });
const statusCounts = response.memories.reduce((acc: any, memory) => {
acc[memory.status] = (acc[memory.status] || 0) + 1;
@@ -22,7 +22,7 @@ Monitor memory processing status and track completion rates using the list endpo
</Tab>
<Tab title="Python">
```python
- response = client.memories.list(limit=100)
+ response = client.documents.list(limit=100)
status_counts = {}
for memory in response.memories:
@@ -48,7 +48,7 @@ Monitor memory processing status and track completion rates using the list endpo
<Tabs>
<Tab title="TypeScript">
```typescript
- const response = await client.memories.list({ limit: 100 });
+ const response = await client.documents.list({ limit: 100 });
const processing = response.memories.filter(m =>
['queued', 'extracting', 'chunking', 'embedding', 'indexing'].includes(m.status)
@@ -59,7 +59,7 @@ Monitor memory processing status and track completion rates using the list endpo
</Tab>
<Tab title="Python">
```python
- response = client.memories.list(limit=100)
+ response = client.documents.list(limit=100)
processing_statuses = ['queued', 'extracting', 'chunking', 'embedding', 'indexing']
processing = [m for m in response.memories if m.status in processing_statuses]
@@ -83,21 +83,22 @@ Monitor memory processing status and track completion rates using the list endpo
<Tabs>
<Tab title="TypeScript">
```typescript
- const failedMemories = await client.memories.list({
- filters: "status:failed",
- limit: 50
- });
+ const response = await client.documents.list({ limit: 100 });
+
+ const failedMemories = response.memories.filter(m => m.status === 'failed');
- failedMemories.memories.forEach(memory => {
+ failedMemories.forEach(memory => {
console.log(`Failed: ${memory.id} - ${memory.title || 'Untitled'}`);
});
```
</Tab>
<Tab title="Python">
```python
- failed_memories = client.memories.list(filters="status:failed", limit=50)
+ response = client.documents.list(limit=100)
- for memory in failed_memories.memories:
+ failed_memories = [m for m in response.memories if m.status == 'failed']
+
+ for memory in failed_memories:
title = memory.title or 'Untitled'
print(f"Failed: {memory.id} - {title}")
```
@@ -107,8 +108,8 @@ Monitor memory processing status and track completion rates using the list endpo
curl -X POST "https://api.supermemory.ai/v3/documents/list" \
-H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
-H "Content-Type: application/json" \
- -d '{"filters": "status:failed", "limit": 50}' | \
- jq '.memories[] | {id, title, status}'
+ -d '{"limit": 100}' | \
+ jq '.memories[] | select(.status == "failed") | {id, title, status}'
```
</Tab>
</Tabs>
diff --git a/apps/docs/list-memories/examples/pagination.mdx b/apps/docs/list-memories/examples/pagination.mdx
index 56004b89..9a975acc 100644
--- a/apps/docs/list-memories/examples/pagination.mdx
+++ b/apps/docs/list-memories/examples/pagination.mdx
@@ -11,13 +11,13 @@ Handle large memory collections efficiently using pagination to process data in
<Tab title="TypeScript">
```typescript
// Get first page
- const page1 = await client.memories.list({
+ const page1 = await client.documents.list({
limit: 20,
page: 1
});
// Get next page
- const page2 = await client.memories.list({
+ const page2 = await client.documents.list({
limit: 20,
page: 2
});
@@ -29,10 +29,10 @@ Handle large memory collections efficiently using pagination to process data in
<Tab title="Python">
```python
# Get first page
- page1 = client.memories.list(limit=20, page=1)
+ page1 = client.documents.list(limit=20, page=1)
# Get next page
- page2 = client.memories.list(limit=20, page=2)
+ page2 = client.documents.list(limit=20, page=2)
print(f"Page 1: {len(page1.memories)} memories")
print(f"Page 2: {len(page2.memories)} memories")
@@ -64,7 +64,7 @@ Handle large memory collections efficiently using pagination to process data in
let hasMore = true;
while (hasMore) {
- const response = await client.memories.list({
+ const response = await client.documents.list({
page: currentPage,
limit: 50
});
@@ -82,7 +82,7 @@ Handle large memory collections efficiently using pagination to process data in
has_more = True
while has_more:
- response = client.memories.list(page=current_page, limit=50)
+ response = client.documents.list(page=current_page, limit=50)
print(f"Page {current_page}: {len(response.memories)} memories")
diff --git a/apps/docs/list-memories/overview.mdx b/apps/docs/list-memories/overview.mdx
index d1631030..13976fe7 100644
--- a/apps/docs/list-memories/overview.mdx
+++ b/apps/docs/list-memories/overview.mdx
@@ -18,7 +18,7 @@ Retrieve paginated memories with filtering and sorting options from your Superme
apiKey: process.env.SUPERMEMORY_API_KEY!
});
- const memories = await client.memories.list({ limit: 10 });
+ const memories = await client.documents.list({ limit: 10 });
console.log(memories);
```
</Tab>
@@ -28,7 +28,7 @@ Retrieve paginated memories with filtering and sorting options from your Superme
import os
client = Supermemory(api_key=os.environ.get("SUPERMEMORY_API_KEY"))
- memories = client.memories.list(limit=10)
+ memories = client.documents.list(limit=10)
print(f"Found {len(memories.memories)} memories")
```
</Tab>
diff --git a/apps/docs/memory-api/connectors/managing-resources.mdx b/apps/docs/memory-api/connectors/managing-resources.mdx
index f9e0f424..24cab0ec 100644
--- a/apps/docs/memory-api/connectors/managing-resources.mdx
+++ b/apps/docs/memory-api/connectors/managing-resources.mdx
@@ -2,6 +2,7 @@
title: 'Managing Connection Resources'
sidebarTitle: 'Managing Resources'
description: 'Get and configure resources for connections that support resource management'
+icon: 'folder-sync'
---
<Note>
diff --git a/apps/docs/memory-api/features/filtering.mdx b/apps/docs/memory-api/features/filtering.mdx
index 3873e606..e7e3a14d 100644
--- a/apps/docs/memory-api/features/filtering.mdx
+++ b/apps/docs/memory-api/features/filtering.mdx
@@ -168,7 +168,7 @@ curl --location 'https://api.supermemory.ai/v3/documents' \
```
```typescript Typescript
-await client.memories.create({
+await client.documents.create({
content: "quarterly planning meeting discussion",
metadata: {
participants: ["john.doe", "sarah.smith", "mike.wilson"]
@@ -177,7 +177,7 @@ await client.memories.create({
```
```python Python
-client.memories.create(
+client.documents.create(
content="quarterly planning meeting discussion",
metadata={
"participants": ["john.doe", "sarah.smith", "mike.wilson"]
diff --git a/apps/docs/memory-api/ingesting.mdx b/apps/docs/memory-api/ingesting.mdx
index 79468eaf..301fb66a 100644
--- a/apps/docs/memory-api/ingesting.mdx
+++ b/apps/docs/memory-api/ingesting.mdx
@@ -104,7 +104,7 @@ const client = new Supermemory({
})
async function addContent() {
- const result = await client.memories.add({
+ const result = await client.add({
content: "Machine learning is a subset of artificial intelligence...",
containerTags: ["ai-research"],
metadata: {
@@ -127,7 +127,7 @@ import os
client = Supermemory(api_key=os.environ.get("SUPERMEMORY_API_KEY"))
-result = client.memories.add(
+result = client.add(
content="Machine learning is a subset of artificial intelligence...",
container_tags=["ai-research"],
metadata={
@@ -205,7 +205,7 @@ const client = new Supermemory({
})
// Method 1: Using SDK uploadFile method (RECOMMENDED)
-const result = await client.memories.uploadFile({
+const result = await client.documents.uploadFile({
file: fs.createReadStream('/path/to/document.pdf'),
containerTags: 'research_project' // String, not array!
})
@@ -234,7 +234,7 @@ from supermemory import Supermemory
client = Supermemory(api_key="your_api_key")
# Method 1: Using SDK upload_file method (RECOMMENDED)
-result = client.memories.upload_file(
+result = client.documents.upload_file(
file=open('document.pdf', 'rb'),
container_tags='research_project' # String parameter name
)
@@ -699,7 +699,7 @@ Process large volumes efficiently with rate limiting and error recovery.
async function ingestWithRetry(doc: Document, maxRetries: number) {
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
- return await client.memories.add({
+ return await client.add({
content: doc.content,
customId: doc.id,
containerTags: ["batch_import_user_123"], // CORRECTED: Array
@@ -787,7 +787,7 @@ Process large volumes efficiently with rate limiting and error recovery.
async def ingest_with_retry(doc: Dict[str, Any], max_retries: int):
for attempt in range(1, max_retries + 1):
try:
- return await client.memories.add(
+ return await client.add(
content=doc['content'],
custom_id=doc['id'],
container_tags=["batch_import_user_123"], # CORRECTED: List
diff --git a/apps/docs/memory-api/introduction.mdx b/apps/docs/memory-api/introduction.mdx
index ca4fa705..24a46f8b 100644
--- a/apps/docs/memory-api/introduction.mdx
+++ b/apps/docs/memory-api/introduction.mdx
@@ -37,7 +37,7 @@ Check out the following resources to get started:
<Card title="Use Cases" icon="brain" href="/overview/use-cases">
See what supermemory can do for you
</Card>
- <Card title="SDKs" icon="code" href="/memory-api/sdks/">
+ <Card title="SDKs" icon="code" href="/integrations/supermemory-sdk">
Learn more about the SDKs
</Card>
</CardGroup> \ No newline at end of file
diff --git a/apps/docs/memory-api/sdks/anthropic-claude-memory.mdx b/apps/docs/memory-api/sdks/anthropic-claude-memory.mdx
index 5e6c5866..10bc195b 100644
--- a/apps/docs/memory-api/sdks/anthropic-claude-memory.mdx
+++ b/apps/docs/memory-api/sdks/anthropic-claude-memory.mdx
@@ -357,11 +357,11 @@ The Claude Memory Tool is ideal for:
## Next Steps
<CardGroup cols={2}>
- <Card title="OpenAI SDK Tools" icon="sparkles" href="/memory-api/sdks/openai-plugins">
+ <Card title="OpenAI SDK Tools" icon="sparkles" href="/integrations/openai">
Use memory tools with OpenAI function calling
</Card>
- <Card title="AI SDK Integration" icon="triangle" href="/ai-sdk/overview">
+ <Card title="AI SDK Integration" icon="triangle" href="/integrations/ai-sdk">
Integrate with Vercel AI SDK
</Card>
diff --git a/apps/docs/memory-api/sdks/openai-plugins.mdx b/apps/docs/memory-api/sdks/openai-plugins.mdx
index 635e0008..d95dad47 100644
--- a/apps/docs/memory-api/sdks/openai-plugins.mdx
+++ b/apps/docs/memory-api/sdks/openai-plugins.mdx
@@ -574,7 +574,7 @@ npm run lint
## Next Steps
<CardGroup cols={2}>
- <Card title="AI SDK Integration" icon="triangle" href="/ai-sdk/overview">
+ <Card title="AI SDK Integration" icon="triangle" href="/integrations/ai-sdk">
Use with Vercel AI SDK for streamlined development
</Card>
diff --git a/apps/docs/memory-api/sdks/overview.mdx b/apps/docs/memory-api/sdks/overview.mdx
index 32ffdd32..30ace8a2 100644
--- a/apps/docs/memory-api/sdks/overview.mdx
+++ b/apps/docs/memory-api/sdks/overview.mdx
@@ -3,18 +3,18 @@ title: "Overview"
---
<Columns cols={2}>
- <Card title="Native Python and Typescript/JS SDKs" icon="code" href="/memory-api/sdks/native">
+ <Card title="Native Python and Typescript/JS SDKs" icon="code" href="/integrations/supermemory-sdk">
<br/>
```pip install supermemory```
```npm install supermemory```
</Card>
- <Card title="AI SDK plugin" icon="triangle" href="/ai-sdk/overview">
+ <Card title="AI SDK plugin" icon="triangle" href="/integrations/ai-sdk">
Easy to use with Vercel AI SDK
</Card>
- <Card title="OpenAI SDK plugins" icon="sparkles" href="/memory-api/sdks/openai-plugins">
+ <Card title="OpenAI SDK plugins" icon="sparkles" href="/integrations/openai">
Use supermemory with the python and javascript OpenAI SDKs
</Card>
diff --git a/apps/docs/memory-api/sdks/python.mdx b/apps/docs/memory-api/sdks/python.mdx
index 52b6b3af..0888f2b0 100644
--- a/apps/docs/memory-api/sdks/python.mdx
+++ b/apps/docs/memory-api/sdks/python.mdx
@@ -78,7 +78,7 @@ from supermemory import Supermemory
client = Supermemory()
-client.memories.upload_file(
+client.documents.upload_file(
file=Path("/path/to/file"),
)
```
@@ -101,7 +101,7 @@ from supermemory import Supermemory
client = Supermemory()
try:
- client.memories.add(
+ client.add(
content="This is a detailed article about machine learning concepts...",
)
except supermemory.APIConnectionError as e:
@@ -146,7 +146,7 @@ client = Supermemory(
)
# Or, configure per-request:
-client.with_options(max_retries=5).memories.add(
+client.with_options(max_retries=5).documents.add(
content="This is a detailed article about machine learning concepts...",
)
```
@@ -171,7 +171,7 @@ client = Supermemory(
)
# Override per-request:
-client.with_options(timeout=5.0).memories.add(
+client.with_options(timeout=5.0).documents.add(
content="This is a detailed article about machine learning concepts...",
)
```
@@ -214,12 +214,12 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
from supermemory import Supermemory
client = Supermemory()
-response = client.memories.with_raw_response.add(
+response = client.documents.with_raw_response.add(
content="This is a detailed article about machine learning concepts...",
)
print(response.headers.get('X-My-Header'))
-memory = response.parse() # get the object that `memories.add()` would have returned
+memory = response.parse() # get the object that `documents.add()` would have returned
print(memory.id)
```
@@ -234,7 +234,7 @@ The above interface eagerly reads the full response body when you make the reque
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
```python
-with client.memories.with_streaming_response.add(
+with client.documents.with_streaming_response.add(
content="This is a detailed article about machine learning concepts...",
) as response:
print(response.headers.get("X-My-Header"))
@@ -346,4 +346,4 @@ print(supermemory.__version__)
## Requirements
-Python 3.8 or higher. \ No newline at end of file
+Python 3.8 or higher.
diff --git a/apps/docs/memory-api/sdks/typescript.mdx b/apps/docs/memory-api/sdks/typescript.mdx
index dd656a25..d6670b10 100644
--- a/apps/docs/memory-api/sdks/typescript.mdx
+++ b/apps/docs/memory-api/sdks/typescript.mdx
@@ -41,10 +41,10 @@ const client = new Supermemory({
});
async function main() {
- const params: supermemory.MemoryAddParams = {
+ const params: Supermemory.AddParams = {
content: 'This is a detailed article about machine learning concepts...',
};
- const response: supermemory.MemoryAddResponse = await client.memories.add(params);
+ const response: Supermemory.AddResponse = await client.add(params);
}
main();
@@ -68,17 +68,17 @@ import Supermemory, { toFile } from 'supermemory';
const client = new Supermemory();
// If you have access to Node `fs` we recommend using `fs.createReadStream()`:
-await client.memories.uploadFile({ file: fs.createReadStream('/path/to/file') });
+await client.documents.uploadFile({ file: fs.createReadStream('/path/to/file') });
// Or if you have the web `File` API you can pass a `File` instance:
-await client.memories.uploadFile({ file: new File(['my bytes'], 'file') });
+await client.documents.uploadFile({ file: new File(['my bytes'], 'file') });
// You can also pass a `fetch` `Response`:
-await client.memories.uploadFile({ file: await fetch('https://somesite/file') });
+await client.documents.uploadFile({ file: await fetch('https://somesite/file') });
// Finally, if none of the above are convenient, you can use our `toFile` helper:
-await client.memories.uploadFile({ file: await toFile(Buffer.from('my bytes'), 'file') });
-await client.memories.uploadFile({ file: await toFile(new Uint8Array([0, 1, 2]), 'file') });
+await client.documents.uploadFile({ file: await toFile(Buffer.from('my bytes'), 'file') });
+await client.documents.uploadFile({ file: await toFile(new Uint8Array([0, 1, 2]), 'file') });
```
## Handling errors
@@ -90,7 +90,7 @@ a subclass of `APIError` will be thrown:
```ts
async function main() {
- const response = await client.memories
+ const response = await client.documents
.add({ content: 'This is a detailed article about machine learning concepts...' })
.catch(async (err) => {
if (err instanceof supermemory.APIError) {
@@ -135,7 +135,7 @@ const client = new Supermemory({
});
// Or, configure per-request:
-await client.memories.add({ content: 'This is a detailed article about machine learning concepts...' }, {
+await client.add({ content: 'This is a detailed article about machine learning concepts...' }, {
maxRetries: 5,
});
```
@@ -152,7 +152,7 @@ const client = new Supermemory({
});
// Override per-request:
-await client.memories.add({ content: 'This is a detailed article about machine learning concepts...' }, {
+await client.add({ content: 'This is a detailed article about machine learning concepts...' }, {
timeout: 5 * 1000,
});
```
@@ -175,13 +175,13 @@ Unlike `.asResponse()` this method consumes the body, returning once it is parse
```ts
const client = new Supermemory();
-const response = await client.memories
+const response = await client.documents
.add({ content: 'This is a detailed article about machine learning concepts...' })
.asResponse();
console.debug(response.headers.get('X-My-Header'));
console.debug(response.statusText); // access the underlying Response object
-const { data: response, response: raw } = await client.memories
+const { data: response, response: raw } = await client.documents
.add({ content: 'This is a detailed article about machine learning concepts...' })
.withResponse();
console.debug(raw.headers.get('X-My-Header'));
@@ -388,4 +388,4 @@ The following runtimes are supported:
Note that React Native is not supported at this time.
-If you are interested in other runtime environments, please open or upvote an issue on GitHub. \ No newline at end of file
+If you are interested in other runtime environments, please open or upvote an issue on GitHub.
diff --git a/apps/docs/memory-api/track-progress.mdx b/apps/docs/memory-api/track-progress.mdx
index e14d7739..65c0462f 100644
--- a/apps/docs/memory-api/track-progress.mdx
+++ b/apps/docs/memory-api/track-progress.mdx
@@ -105,20 +105,20 @@ Track specific document processing status.
<CodeGroup>
```typescript Typescript
-const memory = await client.memories.get("doc_abc123");
+let memory = await client.documents.get("doc_abc123");
console.log(`Status: ${memory.status}`);
// Poll for completion
while (memory.status !== 'done') {
await new Promise(r => setTimeout(r, 2000));
- memory = await client.memories.get("doc_abc123");
+ memory = await client.documents.get("doc_abc123");
console.log(`Status: ${memory.status}`);
}
```
```python Python
-memory = client.memories.get("doc_abc123")
+memory = client.documents.get("doc_abc123")
print(f"Status: {memory['status']}")
@@ -126,7 +126,7 @@ print(f"Status: {memory['status']}")
import time
while memory['status'] != 'done':
time.sleep(2)
- memory = client.memories.get("doc_abc123")
+ memory = client.documents.get("doc_abc123")
print(f"Status: {memory['status']}")
```
@@ -178,7 +178,7 @@ async function waitForProcessing(documentId: string, maxWaitMs = 300000) {
const pollInterval = 2000; // 2 seconds
while (Date.now() - startTime < maxWaitMs) {
- const doc = await client.memories.get(documentId);
+ const doc = await client.documents.get(documentId);
if (doc.status === 'done') {
return doc;
@@ -205,7 +205,7 @@ async function trackBatch(documentIds: string[]) {
// Initial check
for (const id of documentIds) {
- const doc = await client.memories.get(id);
+ const doc = await client.documents.get(id);
statuses.set(id, doc.status);
}
@@ -215,7 +215,7 @@ async function trackBatch(documentIds: string[]) {
for (const id of documentIds) {
if (statuses.get(id) !== 'done' && statuses.get(id) !== 'failed') {
- const doc = await client.memories.get(id);
+ const doc = await client.documents.get(id);
statuses.set(id, doc.status);
}
}
@@ -236,7 +236,7 @@ Handle processing failures gracefully:
```typescript
async function addWithRetry(content: string, maxRetries = 3) {
for (let attempt = 1; attempt <= maxRetries; attempt++) {
- const { id } = await client.memories.add({ content });
+ const { id } = await client.add({ content });
try {
const result = await waitForProcessing(id);
@@ -253,4 +253,4 @@ async function addWithRetry(content: string, maxRetries = 3) {
}
}
}
-``` \ No newline at end of file
+```
diff --git a/apps/docs/memory-graph/installation.mdx b/apps/docs/memory-graph/installation.mdx
index 0051825f..e3a0b2d4 100644
--- a/apps/docs/memory-graph/installation.mdx
+++ b/apps/docs/memory-graph/installation.mdx
@@ -19,10 +19,10 @@ npm install @supermemory/memory-graph
## Next Steps
<CardGroup cols={2}>
- <Card title="Quick Start" icon="rocket" href="/memory-graph/quickstart">
+ <Card title="Quick Start" icon="rocket" href="/integrations/memory-graph">
Get the graph running with real data
</Card>
- <Card title="API Reference" icon="code" href="/memory-graph/api-reference">
+ <Card title="API Reference" icon="code" href="/integrations/memory-graph">
Explore all available props and types
</Card>
</CardGroup>
diff --git a/apps/docs/memory-graph/quickstart.mdx b/apps/docs/memory-graph/quickstart.mdx
index d05a0925..1b02fef6 100644
--- a/apps/docs/memory-graph/quickstart.mdx
+++ b/apps/docs/memory-graph/quickstart.mdx
@@ -198,10 +198,10 @@ Show custom content when no documents exist:
## Next Steps
<CardGroup cols={2}>
- <Card title="Examples" icon="code" href="/memory-graph/examples">
+ <Card title="Examples" icon="code" href="/integrations/memory-graph">
See more usage examples
</Card>
- <Card title="API Reference" icon="book" href="/memory-graph/api-reference">
+ <Card title="API Reference" icon="book" href="/integrations/memory-graph">
Full API documentation
</Card>
</CardGroup>
diff --git a/apps/docs/memory-operations.mdx b/apps/docs/memory-operations.mdx
new file mode 100644
index 00000000..6b141447
--- /dev/null
+++ b/apps/docs/memory-operations.mdx
@@ -0,0 +1,98 @@
+---
+title: "Memory Operations"
+sidebarTitle: "Memories"
+description: "Advanced memory operations (v4 API)"
+icon: "database"
+---
+
+<Info>
+These v4 endpoints operate on extracted memories (not raw documents). SDK support coming soon — use fetch or cURL for now.
+
+For document management (list, get, update, delete), see [Document Operations](/document-operations).
+</Info>
+
+## Forget Memory
+
+Soft-delete a memory — excluded from search results but preserved in the system. Use this when you might want to restore it later.
+
+<Tabs>
+ <Tab title="fetch">
+ ```typescript
+ await fetch("https://api.supermemory.ai/v4/memories/mem_abc123/forget", {
+ method: "POST",
+ headers: {
+ "Authorization": `Bearer ${API_KEY}`
+ }
+ });
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ curl -X POST "https://api.supermemory.ai/v4/memories/mem_abc123/forget" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY"
+ ```
+ </Tab>
+</Tabs>
+
+The memory will no longer appear in search results but remains in the database.
+
+---
+
+## Update Memory (Versioned)
+
+Update a memory by creating a new version. The original is preserved with `isLatest=false`.
+
+<Tabs>
+ <Tab title="fetch">
+ ```typescript
+ await fetch("https://api.supermemory.ai/v4/memories", {
+ method: "PATCH",
+ headers: {
+ "Authorization": `Bearer ${API_KEY}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ // Identify by ID or content
+ id: "mem_abc123",
+ // content: "Original content to match",
+
+ newContent: "Updated content goes here",
+ metadata: {
+ tags: ["updated"]
+ }
+ })
+ });
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ curl -X PATCH "https://api.supermemory.ai/v4/memories" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "id": "mem_abc123",
+ "newContent": "Updated content goes here",
+ "metadata": {"tags": ["updated"]}
+ }'
+ ```
+ </Tab>
+</Tabs>
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| `id` | string | * | Memory ID to update |
+| `content` | string | * | Original content to match (alternative to ID) |
+| `newContent` | string | yes | New content for the memory |
+| `metadata` | object | no | Updated metadata |
+
+\* Either `id` or `content` must be provided.
+
+---
+
+## Next Steps
+
+- [Document Operations](/document-operations) — Manage documents (SDK supported)
+- [Search](/search) — Query your memories
+- [Ingesting Content](/add-memories) — Add new content
diff --git a/apps/docs/memory-router/overview.mdx b/apps/docs/memory-router/overview.mdx
index 4c142e5e..9ed0ba99 100644
--- a/apps/docs/memory-router/overview.mdx
+++ b/apps/docs/memory-router/overview.mdx
@@ -11,7 +11,7 @@ The Memory Router is a transparent proxy that sits between your application and
</Note>
<Tip>
-**Using Vercel AI SDK?** Check out our [AI SDK integration](/ai-sdk/overview) for the cleanest implementation with `@supermemory/tools/ai-sdk` - it's our recommended approach for new projects.
+**Using Vercel AI SDK?** Check out our [AI SDK integration](/integrations/ai-sdk) for the cleanest implementation with `@supermemory/tools/ai-sdk` - it's our recommended approach for new projects.
</Tip>
## What is the Memory Router?
diff --git a/apps/docs/memory-router/with-memory-api.mdx b/apps/docs/memory-router/with-memory-api.mdx
index ae4396c2..e93705ea 100644
--- a/apps/docs/memory-router/with-memory-api.mdx
+++ b/apps/docs/memory-router/with-memory-api.mdx
@@ -21,7 +21,7 @@ from supermemory import Client
api_client = Client(api_key="YOUR_SUPERMEMORY_KEY")
# Add memory via API
-api_client.memories.add({
+api_client.add({
"content": "User prefers Python over JavaScript for backend development",
"user_id": "user123"
})
@@ -52,7 +52,7 @@ Use the API to add documents and context before conversations:
```python
# Step 1: Load user's documents via API
-api_client.memories.add({
+api_client.add({
"content": "https://company.com/product-docs.pdf",
"user_id": "support_agent_123",
"metadata": {"type": "product_documentation"}
@@ -80,11 +80,11 @@ Always use the same `user_id` format across both systems:
```python
# ✅ Good - consistent user_id
-api_client.memories.add({"user_id": "user_123"})
+api_client.add({"user_id": "user_123"})
router_headers = {"x-sm-user-id": "user_123"}
# ❌ Bad - inconsistent user_id
-api_client.memories.add({"user_id": "user-123"})
+api_client.add({"user_id": "user-123"})
router_headers = {"x-sm-user-id": "user_123"} # Different format!
```
@@ -92,7 +92,7 @@ router_headers = {"x-sm-user-id": "user_123"} # Different format!
```python
# API: Add memories with tags
-api_client.memories.add({
+api_client.add({
"content": "Q3 revenue report",
"user_id": "analyst_1",
"containerTag": "financial_reports"
diff --git a/apps/docs/migration/from-mem0.mdx b/apps/docs/migration/from-mem0.mdx
index e218c592..6903379e 100644
--- a/apps/docs/migration/from-mem0.mdx
+++ b/apps/docs/migration/from-mem0.mdx
@@ -1,19 +1,18 @@
---
-title: "Migrating from Mem0.ai to Supermemory"
-description: "Complete guide to migrate your data and applications from Mem0.ai to Supermemory"
+title: "Migrating from Mem0 to Supermemory"
+description: "Complete guide to migrate your data and applications from Mem0 to Supermemory"
sidebarTitle: "From Mem0"
---
-Migrating from Mem0.ai to Supermemory is straightforward. This guide walks you through exporting your memories from Mem0 and importing them into Supermemory.
+Migrating from Mem0 to Supermemory is straightforward. This guide walks you through exporting your memories from Mem0 and importing them into Supermemory.
## Why Migrate to Supermemory?
-Supermemory offers enhanced capabilities over Mem0.ai:
-- **Memory Router** for zero-code LLM integration
+Supermemory offers enhanced capabilities over Mem0:
- **Knowledge graph** architecture for better context relationships
- **Multiple content types** (URLs, PDFs, images, videos)
- **Generous free tier** (100k tokens) with affordable pricing
-- **Multiple integration options** (API, Router, MCP, SDKs)
+- **Multiple integration options** (API, MCP, SDKs)
## Quick Migration (All-in-One)
@@ -49,7 +48,7 @@ print("Migration complete!")
## Step-by-Step Migration
<Steps>
- <Step title="Export from Mem0.ai">
+ <Step title="Export from Mem0">
Mem0 provides two ways to export your memories:
### Option 1: Export via Dashboard (Recommended)
@@ -141,7 +140,7 @@ print("Migration complete!")
# Import to Supermemory
try:
- result = client.memories.add(
+ result = client.add(
content=content,
container_tags=["imported_from_mem0"],
metadata={
@@ -161,13 +160,13 @@ print("Migration complete!")
## API Migration Reference
-Here's how common Mem0.ai operations map to Supermemory:
+Here's how common Mem0 operations map to Supermemory:
### Adding Memories
<CodeGroup>
-```python Mem0.ai
+```python Mem0
from mem0 import MemoryClient
client = MemoryClient(api_key="...")
@@ -181,7 +180,7 @@ client.add(
from supermemory import Supermemory
client = Supermemory(api_key="...")
-client.memories.add(
+client.add(
content="User prefers dark mode",
container_tags=["user_alice"]
)
@@ -193,7 +192,7 @@ client.memories.add(
<CodeGroup>
-```python Mem0.ai
+```python Mem0
results = client.search(
query="user preferences",
user_id="alice"
@@ -201,7 +200,7 @@ results = client.search(
```
```python Supermemory
-results = client.memories.search(
+results = client.documents.search(
query="user preferences",
container_tags=["user_alice"]
)
@@ -213,14 +212,14 @@ results = client.memories.search(
<CodeGroup>
-```python Mem0.ai
+```python Mem0
memories = client.get_all(
user_id="alice"
)
```
```python Supermemory
-memories = client.memories.list(
+memories = client.documents.list(
container_tags=["user_alice"],
limit=100
)
@@ -232,61 +231,12 @@ memories = client.memories.list(
<CodeGroup>
-```python Mem0.ai
+```python Mem0
client.delete(memory_id="mem_123")
```
```python Supermemory
-client.memories.delete("mem_123")
-```
-
-</CodeGroup>
-
-## Using Memory Router (Easiest Migration)
-
-For the simplest migration path, use Supermemory's Memory Router which requires minimal code changes:
-
-<CodeGroup>
-
-```python Before (Mem0 + OpenAI)
-from openai import OpenAI
-from mem0 import MemoryClient
-
-# Two separate clients needed
-openai = OpenAI(api_key="sk-...")
-memory = MemoryClient(api_key="mem0_key")
-
-# Manual memory management
-context = memory.search("user preferences", user_id="alice")
-messages = [
- {"role": "system", "content": f"Context: {context}"},
- {"role": "user", "content": "What are my preferences?"}
-]
-
-response = openai.chat.completions.create(
- model="gpt-5",
- messages=messages
-)
-```
-
-```python After (Supermemory Router)
-from openai import OpenAI
-
-# Single client with automatic memory management
-client = OpenAI(
- api_key="sk-...",
- base_url="https://api.supermemory.ai/v3/https://api.openai.com/v1",
- default_headers={
- "x-supermemory-api-key": "your_supermemory_key",
- "x-supermemory-user-id": "alice"
- }
-)
-
-# Memories handled automatically!
-response = client.chat.completions.create(
- model="gpt-5",
- messages=[{"role": "user", "content": "What are my preferences?"}]
-)
+client.documents.delete("mem_123")
```
</CodeGroup>
diff --git a/apps/docs/migration/from-zep.mdx b/apps/docs/migration/from-zep.mdx
index a109a157..f6585605 100644
--- a/apps/docs/migration/from-zep.mdx
+++ b/apps/docs/migration/from-zep.mdx
@@ -111,7 +111,7 @@ memories = client.memory.get(session_id="user_123")
```
```python Supermemory
-documents = client.memories.list({
+documents = client.documents.list({
"containerTag": ["user_123"],
"limit": 100
})
@@ -367,6 +367,6 @@ migrateFromZep(
## Resources
-- [Supermemory SDKs](/memory-api/sdks/overview)
+- [Supermemory SDKs](/integrations/supermemory-sdk)
- [API Reference](/memory-api/overview)
-- [Search Documentation](/search/overview)
+- [Search Documentation](/search)
diff --git a/apps/docs/migration/mem0-migration-script.py b/apps/docs/migration/mem0-migration-script.py
index 7b05edf6..ff33f10a 100644
--- a/apps/docs/migration/mem0-migration-script.py
+++ b/apps/docs/migration/mem0-migration-script.py
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
"""
-Mem0.ai to Supermemory Migration Script
+Mem0 to Supermemory Migration Script
========================================
-Simple script to migrate memories from Mem0.ai to Supermemory.
+Simple script to migrate memories from Mem0 to Supermemory.
Prerequisites:
1. Install required packages:
@@ -18,36 +18,34 @@ Usage:
python mem0-migration-script.py
"""
-import os
import json
+import os
import time
from datetime import datetime
-from typing import Dict, Any, Optional
+from typing import Any, Dict, Optional
+
+from dotenv import load_dotenv
from mem0 import MemoryClient
from supermemory import Supermemory
-from dotenv import load_dotenv
# Load environment variables
load_dotenv()
+
def export_from_mem0(
api_key: str,
org_id: Optional[str] = None,
project_id: Optional[str] = None,
- filters: Optional[Dict] = None
+ filters: Optional[Dict] = None,
) -> Dict[str, Any]:
"""
- Export memories from Mem0.ai using their export API
+    Export memories from Mem0 using their export API
"""
- print("🔄 Starting Mem0.ai export...")
-
+ print("🔄 Starting mem0 export...")
+
# Initialize Mem0 client
- client = MemoryClient(
- api_key=api_key,
- org_id=org_id,
- project_id=project_id
- )
-
+ client = MemoryClient(api_key=api_key, org_id=org_id, project_id=project_id)
+
# Define export schema - this matches what Mem0 actually returns
export_schema = {
"type": "object",
@@ -65,41 +63,40 @@ def export_from_mem0(
"run_id": {"type": "string"},
"metadata": {"type": "object"},
"created_at": {"type": "string"},
- "updated_at": {"type": "string"}
- }
- }
+ "updated_at": {"type": "string"},
+ },
+ },
}
- }
+ },
}
-
+
try:
# Step 1: Create export job
print("📤 Creating export job...")
export_response = client.create_memory_export(
- schema=export_schema,
- filters=filters if filters else {}
+ schema=export_schema, filters=filters if filters else {}
)
-
+
export_id = export_response.get("id")
print(f"✅ Export job created with ID: {export_id}")
-
+
# Step 2: Wait for export to complete
print("⏳ Waiting for export to complete...")
time.sleep(5) # Usually takes a few seconds
-
+
# Step 3: Retrieve the exported data using the correct method
print("📥 Retrieving exported data...")
export_data = client.get_memory_export(memory_export_id=export_id)
-
+
# Step 4: Save backup
backup_filename = f"mem0_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
with open(backup_filename, "w") as f:
json.dump(export_data, f, indent=2)
print(f"💾 Backup saved to: {backup_filename}")
-
+
memory_count = len(export_data.get("memories", []))
- print(f"✅ Successfully exported {memory_count} memories from Mem0.ai")
-
+ print(f"✅ Successfully exported {memory_count} memories from mem0")
+
# Show sample of exported data
if memory_count > 0:
print("\n📋 Sample exported memory:")
@@ -107,36 +104,33 @@ def export_from_mem0(
print(f" Content: {sample.get('content', 'N/A')[:50]}...")
print(f" ID: {sample.get('id', 'None')}")
print(f" User ID: {sample.get('user_id', 'None')}")
-
+
return export_data
-
+
except Exception as e:
print(f"❌ Error exporting from Mem0: {str(e)}")
raise
+
def import_to_supermemory(mem0_data: Dict[str, Any], api_key: str) -> Dict[str, int]:
"""
Import Mem0 memories into Supermemory
"""
print("\n🚀 Starting import to Supermemory...")
-
+
# Initialize Supermemory client
client = Supermemory(api_key=api_key)
-
+
memories = mem0_data.get("memories", [])
if not memories:
print("⚠️ No memories found to import")
return {"imported": 0, "failed": 0, "skipped": 0}
-
+
# Statistics
- stats = {
- "imported": 0,
- "failed": 0,
- "skipped": 0
- }
-
+ stats = {"imported": 0, "failed": 0, "skipped": 0}
+
print(f"📦 Processing {len(memories)} memories...")
-
+
for i, memory in enumerate(memories, 1):
try:
# Check if content exists
@@ -145,174 +139,178 @@ def import_to_supermemory(mem0_data: Dict[str, Any], api_key: str) -> Dict[str,
print(f"⚠️ [{i}/{len(memories)}] Skipping: No content")
stats["skipped"] += 1
continue
-
+
# Build container tags
container_tags = ["imported_from_mem0"]
-
+
# Add user tag if present (handle None values)
user_id = memory.get("user_id")
if user_id and user_id != "None":
container_tags.append(f"user_{user_id}")
-
+
# Add agent tag if present
agent_id = memory.get("agent_id")
if agent_id and agent_id != "None":
container_tags.append(f"agent_{agent_id}")
-
+
# Add app tag if present
app_id = memory.get("app_id")
if app_id and app_id != "None":
container_tags.append(f"app_{app_id}")
-
+
# Add session tag if present
session_id = memory.get("session_id")
if session_id and session_id != "None":
container_tags.append(f"session_{session_id}")
-
+
# Generate a unique ID if Mem0 didn't provide one
memory_id = memory.get("id")
if not memory_id or memory_id == "None":
# Use content hash for uniqueness
import hashlib
+
memory_id = hashlib.md5(content.encode()).hexdigest()[:8]
-
+
# Prepare metadata
metadata = {
"source": "mem0_migration",
- "migration_date": datetime.now().isoformat()
+ "migration_date": datetime.now().isoformat(),
}
-
+
# Add original ID if it existed
if memory.get("id") and memory["id"] != "None":
metadata["original_id"] = memory["id"]
-
+
# Add timestamps if available and not None
created_at = memory.get("created_at")
if created_at and created_at != "None":
metadata["original_created_at"] = created_at
-
+
updated_at = memory.get("updated_at")
if updated_at and updated_at != "None":
metadata["original_updated_at"] = updated_at
-
+
# Add hash information if available
hash_val = memory.get("hash")
if hash_val and hash_val != "None":
metadata["original_hash"] = hash_val
-
+
prev_hash = memory.get("prev_hash")
if prev_hash and prev_hash != "None":
metadata["original_prev_hash"] = prev_hash
-
+
# Merge with existing metadata if it's a valid dict
if memory.get("metadata") and isinstance(memory["metadata"], dict):
metadata.update(memory["metadata"])
-
+
# Import to Supermemory
- result = client.memories.add(
+ result = client.add(
content=content,
container_tags=container_tags,
custom_id=f"mem0_{memory_id}",
- metadata=metadata
+ metadata=metadata,
)
-
+
stats["imported"] += 1
print(f"✅ [{i}/{len(memories)}] Imported: {content[:50]}...")
-
+
# Small delay to avoid rate limiting
if i % 10 == 0:
time.sleep(0.5)
-
+
except Exception as e:
stats["failed"] += 1
print(f"❌ [{i}/{len(memories)}] Failed: {str(e)}")
-
+
return stats
+
def verify_migration(api_key: str, expected_count: int):
"""
Verify that memories were imported correctly
"""
print("\n🔍 Verifying migration...")
-
+
client = Supermemory(api_key=api_key)
-
+
try:
# Check imported memories
- result = client.memories.list(
- container_tags=["imported_from_mem0"],
- limit=100
- )
-
- total_imported = result['pagination']['totalItems']
+ result = client.documents.list(container_tags=["imported_from_mem0"], limit=100)
+
+ total_imported = result["pagination"]["totalItems"]
print(f"✅ Found {total_imported} imported memories in Supermemory")
-
+
# Show sample memories
- if result['memories']:
+ if result["memories"]:
print("\n📋 Sample imported memories:")
- for memory in result['memories'][:3]:
- print(f" - {memory['id']}: {memory.get('summary', 'No summary')[:50]}...")
-
+ for memory in result["memories"][:3]:
+ print(
+ f" - {memory['id']}: {memory.get('summary', 'No summary')[:50]}..."
+ )
+
# Check success rate
- success_rate = (total_imported / expected_count * 100) if expected_count > 0 else 0
+ success_rate = (
+ (total_imported / expected_count * 100) if expected_count > 0 else 0
+ )
print(f"\n📊 Migration success rate: {success_rate:.1f}%")
-
+
return total_imported
-
+
except Exception as e:
print(f"❌ Error during verification: {str(e)}")
return 0
+
def main():
"""Main migration function"""
print("=" * 60)
- print("🎯 Mem0.ai to Supermemory Migration Tool")
+ print("🎯 mem0 to Supermemory Migration Tool")
print("=" * 60)
-
+
# Get credentials from environment
mem0_api_key = os.getenv("MEM0_API_KEY")
mem0_org_id = os.getenv("MEM0_ORG_ID")
mem0_project_id = os.getenv("MEM0_PROJECT_ID")
supermemory_api_key = os.getenv("SUPERMEMORY_API_KEY")
-
+
# Validate credentials
if not mem0_api_key:
print("❌ Error: MEM0_API_KEY environment variable not set")
return
-
+
if not supermemory_api_key:
print("❌ Error: SUPERMEMORY_API_KEY environment variable not set")
return
-
+
try:
# Step 1: Export from Mem0
- print("\n📤 STEP 1: Export from Mem0.ai")
+ print("\n📤 STEP 1: Export from mem0")
print("-" * 40)
-
+
# You can add filters here if needed
# Example: filters = {"AND": [{"user_id": "specific_user"}]}
filters = None
-
+
mem0_data = export_from_mem0(
api_key=mem0_api_key,
org_id=mem0_org_id,
project_id=mem0_project_id,
- filters=filters
+ filters=filters,
)
-
+
# Step 2: Import to Supermemory
print("\n📥 STEP 2: Import to Supermemory")
print("-" * 40)
-
+
stats = import_to_supermemory(mem0_data, supermemory_api_key)
-
+
# Step 3: Verify migration
print("\n✔️ STEP 3: Verification")
print("-" * 40)
-
+
expected_count = len(mem0_data.get("memories", []))
verify_migration(supermemory_api_key, expected_count)
-
+
# Final summary
print("\n" + "=" * 60)
print("📊 MIGRATION SUMMARY")
@@ -321,17 +319,18 @@ def main():
print(f"✅ Successfully imported: {stats['imported']}")
print(f"⚠️ Skipped (no content): {stats['skipped']}")
print(f"❌ Failed: {stats['failed']}")
-
- if stats['imported'] == expected_count - stats['skipped']:
+
+ if stats["imported"] == expected_count - stats["skipped"]:
print("\n🎉 Migration completed successfully!")
- elif stats['imported'] > 0:
+ elif stats["imported"] > 0:
print("\n⚠️ Migration completed with some issues. Check the logs above.")
else:
print("\n❌ Migration failed. Please check your credentials and try again.")
-
+
except Exception as e:
print(f"\n❌ Migration error: {str(e)}")
print("Please check your credentials and network connection.")
+
if __name__ == "__main__":
- main() \ No newline at end of file
+ main()
diff --git a/apps/docs/org-settings.mdx b/apps/docs/org-settings.mdx
deleted file mode 100644
index 0d954603..00000000
--- a/apps/docs/org-settings.mdx
+++ /dev/null
@@ -1,265 +0,0 @@
----
-title: "Organization Settings"
-description: "Configure organization-wide settings and content filtering for Supermemory"
-icon: "settings"
----
-
-Organization settings control how Supermemory processes content across your entire organization. These settings apply to all memories and connectors, helping you:
-
-- Filter content before indexing
-- Configure custom OAuth applications for connectors
-- Set organization-wide processing rules
-- Control what gets indexed and what gets excluded
-
-<Note>
-Settings are organization-wide and apply to all users and memories within your organization.
-</Note>
-
-## Why Settings Matter
-
-The settings endpoint is crucial for teaching Supermemory about your specific use case. It helps Supermemory understand:
-
-- **What you are**: Your organization's specific use case and purpose
-- **What to expect**: The types of content and information flowing through your system
-- **How to interpret**: Context for understanding queries in your specific use case
-- **What to prioritize**: Which content matters most for your users
-
-### Example: Brand Guidelines Use Case
-
-Without proper settings, when a user searches "what are our values?", Supermemory might return random documents mentioning "values". But with proper configuration:
-
-```typescript
-await client.settings.update({
- shouldLLMFilter: true,
- filterPrompt: `You are managing brand guidelines for Brand.ai.
- You will receive all outbound content from our organization.
-
- When users search, they're looking for:
- - "What are our values?" → Return official brand values document
- - "What's our tone of voice?" → Return brand voice guidelines
- - "How do we describe our mission?" → Return approved mission statements
-
- Focus on the latest approved brand materials, not drafts or outdated versions.`
-});
-```
-
-Now Supermemory understands that:
-- Searches about "values" refer to brand values, not financial values
-- "Tone" means brand voice, not audio settings
-- Priority should be given to official, approved content
-
-This context dramatically improves search relevance and ensures users get the right information for their specific use case.
-
-## API Endpoints
-
-### Get Current Settings
-
-Retrieve your organization's current settings configuration.
-
-<CodeGroup>
-
-```typescript TypeScript
-const settings = await client.settings.get();
-console.log('Current settings:', settings);
-```
-
-```python Python
-settings = client.settings.get()
-print(f'Current settings: {settings}')
-```
-
-```bash cURL
-curl -X GET "https://api.supermemory.ai/v3/settings" \
- -H "Authorization: Bearer $SUPERMEMORY_API_KEY"
-```
-
-</CodeGroup>
-
-### Update Settings
-
-Update your organization's settings. You only need to include the fields you want to change.
-
-<CodeGroup>
-
-```typescript TypeScript
-const updatedSettings = await client.settings.update({
- shouldLLMFilter: true,
- filterPrompt: "Only index technical documentation and code",
- includeItems: ["*.md", "*.ts", "*.py"],
- excludeItems: ["node_modules", ".git", "*.test.*"]
-});
-
-console.log('Updated fields:', updatedSettings.updated);
-```
-
-```python Python
-updated_settings = client.settings.update(
- should_llm_filter=True,
- filter_prompt="Only index technical documentation and code",
- include_items=["*.md", "*.ts", "*.py"],
- exclude_items=["node_modules", ".git", "*.test.*"]
-)
-
-print(f'Updated fields: {updated_settings.updated}')
-```
-
-```bash cURL
-curl -X PATCH "https://api.supermemory.ai/v3/settings" \
- -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
- -H "Content-Type: application/json" \
- -d '{
- "shouldLLMFilter": true,
- "filterPrompt": "Only index technical documentation and code",
- "includeItems": ["*.md", "*.ts", "*.py"],
- "excludeItems": ["node_modules", ".git", "*.test.*"]
- }'
-```
-
-</CodeGroup>
-
-## Content Filtering Settings
-
-Control what content gets indexed into Supermemory.
-
-### Basic Filtering
-
-Use include/exclude patterns to filter content:
-
-```typescript
-await client.settings.update({
- includeItems: [
- "*.md", // All markdown files
- "*.mdx", // MDX documentation
- "docs/**", // Everything in docs folder
- "src/**/*.ts" // TypeScript files in src
- ],
- excludeItems: [
- "node_modules", // Dependencies
- ".git", // Version control
- "*.test.*", // Test files
- "build/**", // Build outputs
- "*.tmp" // Temporary files
- ]
-});
-```
-
-### Intelligent LLM Filtering
-
-Enable AI-powered content filtering for semantic understanding:
-
-```typescript
-await client.settings.update({
- shouldLLMFilter: true,
- filterPrompt: `You are filtering content for a technical documentation system.
-
- Include:
- - API documentation
- - Code examples and tutorials
- - Technical guides and references
- - Architecture documentation
-
- Exclude:
- - Marketing materials
- - Internal meeting notes
- - Personal information
- - Outdated or deprecated content
-
- Focus on content that helps developers understand and use our APIs.`
-});
-```
-
-## Connector OAuth Settings
-
-Configure custom OAuth applications for connector integrations.
-
-### Google Drive Custom OAuth
-
-```typescript
-await client.settings.update({
- googleDriveCustomKeyEnabled: true,
- googleDriveClientId: "your-client-id.apps.googleusercontent.com",
- googleDriveClientSecret: "your-client-secret"
-});
-```
-
-### Notion Custom OAuth
-
-```typescript
-await client.settings.update({
- notionCustomKeyEnabled: true,
- notionClientId: "your-notion-oauth-client-id",
- notionClientSecret: "your-notion-oauth-client-secret"
-});
-```
-
-### OneDrive Custom OAuth
-
-```typescript
-await client.settings.update({
- onedriveCustomKeyEnabled: true,
- onedriveClientId: "your-azure-app-id",
- onedriveClientSecret: "your-azure-app-secret"
-});
-```
-
-## Best Practices
-
-### 1. Set Before Bulk Import
-Configure settings before importing large amounts of content. Changes don't retroactively affect existing memories.
-
-### 2. Be Specific in Filter Prompts
-Provide clear context about your organization and expected search patterns:
-
-```typescript
-// Good - Specific and contextual
-filterPrompt: `Technical documentation for developers.
- Include: API references, code examples, error solutions.
- Exclude: marketing content, personal data, test files.
- Users search for: implementation details, troubleshooting, best practices.`
-
-// Bad - Too vague
-filterPrompt: "Only important content"
-```
-
-### 3. Test OAuth Credentials
-Always test custom OAuth credentials in development before production:
-
-```typescript
-// Test connection after updating OAuth settings
-const testConnection = await client.connections.create('google-drive', {
- redirectUrl: 'https://yourapp.com/callback',
- containerTags: ['test-connection']
-});
-```
-
-### 4. Monitor Filter Effectiveness
-Check what's being indexed to ensure filters work as expected:
-
-```typescript
-const memories = await client.memories.list({
- containerTags: ['your-tags'],
- limit: 10
-});
-
-// Review what's actually being indexed
-memories.memories.forEach(memory => {
- console.log(`Indexed: ${memory.title} - ${memory.type}`);
-});
-```
-
-## Important Notes
-
-<Warning>
-**Settings Limitations:**
-- Changes are organization-wide, not per-user
-- Settings don't retroactively process existing memories
-- OAuth credentials must be properly configured in respective platforms
-- Filter patterns are applied during content ingestion
-</Warning>
-
-## Related Documentation
-
-- [Connectors Overview](/connectors/overview) - Setting up external integrations
-- [Google Drive Setup](/connectors/google-drive) - Configure Google Drive OAuth
-- [Notion Setup](/connectors/notion) - Configure Notion OAuth
-- [OneDrive Setup](/connectors/onedrive) - Configure OneDrive OAuth \ No newline at end of file
diff --git a/apps/docs/quickstart.mdx b/apps/docs/quickstart.mdx
index 04418c4c..963f1389 100644
--- a/apps/docs/quickstart.mdx
+++ b/apps/docs/quickstart.mdx
@@ -1,10 +1,11 @@
---
title: Quickstart
description: Make your first API call to Supermemory - add and retrieve memories.
+icon: "play"
---
<Tip>
-**Using Vercel AI SDK?** Check out the [AI SDK integration](/ai-sdk/overview) for the cleanest implementation with `@supermemory/tools/ai-sdk`.
+**Using Vercel AI SDK?** Check out the [AI SDK integration](/integrations/ai-sdk) for the cleanest implementation with `@supermemory/tools/ai-sdk`.
</Tip>
## Memory API
@@ -49,14 +50,18 @@ conversation = [
# Get user profile + relevant memories for context
profile = client.profile(container_tag=USER_ID, q=conversation[-1]["content"])
+static = "\n".join(profile.profile.static)
+dynamic = "\n".join(profile.profile.dynamic)
+memories = "\n".join(r.get("memory", "") for r in profile.search_results.results)
+
context = f"""Static profile:
-{"\n".join(profile.profile.static)}
+{static}
Dynamic profile:
-{"\n".join(profile.profile.dynamic)}
+{dynamic}
Relevant memories:
-{"\n".join(r.content for r in profile.search_results.results)}"""
+{memories}"""
# Build messages with memory-enriched context
messages = [{"role": "system", "content": f"User context:\n{context}"}, *conversation]
@@ -96,7 +101,7 @@ Dynamic profile:
${profile.profile.dynamic.join("\n")}
Relevant memories:
-${profile.searchResults.results.map((r) => r.content).join("\n")}`;
+${profile.searchResults.results.map((r) => r.memory).join("\n")}`;
// Build messages with memory-enriched context
const messages = [{ role: "system", content: `User context:\n${context}` }, ...conversation];
@@ -104,7 +109,7 @@ const messages = [{ role: "system", content: `User context:\n${context}` }, ...c
// const response = await llm.chat({ messages });
// Store conversation for future context
-await client.memories.add({
+await client.add({
content: conversation.map((m) => `${m.role}: ${m.content}`).join("\n"),
containerTag: USER_ID,
});
@@ -121,4 +126,4 @@ That's it! Supermemory automatically:
**Optional:** Use the `threshold` parameter to filter search results by relevance score. For example: `client.profile(container_tag=USER_ID, threshold=0.7, q=query)` will only include results with a score above 0.7.
</Tip>
-Learn more about [User Profiles](/user-profiles) and [Search](/search/overview).
+Learn more about [User Profiles](/user-profiles) and [Search](/search).
diff --git a/apps/docs/search.mdx b/apps/docs/search.mdx
new file mode 100644
index 00000000..15f4861d
--- /dev/null
+++ b/apps/docs/search.mdx
@@ -0,0 +1,247 @@
+---
+title: "Search"
+sidebarTitle: "Search Memories and Docs"
+description: "Semantic search across your memories and documents"
+icon: "search"
+---
+
+Search through your memories and documents with a single API call.
+
+<Tip>
+**Use `searchMode: "hybrid"`** for best results. It searches both memories and document chunks, returning the most relevant content.
+</Tip>
+
+## Quick Start
+
+<Tabs>
+ <Tab title="TypeScript">
+ ```typescript
+ import Supermemory from 'supermemory';
+
+ const client = new Supermemory();
+
+ const results = await client.search.memories({
+ q: "machine learning",
+ containerTag: "user_123",
+ searchMode: "hybrid",
+ limit: 5
+ });
+
+ results.results.forEach(result => {
+ console.log(result.memory || result.chunk, result.similarity);
+ });
+ ```
+ </Tab>
+ <Tab title="Python">
+ ```python
+ from supermemory import Supermemory
+
+ client = Supermemory()
+
+ results = client.search.memories(
+ q="machine learning",
+ container_tag="user_123",
+ search_mode="hybrid",
+ limit=5
+ )
+
+ for result in results.results:
+ print(result.memory or result.chunk, result.similarity)
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ curl -X POST "https://api.supermemory.ai/v4/search" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "q": "machine learning",
+ "containerTag": "user_123",
+ "searchMode": "hybrid",
+ "limit": 5
+ }'
+ ```
+ </Tab>
+</Tabs>
+
+**Response:**
+```json
+{
+ "results": [
+ {
+ "id": "mem_xyz",
+ "memory": "User is interested in machine learning for product recommendations",
+ "similarity": 0.91,
+ "metadata": { "topic": "interests" },
+ "updatedAt": "2024-01-15T10:30:00.000Z",
+ "version": 1
+ },
+ {
+ "id": "chunk_abc",
+ "chunk": "Machine learning enables personalized experiences at scale...",
+ "similarity": 0.87,
+ "metadata": { "source": "onboarding_doc" },
+ "updatedAt": "2024-01-14T09:15:00.000Z",
+ "version": 1
+ }
+ ],
+ "timing": 92,
+ "total": 5
+}
+```
+
+<Info>
+In hybrid mode, results contain either a `memory` field (extracted facts) or a `chunk` field (document content), depending on the source.
+</Info>
+
+---
+
+## Parameters
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|-------------|
+| `q` | string | required | Search query |
+| `containerTag` | string | — | Filter by user/project |
+| `searchMode` | string | `"hybrid"` | `"hybrid"` (recommended) or `"memories"` |
+| `limit` | number | 10 | Max results |
+| `threshold` | 0-1 | 0.5 | Similarity cutoff (higher = fewer, better results) |
+| `rerank` | boolean | false | Re-score for better relevance (+100ms) |
+| `filters` | object | — | Metadata filters (`AND`/`OR` structure) |
+
+### Search Modes
+
+- **`hybrid`** (recommended) — Searches both memories and document chunks, returns the most relevant
+- **`memories`** — Only searches extracted memories
+
+```typescript
+// Hybrid: memories + document chunks (recommended)
+await client.search.memories({
+ q: "quarterly goals",
+ containerTag: "user_123",
+ searchMode: "hybrid"
+});
+
+// Memories only: just extracted facts
+await client.search.memories({
+ q: "user preferences",
+ containerTag: "user_123",
+ searchMode: "memories"
+});
+```
+
+---
+
+## Filtering
+
+Filter by `containerTag` to scope results to a user or project:
+
+```typescript
+const results = await client.search.memories({
+ q: "project updates",
+ containerTag: "user_123",
+ searchMode: "hybrid"
+});
+```
+
+Use `filters` for metadata-based filtering:
+
+```typescript
+const results = await client.search.memories({
+ q: "meeting notes",
+ containerTag: "user_123",
+ filters: {
+ AND: [
+ { key: "type", value: "meeting" },
+ { key: "year", value: "2024" }
+ ]
+ }
+});
+```
+
+<Accordion title="Filter Types">
+ - **String equality:** `{ key: "status", value: "active" }`
+ - **String contains:** `{ filterType: "string_contains", key: "title", value: "react" }`
+ - **Numeric:** `{ filterType: "numeric", key: "priority", value: "5", numericOperator: ">=" }`
+ - **Array contains:** `{ filterType: "array_contains", key: "tags", value: "important" }`
+ - **Negate:** `{ key: "status", value: "draft", negate: true }`
+
+ See [Organizing & Filtering](/concepts/filtering) for full syntax.
+</Accordion>
+
+---
+
+## Query Optimization
+
+### Reranking
+
+Re-scores results for better relevance. Adds ~100ms latency.
+
+```typescript
+const results = await client.search.memories({
+ q: "complex technical question",
+ containerTag: "user_123",
+ rerank: true
+});
+```
+
+### Threshold
+
+Control result quality vs quantity:
+
+```typescript
+// Broad search — more results
+await client.search.memories({ q: "...", threshold: 0.3 });
+
+// Precise search — fewer, better results
+await client.search.memories({ q: "...", threshold: 0.8 });
+```
+
+---
+
+## Chatbot Example
+
+Optimal configuration for conversational AI:
+
+```typescript
+async function getContext(userId: string, message: string) {
+ const results = await client.search.memories({
+ q: message,
+ containerTag: userId,
+ searchMode: "hybrid",
+ threshold: 0.6,
+ limit: 5
+ });
+
+ return results.results
+ .map(r => r.memory || r.chunk)
+ .join('\n\n');
+}
+```
+
+<Accordion title="Response Schema">
+ ```typescript
+ interface SearchResult {
+ id: string;
+ memory?: string; // Present for memory results
+ chunk?: string; // Present for document chunk results
+ similarity: number; // 0-1
+ metadata: object | null;
+ updatedAt: string;
+ version: number;
+ }
+
+ interface SearchResponse {
+ results: SearchResult[];
+ timing: number; // ms
+ total: number;
+ }
+ ```
+</Accordion>
+
+---
+
+## Next Steps
+
+- [Ingesting Content](/add-memories) — Add content to search
+- [User Profiles](/user-profiles) — Get user context with search
+- [Organizing & Filtering](/concepts/filtering) — Container tags and metadata
diff --git a/apps/docs/search/filtering.mdx b/apps/docs/search/filtering.mdx
deleted file mode 100644
index a9efecba..00000000
--- a/apps/docs/search/filtering.mdx
+++ /dev/null
@@ -1,902 +0,0 @@
----
-title: "Filtering Memories"
-description: "Filter and search memories using container tags and metadata"
-icon: "filter"
----
-
-Supermemory provides two complementary filtering mechanisms that work independently or together to help you find exactly what you need.
-
-## How Filtering Works
-
-Supermemory uses two types of filters for different purposes:
-
-<CardGroup cols={2}>
- <Card title="Container Tags" icon="folder">
- **Organize memories** into isolated spaces by user, project, or workspace
- </Card>
- <Card title="Metadata Filtering" icon="database">
- **Query memories** by custom properties like category, status, or date
- </Card>
-</CardGroup>
-
-Both filtering types can be used:
-- **Independently** - Use container tags alone OR metadata filters alone
-- **Together** - Combine both for precise filtering (most common)
-
-Think of it as: `[Container Tags] → [Your Memories] ← [Metadata Filters]`
-
-## Container Tags
-
-Container tags create isolated memory spaces. They're perfect for multi-tenant applications, user profiles, and project organization.
-
-### How Container Tags Work
-
-- **Exact matching**: Arrays must match exactly. A memory tagged with `["user_123", "project_ai"]` will NOT match a search for just `["user_123"]`
-- **Isolation**: Each container tag combination creates a separate knowledge graph
-- **Naming patterns**: Use consistent patterns like `user_{id}`, `project_{id}`, or `org_{id}_team_{id}`
-
-### Basic Usage
-
-<Tabs>
- <Tab title="TypeScript">
- ```typescript
- // Search within a user's memories
- const results = await client.search.documents({
- q: "machine learning notes",
- containerTags: ["user_123"],
- limit: 10
- });
-
- // Search within a project
- const projectResults = await client.search.documents({
- q: "requirements",
- containerTags: ["project_ai"],
- limit: 10
- });
- ```
- </Tab>
- <Tab title="Python">
- ```python
- # Search within a user's memories
- results = client.search.documents(
- q="machine learning notes",
- container_tags=["user_123"],
- limit=10
- )
-
- # Search within a project
- project_results = client.search.documents(
- q="requirements",
- container_tags=["project_ai"],
- limit=10
- )
- ```
- </Tab>
- <Tab title="cURL">
- ```bash
- # Search within a user's memories
- curl -X POST "https://api.supermemory.ai/v3/search" \
- -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
- -H "Content-Type: application/json" \
- -d '{
- "q": "machine learning notes",
- "containerTags": ["user_123"],
- "limit": 10
- }'
- ```
- </Tab>
-</Tabs>
-
-### Container Tag Patterns
-
-<Tip>
-**Best Practice**: Use single container tags when possible. Multi-tag arrays require exact matching, which can be restrictive.
-</Tip>
-
-#### Recommended Patterns
-- User isolation: `user_{userId}`
-- Project grouping: `project_{projectId}`
-- Workspace separation: `workspace_{workspaceId}`
-- Hierarchical: `org_{orgId}_team_{teamId}`
-- Temporal: `user_{userId}_2024_q1`
-
-#### API Differences
-
-| Endpoint | Field Name | Type | Example |
-|----------|------------|------|---------|
-| `/v3/search` | `containerTags` | Array | `["user_123"]` |
-| `/v4/search` | `containerTag` | String | `"user_123"` |
-| `/v3/documents/list` | `containerTags` | Array | `["user_123"]` |
-
-## Metadata Filtering
-
-Metadata filters let you query memories by any custom property. They use SQL-like AND/OR logic with explicit grouping.
-
-### Filter Structure
-
-All metadata filters must be wrapped in AND or OR arrays:
-
-```javascript
-// ✅ Correct - wrapped in AND array
-filters: {
- AND: [
- { key: "category", value: "tech", negate: false }
- ]
-}
-
-// ❌ Wrong - not wrapped
-filters: {
- key: "category", value: "tech", negate: false
-}
-```
-
-### Why Explicit Grouping?
-
-Without explicit grouping, this condition is easy to misread, because `AND` binds more tightly than `OR`:
-```sql
-category = 'tech' OR status = 'published' AND priority = 'high'
-```
-
-Our structure forces clarity:
-```javascript
-// Clear: (category = 'tech') OR (status = 'published' AND priority = 'high')
-{
- OR: [
- { key: "category", value: "tech" },
- { AND: [
- { key: "status", value: "published" },
- { key: "priority", value: "high" }
- ]}
- ]
-}
-```
-
-### Basic Metadata Filtering
-
-<Tabs>
- <Tab title="TypeScript">
- ```typescript
- // Single condition
- const results = await client.search.documents({
- q: "neural networks",
- filters: {
- AND: [
- { key: "category", value: "ai", negate: false }
- ]
- },
- limit: 10
- });
-
- // Multiple AND conditions
- const filtered = await client.search.documents({
- q: "research",
- filters: {
- AND: [
- { key: "category", value: "science", negate: false },
- { key: "status", value: "published", negate: false },
- { key: "year", value: "2024", negate: false }
- ]
- },
- limit: 10
- });
- ```
- </Tab>
- <Tab title="Python">
- ```python
- # Single condition
- results = client.search.documents(
- q="neural networks",
- filters={
- "AND": [
- {"key": "category", "value": "ai", "negate": False}
- ]
- },
- limit=10
- )
-
- # Multiple AND conditions
- filtered = client.search.documents(
- q="research",
- filters={
- "AND": [
- {"key": "category", "value": "science", "negate": False},
- {"key": "status", "value": "published", "negate": False},
- {"key": "year", "value": "2024", "negate": False}
- ]
- },
- limit=10
- )
- ```
- </Tab>
- <Tab title="cURL">
- ```bash
- # Single condition
- curl -X POST "https://api.supermemory.ai/v3/search" \
- -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
- -H "Content-Type: application/json" \
- -d '{
- "q": "neural networks",
- "filters": {
- "AND": [
- {"key": "category", "value": "ai", "negate": false}
- ]
- },
- "limit": 10
- }'
- ```
- </Tab>
-</Tabs>
-
-## Filter Types in Detail
-
-Supermemory supports four filter types, each designed for specific use cases.
-
-### 1. String Equality (Default)
-
-Exact string matching with optional case-insensitive comparison.
-
-<Tabs>
- <Tab title="Basic">
- ```javascript
- // Case-sensitive exact match (default)
- {
- key: "status",
- value: "Published",
- negate: false
- }
- ```
- </Tab>
- <Tab title="Case-Insensitive">
- ```javascript
- // Matches "published", "Published", "PUBLISHED"
- {
- key: "status",
- value: "PUBLISHED",
- ignoreCase: true,
- negate: false
- }
- ```
- </Tab>
- <Tab title="Negation">
- ```javascript
- // Exclude specific status
- {
- key: "status",
- value: "draft",
- negate: true
- }
- ```
- </Tab>
-</Tabs>
-
-### 2. String Contains
-
-Search for substrings within text fields.
-
-<Tabs>
- <Tab title="Basic">
- ```javascript
- // Find all documents containing "machine learning"
- {
- filterType: "string_contains",
- key: "description",
- value: "machine learning",
- negate: false
- }
- ```
- </Tab>
- <Tab title="Case-Insensitive">
- ```javascript
- // Case-insensitive substring search
- {
- filterType: "string_contains",
- key: "title",
- value: "NEURAL",
- ignoreCase: true,
- negate: false
- }
- ```
- </Tab>
- <Tab title="Exclusion">
- ```javascript
- // Exclude documents containing "deprecated"
- {
- filterType: "string_contains",
- key: "content",
- value: "deprecated",
- negate: true
- }
- ```
- </Tab>
-</Tabs>
-
-### 3. Numeric Comparisons
-
-Filter by numeric values with comparison operators.
-
-<Tabs>
- <Tab title="Basic Operators">
- ```javascript
- // Greater than or equal
- {
- filterType: "numeric",
- key: "score",
- value: "80",
- numericOperator: ">=",
- negate: false
- }
-
- // Less than
- {
- filterType: "numeric",
- key: "readingTime",
- value: "10",
- numericOperator: "<",
- negate: false
- }
- ```
- </Tab>
- <Tab title="With Negation">
- ```javascript
- // NOT equal to 5 (becomes !=)
- {
- filterType: "numeric",
- key: "priority",
- value: "5",
- numericOperator: "=",
- negate: true
- }
-
- // NOT less than 80 (becomes >=)
- {
- filterType: "numeric",
- key: "score",
- value: "80",
- numericOperator: "<",
- negate: true
- }
- ```
- </Tab>
-</Tabs>
-
-<Note>
-**Numeric Negation Mapping**:
-When using `negate: true` with numeric filters, operators are reversed:
-- `<` → `>=`
-- `<=` → `>`
-- `>` → `<=`
-- `>=` → `<`
-- `=` → `!=`
-</Note>
-
-### 4. Array Contains
-
-Check if an array field contains a specific value.
-
-<Tabs>
- <Tab title="Basic">
- ```javascript
- // Find documents with specific participant
- {
- filterType: "array_contains",
- key: "participants",
- value: "john.doe",
- negate: false
- }
- ```
- </Tab>
- <Tab title="Exclusion">
- ```javascript
- // Exclude documents with specific tag
- {
- filterType: "array_contains",
- key: "tags",
- value: "archived",
- negate: true
- }
- ```
- </Tab>
- <Tab title="Multiple Checks">
- ```javascript
- // Must have both participants (use AND)
- {
- AND: [
- {
- filterType: "array_contains",
- key: "participants",
- value: "project.manager"
- },
- {
- filterType: "array_contains",
- key: "participants",
- value: "lead.developer"
- }
- ]
- }
- ```
- </Tab>
-</Tabs>
-
-## Common Patterns
-
-Ready-to-use filtering patterns for common scenarios.
-
-### User-Specific Content with Category
-
-<Tabs>
- <Tab title="TypeScript">
- ```typescript
- const results = await client.search.documents({
- q: "project updates",
- containerTags: ["user_123"],
- filters: {
- AND: [
- { key: "category", value: "work", negate: false },
- { key: "visibility", value: "private", negate: false }
- ]
- },
- limit: 10
- });
- ```
- </Tab>
- <Tab title="Python">
- ```python
- results = client.search.documents(
- q="project updates",
- container_tags=["user_123"],
- filters={
- "AND": [
- {"key": "category", "value": "work", "negate": False},
- {"key": "visibility", "value": "private", "negate": False}
- ]
- },
- limit=10
- )
- ```
- </Tab>
-</Tabs>
-
-### Recent High-Priority Content
-
-<Tabs>
- <Tab title="TypeScript">
- ```typescript
- const results = await client.search.documents({
- q: "important tasks",
- filters: {
- AND: [
- {
- filterType: "numeric",
- key: "priority",
- value: "7",
- numericOperator: ">=",
- negate: false
- },
- {
- filterType: "numeric",
- key: "created_timestamp",
- value: "1704067200", // 2024-01-01
- numericOperator: ">=",
- negate: false
- }
- ]
- },
- limit: 20
- });
- ```
- </Tab>
- <Tab title="Python">
- ```python
- results = client.search.documents(
- q="important tasks",
- filters={
- "AND": [
- {
- "filterType": "numeric",
- "key": "priority",
- "value": "7",
- "numericOperator": ">=",
- "negate": False
- },
- {
- "filterType": "numeric",
- "key": "created_timestamp",
- "value": "1704067200", # 2024-01-01
- "numericOperator": ">=",
- "negate": False
- }
- ]
- },
- limit=20
- )
- ```
- </Tab>
-</Tabs>
-
-### Team Collaboration Filter
-
-<Tabs>
- <Tab title="TypeScript">
- ```typescript
- const results = await client.search.documents({
- q: "meeting notes",
- containerTags: ["project_alpha"],
- filters: {
- AND: [
- {
- OR: [
- {
- filterType: "array_contains",
- key: "participants",
- value: "alice"
- },
- {
- filterType: "array_contains",
- key: "participants",
- value: "bob"
- }
- ]
- },
- {
- key: "type",
- value: "meeting",
- negate: false
- }
- ]
- },
- limit: 15
- });
- ```
- </Tab>
- <Tab title="Python">
- ```python
- results = client.search.documents(
- q="meeting notes",
- container_tags=["project_alpha"],
- filters={
- "AND": [
- {
- "OR": [
- {
- "filterType": "array_contains",
- "key": "participants",
- "value": "alice"
- },
- {
- "filterType": "array_contains",
- "key": "participants",
- "value": "bob"
- }
- ]
- },
- {
- "key": "type",
- "value": "meeting",
- "negate": False
- }
- ]
- },
- limit=15
- )
- ```
- </Tab>
-</Tabs>
-
-### Exclude Drafts and Deprecated Content
-
-<Tabs>
- <Tab title="TypeScript">
- ```typescript
- const results = await client.search.documents({
- q: "documentation",
- filters: {
- AND: [
- {
- key: "status",
- value: "draft",
- negate: true // Exclude drafts
- },
- {
- filterType: "string_contains",
- key: "content",
- value: "deprecated",
- negate: true // Exclude deprecated
- },
- {
- filterType: "array_contains",
- key: "tags",
- value: "archived",
- negate: true // Exclude archived
- }
- ]
- },
- limit: 10
- });
- ```
- </Tab>
- <Tab title="Python">
- ```python
- results = client.search.documents(
- q="documentation",
- filters={
- "AND": [
- {
- "key": "status",
- "value": "draft",
- "negate": True # Exclude drafts
- },
- {
- "filterType": "string_contains",
- "key": "content",
- "value": "deprecated",
- "negate": True # Exclude deprecated
- },
- {
- "filterType": "array_contains",
- "key": "tags",
- "value": "archived",
- "negate": True # Exclude archived
- }
- ]
- },
- limit=10
- )
- ```
- </Tab>
-</Tabs>
-
-## API-Specific Notes
-
-Different endpoints have slightly different requirements:
-
-| Endpoint | Container Tag Field | Type | Filter Format | Notes |
-|----------|---------------------|------|---------------|-------|
-| `/v3/search` | `containerTags` | Array | JSON object | Document search |
-| `/v4/search` | `containerTag` | String | JSON object | Memory search |
-| `/v3/documents/list` | `containerTags` | Array | **JSON string** | Must use `JSON.stringify()` |
-
-<Warning>
-**List API Special Requirement**: The `/v3/documents/list` endpoint requires filters as a JSON string:
-
-```javascript
-// ✅ Correct for List API
-filters: JSON.stringify({ AND: [...] })
-
-// ❌ Wrong for List API (but correct for Search API)
-filters: { AND: [...] }
-```
-</Warning>
-
-## Combining Container Tags and Metadata
-
-Most real-world applications combine both filtering types for precise control.
-
-### Example: User's Work Documents from 2024
-
-<Tabs>
- <Tab title="TypeScript">
- ```typescript
- const results = await client.search.documents({
- q: "quarterly report",
- containerTags: ["user_123"], // User isolation
- filters: {
- AND: [
- { key: "category", value: "work" },
- { key: "type", value: "report" },
- {
- filterType: "numeric",
- key: "year",
- value: "2024",
- numericOperator: "="
- }
- ]
- },
- limit: 10
- });
- ```
- </Tab>
- <Tab title="Python">
- ```python
- results = client.search.documents(
- q="quarterly report",
- container_tags=["user_123"], # User isolation
- filters={
- "AND": [
- {"key": "category", "value": "work"},
- {"key": "type", "value": "report"},
- {
- "filterType": "numeric",
- "key": "year",
- "value": "2024",
- "numericOperator": "="
- }
- ]
- },
- limit=10
- )
- ```
- </Tab>
-</Tabs>
-
-### Example: Project's Active High-Priority Tasks
-
-<Tabs>
- <Tab title="TypeScript">
- ```typescript
- const results = await client.search.documents({
- q: "implementation",
- containerTags: ["project_alpha"], // Project isolation
- filters: {
- AND: [
- {
- key: "status",
- value: "completed",
- negate: true // Not completed
- },
- {
- filterType: "numeric",
- key: "priority",
- value: "7",
- numericOperator: ">=",
- negate: false
- },
- {
- filterType: "array_contains",
- key: "assignees",
- value: "current_user"
- }
- ]
- },
- limit: 20
- });
- ```
- </Tab>
- <Tab title="Python">
- ```python
- results = client.search.documents(
- q="implementation",
- container_tags=["project_alpha"], # Project isolation
- filters={
- "AND": [
- {
- "key": "status",
- "value": "completed",
- "negate": True # Not completed
- },
- {
- "filterType": "numeric",
- "key": "priority",
- "value": "7",
- "numericOperator": ">=",
- "negate": False
- },
- {
- "filterType": "array_contains",
- "key": "assignees",
- "value": "current_user"
- }
- ]
- },
- limit=20
- )
- ```
- </Tab>
-</Tabs>
-
-## Document-Specific Search
-
-Search within a single large document using the `docId` parameter:
-
-<Tabs>
- <Tab title="TypeScript">
- ```typescript
- // Search within a specific book or manual
- const results = await client.search.documents({
- q: "neural architecture",
- docId: "doc_textbook_ml_2024",
- limit: 20
- });
- ```
- </Tab>
- <Tab title="Python">
- ```python
- # Search within a specific book or manual
- results = client.search.documents(
- q="neural architecture",
- doc_id="doc_textbook_ml_2024",
- limit=20
- )
- ```
- </Tab>
-</Tabs>
-
-Use this for:
-- Large textbooks or manuals
-- Multi-chapter books
-- Long podcast transcripts
-- Course materials
-
-## Validation & Limits
-
-### Metadata Key Requirements
-- **Pattern**: `/^[a-zA-Z0-9_.-]+$/`
-- **Allowed**: Letters, numbers, underscore, hyphen, dot
-- **Max length**: 64 characters
-- **No spaces or special characters**
-
-### Valid vs Invalid Keys
-```javascript
-// ✅ Valid keys
-"user_email"
-"created-date"
-"version.number"
-"priority_level_2"
-
-// ❌ Invalid keys
-"user email" // Spaces not allowed
-"created@date" // @ not allowed
-"priority!" // ! not allowed
-"this_is_a_very_long_metadata_key_name_that_definitely_exceeds_the_64_character_limit" // Too long (over 64 characters)
-```
-
-### Query Complexity Limits
-- **Maximum conditions**: 200 per query
-- **Maximum nesting depth**: 8 levels
-- **Container tag arrays**: Must match exactly
-
-## Troubleshooting
-
-### No Results Returned
-
-<AccordionGroup>
- <Accordion title="Container tag mismatch">
- **Problem**: Container tags must match exactly as arrays.
-
- **Solution**: Verify the exact array structure. `["user_123"]` ≠ `["user_123", "project_1"]`
- </Accordion>
- <Accordion title="Wrong metadata key casing">
- **Problem**: Keys are case-sensitive by default.
-
- **Solution**: Check exact key spelling and casing, or use `ignoreCase: true` for values.
- </Accordion>
- <Accordion title="Incorrect negate value">
- **Problem**: Using `negate: true` when you meant `false`.
-
- **Solution**: Review your negate values. `false` = include, `true` = exclude.
- </Accordion>
-</AccordionGroup>
-
-### Validation Errors
-
-<AccordionGroup>
- <Accordion title="Invalid metadata key format">
- **Error**: "Invalid metadata key: contains unsafe characters"
-
- **Solution**: Remove spaces, special characters. Use only alphanumeric, underscore, hyphen, dot.
- </Accordion>
- <Accordion title="Filter structure error">
- **Error**: "Invalid filter structure"
-
- **Solution**: Ensure all conditions are wrapped in AND or OR arrays.
- </Accordion>
- <Accordion title="List API filter error">
- **Error**: "Invalid filter format"
-
- **Solution**: For `/v3/documents/list`, use `JSON.stringify()` on the filter object.
- </Accordion>
-</AccordionGroup>
-
-### Performance Issues
-
-<AccordionGroup>
- <Accordion title="Slow queries">
- **Problem**: Complex nested OR conditions with many branches.
-
- **Solution**: Simplify logic, reduce nesting, or split into multiple queries.
- </Accordion>
- <Accordion title="Hitting complexity limits">
- **Problem**: "Query exceeds maximum complexity"
-
- **Solution**: Reduce conditions (max 200) or nesting depth (max 8).
- </Accordion>
-</AccordionGroup> \ No newline at end of file
diff --git a/apps/docs/search/overview.mdx b/apps/docs/search/overview.mdx
index 3f888b02..32c2d7da 100644
--- a/apps/docs/search/overview.mdx
+++ b/apps/docs/search/overview.mdx
@@ -345,7 +345,7 @@ This is useful when:
```typescript TypeScript
// Get a specific document by ID
-const document = await client.memories.get("doc_abc123");
+const document = await client.documents.get("doc_abc123");
console.log(document.content); // Full document content
console.log(document.status); // Processing status
@@ -355,7 +355,7 @@ console.log(document.summary); // AI-generated summary
```python Python
# Get a specific document by ID
-document = client.memories.get("doc_abc123")
+document = client.documents.get("doc_abc123")
print(document.content) # Full document content
print(document.status) # Processing status
diff --git a/apps/docs/search/parameters.mdx b/apps/docs/search/parameters.mdx
index f7c2b264..f9df18da 100644
--- a/apps/docs/search/parameters.mdx
+++ b/apps/docs/search/parameters.mdx
@@ -60,7 +60,7 @@ These parameters work across all search endpoints:
```
<Note>
- See [Metadata Filtering Guide](/search/filtering) for complete syntax and examples.
+ See [Metadata Filtering Guide](/concepts/filtering) for complete syntax and examples.
</Note>
</ParamField>
diff --git a/apps/docs/style.css b/apps/docs/style.css
new file mode 100644
index 00000000..b1a5db87
--- /dev/null
+++ b/apps/docs/style.css
@@ -0,0 +1,5 @@
+.dark img[src*="openai.svg"],
+.dark img[src*="pipecat.svg"],
+.dark img[src*="supermemory.svg"] {
+ filter: invert(1);
+}
diff --git a/apps/docs/supermemory-mcp/mcp.mdx b/apps/docs/supermemory-mcp/mcp.mdx
index acf20f93..f317d920 100644
--- a/apps/docs/supermemory-mcp/mcp.mdx
+++ b/apps/docs/supermemory-mcp/mcp.mdx
@@ -1,6 +1,7 @@
---
title: "Overview"
description: "Give your AI assistants persistent memory with the Model Context Protocol"
+icon: "brain-circuit"
---
Supermemory MCP Server 4.0 gives AI assistants (Claude, Cursor, Windsurf, etc.) persistent memory across conversations. Built on Cloudflare Workers with Durable Objects for scalable, persistent connections.
diff --git a/apps/docs/supermemory-mcp/setup.mdx b/apps/docs/supermemory-mcp/setup.mdx
index 647c07f5..92180467 100644
--- a/apps/docs/supermemory-mcp/setup.mdx
+++ b/apps/docs/supermemory-mcp/setup.mdx
@@ -1,6 +1,7 @@
---
title: 'Setup and Usage'
description: 'How to set up and use Supermemory MCP Server 4.0'
+icon: 'settings'
---
## Quick Install (Recommended)
diff --git a/apps/docs/test.py b/apps/docs/test.py
index ee5309d6..85ce6f82 100644
--- a/apps/docs/test.py
+++ b/apps/docs/test.py
@@ -12,14 +12,18 @@ conversation = [
# Get user profile + relevant memories for context
profile = client.profile(container_tag=USER_ID, q=conversation[-1]["content"])
+static = "\n".join(profile.profile.static)
+dynamic = "\n".join(profile.profile.dynamic)
+memories = "\n".join(r.get("memory", "") for r in profile.search_results.results)
+
context = f"""Static profile:
-{ "\n".join(profile.profile.static)}
+{static}
Dynamic profile:
-{"\n".join(profile.profile.dynamic)}
+{dynamic}
Relevant memories:
-{"\n".join(r.content for r in profile.search_results.results)}"""
+{memories}"""
# Build messages with memory-enriched context
messages = [{"role": "system", "content": f"User context:\n{context}"}, *conversation]
diff --git a/apps/docs/update-delete-memories/overview.mdx b/apps/docs/update-delete-memories/overview.mdx
index 926e2971..a4f5f0e1 100644
--- a/apps/docs/update-delete-memories/overview.mdx
+++ b/apps/docs/update-delete-memories/overview.mdx
@@ -20,7 +20,7 @@ const client = new Supermemory({
});
// Update by memory ID
-const updated = await client.memories.update('memory_id_123', {
+const updated = await client.documents.update('memory_id_123', {
content: 'Updated content here',
metadata: { version: 2, updated: true }
});
@@ -36,7 +36,7 @@ import os
client = Supermemory(api_key=os.environ.get("SUPERMEMORY_API_KEY"))
# Update by memory ID
-updated = client.memories.update(
+updated = client.documents.update(
'memory_id_123',
content='Updated content here',
metadata={'version': 2, 'updated': True}
@@ -74,7 +74,7 @@ const client = new Supermemory({
const customId = 'user-note-001';
// First call creates memory
-const created = await client.memories.add({
+const created = await client.add({
content: 'Initial content',
customId: customId,
metadata: { version: 1 }
@@ -83,7 +83,7 @@ const created = await client.memories.add({
console.log('Created memory:', created.id);
// Second call with same customId updates existing
-const updated = await client.memories.add({
+const updated = await client.add({
content: 'Updated content',
customId: customId, // Same customId = upsert
metadata: { version: 2 }
@@ -99,7 +99,7 @@ client = Supermemory(api_key=os.environ.get("SUPERMEMORY_API_KEY"))
custom_id = 'user-note-001'
# First call creates memory
-created = client.memories.add(
+created = client.add(
content='Initial content',
custom_id=custom_id,
metadata={'version': 1}
@@ -108,7 +108,7 @@ created = client.memories.add(
print(f'Created memory: {created.id}')
# Second call with same customId updates existing
-updated = client.memories.add(
+updated = client.add(
content='Updated content',
custom_id=custom_id, # Same customId = upsert
metadata={'version': 2}
@@ -151,7 +151,7 @@ curl -X POST "https://api.supermemory.ai/v3/documents" \
The `customId` enables idempotency across all endpoints. The `memoryId` doesn't support idempotency, only the `customId` does.
</Note>
-<Warning>
+<Warning>
The `customId` can have a maximum length of 100 characters.
@@ -165,18 +165,18 @@ Delete individual memories by their ID. This is a permanent hard delete with no
```typescript Typescript
// Hard delete - permanently removes memory
-await client.memories.delete('memory_id_123');
+await client.documents.delete('memory_id_123');
console.log('Memory deleted successfully');
```
```python Python
# Hard delete - permanently removes memory
-client.memories.delete('memory_id_123')
+client.documents.delete('memory_id_123')
print('Memory deleted successfully')
# Error handling for single delete
try:
- client.memories.delete('memory_id_123')
+ client.documents.delete('memory_id_123')
print('Delete successful')
except NotFoundError:
print('Memory not found or already deleted')
@@ -204,7 +204,7 @@ Delete multiple memories at once by providing an array of memory IDs. Maximum of
```typescript Typescript
// Bulk delete by memory IDs
-const result = await client.memories.bulkDelete({
+const result = await client.documents.deleteBulk({
ids: [
'memory_id_1',
'memory_id_2',
@@ -225,7 +225,7 @@ console.log('Bulk delete result:', result);
```python Python
# Bulk delete by memory IDs
-result = client.memories.bulk_delete(
+result = client.documents.delete_bulk(
ids=[
'memory_id_1',
'memory_id_2',
@@ -276,7 +276,7 @@ Delete all memories within specific container tags. This is useful for cleaning
```typescript Typescript
// Delete all memories in specific container tags
-const result = await client.memories.bulkDelete({
+const result = await client.documents.deleteBulk({
containerTags: ['user-123', 'project-old', 'archived-content']
});
@@ -290,7 +290,7 @@ console.log('Bulk delete by tags result:', result);
```python Python
# Delete all memories in specific container tags
-result = client.memories.bulk_delete(
+result = client.documents.delete_bulk(
container_tags=['user-123', 'project-old', 'archived-content']
)
@@ -329,7 +329,7 @@ For applications requiring audit trails or recovery mechanisms, implement soft d
```typescript Typescript
// Soft delete pattern using metadata
-await client.memories.update('memory_id', {
+await client.documents.update('memory_id', {
metadata: {
deleted: true,
deletedAt: new Date().toISOString(),
@@ -338,40 +338,40 @@ await client.memories.update('memory_id', {
});
// Filter out deleted memories in searches
-const activeMemories = await client.memories.list({
- filters: JSON.stringify({
+const activeMemories = await client.documents.list({
+ filters: {
AND: [
{ key: "deleted", value: "true", negate: true }
]
- })
+ }
});
-console.log('Active memories:', activeMemories.results.length);
+console.log('Active memories:', activeMemories.memories.length);
```
```python Python
from datetime import datetime
-import json
# Soft delete pattern using metadata
-client.memories.update('memory_id', {
- 'metadata': {
+client.documents.update(
+ 'memory_id',
+ metadata={
'deleted': True,
'deletedAt': datetime.now().isoformat(),
'deletedBy': 'user_123'
}
-})
+)
# Filter out deleted memories
-active_memories = client.memories.list(
- filters=json.dumps({
+active_memories = client.documents.list(
+ filters={
"AND": [
{"key": "deleted", "value": "true", "negate": True}
]
- })
+ }
)
-print(f'Active memories: {len(active_memories.results)}')
+print(f'Active memories: {len(active_memories.memories)}')
```
```bash cURL
@@ -407,7 +407,7 @@ async function batchDeleteMemories(memoryIds: string[], batchSize = 100) {
console.log(`Processing batch ${Math.floor(i/batchSize) + 1} of ${Math.ceil(memoryIds.length/batchSize)}`);
try {
- const result = await client.memories.bulkDelete({ ids: batch });
+ const result = await client.documents.deleteBulk({ ids: batch });
results.push(result);
// Brief delay between batches to avoid rate limiting
@@ -446,7 +446,7 @@ def batch_delete_memories(memory_ids, batch_size=100):
print(f'Processing batch {batch_num} of {total_batches}')
try:
- result = client.memories.bulk_delete(ids=batch)
+ result = client.documents.delete_bulk(ids=batch)
results.append(result)
# Brief delay between batches to avoid rate limiting
diff --git a/apps/docs/user-profiles.mdx b/apps/docs/user-profiles.mdx
new file mode 100644
index 00000000..ec9ff57d
--- /dev/null
+++ b/apps/docs/user-profiles.mdx
@@ -0,0 +1,266 @@
+---
+title: "User Profiles"
+sidebarTitle: "User Profiles"
+description: "Fetch and use automatically maintained user context"
+icon: "user"
+---
+
+User profiles are extremely short summaries of context about an entity (usually a user, but it can be anything) that include both the *static* facts about the entity and a few recent episodes.
+
+> You can think of these as a dynamic compaction that's done by supermemory in real time.
+
+This profile should be injected into the agent context for truly personalized experiences. To read more, visit [User profiles - Concept](/concepts/user-profiles).
+
+Get a user's profile — their static facts and dynamic context — with a single API call.
+
+<Tip>
+Profiles are built automatically as you [ingest content](/add-memories). No setup required.
+</Tip>
+
+## Quick Start
+
+<Tabs>
+ <Tab title="TypeScript">
+ ```typescript
+ import Supermemory from 'supermemory';
+
+ const client = new Supermemory();
+
+ const { profile } = await client.profile({
+ containerTag: "user_123"
+ });
+
+ console.log(profile.static); // Long-term facts
+ console.log(profile.dynamic); // Recent context
+ ```
+ </Tab>
+ <Tab title="Python">
+ ```python
+ from supermemory import Supermemory
+
+ client = Supermemory()
+
+ result = client.profile(container_tag="user_123")
+
+ print(result.profile.static) # Long-term facts
+ print(result.profile.dynamic) # Recent context
+ ```
+ </Tab>
+ <Tab title="cURL">
+ ```bash
+ curl -X POST "https://api.supermemory.ai/v4/profile" \
+ -H "Authorization: Bearer $SUPERMEMORY_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{"containerTag": "user_123"}'
+ ```
+ </Tab>
+</Tabs>
+
+**Response:**
+```json
+{
+ "profile": {
+ "static": [
+ "User is a software engineer",
+ "User specializes in Python and React",
+ "User prefers dark mode interfaces"
+ ],
+ "dynamic": [
+ "User is working on Project Alpha",
+ "User recently started learning Rust",
+ "User is debugging authentication issues"
+ ]
+ }
+}
+```
+
+---
+
+## Profile + Search
+
+Get profile and search results in one call by adding the `q` parameter:
+
+<Tabs>
+ <Tab title="TypeScript">
+ ```typescript
+ const result = await client.profile({
+ containerTag: "user_123",
+ q: "deployment errors"
+ });
+
+ // Profile data
+ const { static: facts, dynamic: context } = result.profile;
+
+ // Search results (only if q was provided)
+ const memories = result.searchResults?.results || [];
+ ```
+ </Tab>
+ <Tab title="Python">
+ ```python
+ result = client.profile(
+ container_tag="user_123",
+ q="deployment errors"
+ )
+
+ # Profile data
+ facts = result.profile.static
+ context = result.profile.dynamic
+
+ # Search results
+ memories = result.search_results.results if result.search_results else []
+ ```
+ </Tab>
+</Tabs>
+
+---
+
+## Parameters
+
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| `containerTag` | string | Yes | User/project identifier |
+| `q` | string | No | Search query (includes search results in response) |
+| `threshold` | 0-1 | No | Filter search results by relevance score |
+
+---
+
+## Building Prompts
+
+The most common pattern — inject the profile into your LLM's system prompt:
+
+```typescript
+async function chat(userId: string, message: string) {
+ const { profile } = await client.profile({ containerTag: userId });
+
+ const systemPrompt = `You are assisting a user.
+
+ABOUT THE USER:
+${profile.static?.join('\n') || 'No profile yet.'}
+
+CURRENT CONTEXT:
+${profile.dynamic?.join('\n') || 'No recent activity.'}
+
+Personalize responses to their expertise and preferences.`;
+
+ return llm.chat({
+ messages: [
+ { role: "system", content: systemPrompt },
+ { role: "user", content: message }
+ ]
+ });
+}
+```
+
+---
+
+## Full Context Pattern
+
+Get profile + query-specific memories in one call:
+
+```typescript
+async function getContext(userId: string, query: string) {
+ const result = await client.profile({
+ containerTag: userId,
+ q: query,
+ threshold: 0.6
+ });
+
+ return `
+User Background:
+${result.profile.static.join('\n')}
+
+Current Context:
+${result.profile.dynamic.join('\n')}
+
+Relevant Memories:
+${result.searchResults?.results.map(m => m.memory).join('\n') || 'None'}
+ `;
+}
+```
+
+---
+
+## Framework Examples
+
+<Accordion title="Express.js Middleware">
+ ```typescript
+ async function withProfile(req, res, next) {
+ if (!req.user?.id) return next();
+
+ try {
+ const { profile } = await client.profile({
+ containerTag: req.user.id
+ });
+ req.userProfile = profile;
+ } catch (e) {
+ req.userProfile = null;
+ }
+ next();
+ }
+
+ app.use(withProfile);
+
+ app.post('/chat', (req, res) => {
+ // req.userProfile available in all routes
+ });
+ ```
+</Accordion>
+
+<Accordion title="Next.js API Route">
+ ```typescript
+ // app/api/chat/route.ts
+ export async function POST(req: NextRequest) {
+ const { userId, message } = await req.json();
+
+ const { profile } = await client.profile({
+ containerTag: userId
+ });
+
+ const response = await generateResponse(message, profile);
+ return NextResponse.json({ response });
+ }
+ ```
+</Accordion>
+
+<Accordion title="AI SDK Integration">
+ ```typescript
+ import { withSupermemory } from "@supermemory/tools/ai-sdk"
+ import { openai } from "@ai-sdk/openai"
+
+ // Profiles automatically injected
+ const model = withSupermemory(openai("gpt-4"), "user-123")
+
+ const result = await generateText({
+ model,
+ messages: [{ role: "user", content: "Help with my project" }]
+ });
+ ```
+
+ See [AI SDK Integration](/integrations/ai-sdk) for details.
+</Accordion>
+
+---
+
+## Response Schema
+
+```typescript
+interface ProfileResponse {
+ profile: {
+ static: string[]; // Long-term facts
+ dynamic: string[]; // Recent context
+ };
+ searchResults?: { // Only if q parameter provided
+ results: SearchResult[];
+ total: number;
+ timing: number;
+ };
+}
+```
+
+---
+
+## Next Steps
+
+- [User Profiles Concept](/concepts/user-profiles) — Understand static vs dynamic
+- [Ingesting Content](/add-memories) — Build profiles by adding content
+- [AI SDK Integration](/integrations/ai-sdk) — Automatic profile injection
diff --git a/apps/docs/user-profiles/examples.mdx b/apps/docs/user-profiles/examples.mdx
index 496f905f..aa3b796b 100644
--- a/apps/docs/user-profiles/examples.mdx
+++ b/apps/docs/user-profiles/examples.mdx
@@ -362,6 +362,6 @@ const result = await generateText({
// Model automatically has access to user's profile!
```
-<Card title="AI SDK User Profiles" icon="triangle" href="/ai-sdk/user-profiles">
+<Card title="AI SDK User Profiles" icon="triangle" href="/integrations/ai-sdk">
Learn more about automatic profile injection with the AI SDK
</Card>
diff --git a/apps/docs/user-profiles/overview.mdx b/apps/docs/user-profiles/overview.mdx
index 80acd1c2..160807fa 100644
--- a/apps/docs/user-profiles/overview.mdx
+++ b/apps/docs/user-profiles/overview.mdx
@@ -126,7 +126,7 @@ User asks: **"Can you help me debug this?"**
<Card title="Code Examples" icon="laptop-code" href="/user-profiles/examples">
See complete integration examples
</Card>
- <Card title="AI SDK Integration" icon="triangle" href="/ai-sdk/user-profiles">
+ <Card title="AI SDK Integration" icon="triangle" href="/integrations/ai-sdk">
Use the AI SDK for automatic profile injection
</Card>
<Card title="Use Cases" icon="lightbulb" href="/user-profiles/use-cases">
diff --git a/apps/docs/vibe-coding.mdx b/apps/docs/vibe-coding.mdx
index 54b00321..bb5cd0e9 100644
--- a/apps/docs/vibe-coding.mdx
+++ b/apps/docs/vibe-coding.mdx
@@ -2,7 +2,7 @@
title: "Vibe Coding Setup"
description: "Automatic Supermemory integration using AI coding agents"
icon: "zap"
-sidebarTitle: "Automatic setup"
+sidebarTitle: "Install with AI"
---
Get your AI coding agent to integrate Supermemory in minutes. Copy the prompt below, paste it into Claude/GPT/Cursor, and let it do the work.
@@ -170,7 +170,7 @@ const { profile, searchResults } = await client.profile({
const context = `
Static facts: ${profile.static.join('\n')}
Recent context: ${profile.dynamic.join('\n')}
-${searchResults ? `Memories: ${searchResults.results.map(r => r.content).join('\n')}` : ''}
+${searchResults ? `Memories: ${searchResults.results.map(r => r.memory).join('\n')}` : ''}
`
// Send to LLM
@@ -180,7 +180,7 @@ const messages = [
]
// After LLM responds:
-await client.memories.add({
+await client.add({
content: `user: ${userMessage}\nassistant: ${response}`,
containerTag: userId
})
@@ -202,7 +202,7 @@ const results = await client.search({
})
// Build context
-const context = results.results.map(r => r.content).join('\n')
+const context = results.results.map(r => r.memory || r.chunk).join('\n')
// Send to LLM with context
const messages = [
@@ -211,7 +211,7 @@ const messages = [
]
// Store the conversation
-await client.memories.add({
+await client.add({
content: `user: ${userMessage}\nassistant: ${response}`,
containerTag: userId
})
@@ -372,11 +372,11 @@ The skill asks questions interactively and generates code for your specific setu
Manual integration guide
</Card>
- <Card title="User Profiles" icon="user" href="/user-profiles/overview">
+ <Card title="User Profiles" icon="user" href="/concepts/user-profiles">
Deep dive into profiles
</Card>
- <Card title="Search API" icon="search" href="/search/overview">
+ <Card title="Search API" icon="search" href="/search">
Search modes and parameters
</Card>
diff --git a/apps/docs/voice-realtime/pipecat.mdx b/apps/docs/voice-realtime/pipecat.mdx
index 6f100d65..3948a777 100644
--- a/apps/docs/voice-realtime/pipecat.mdx
+++ b/apps/docs/voice-realtime/pipecat.mdx
@@ -151,7 +151,7 @@ async def run_bot(websocket_client, user_id: str, session_id: str):
)
stt = OpenAISTTService(api_key=os.getenv("OPENAI_API_KEY"))
- llm = OpenAILLMService(api_key=os.getenv("OPENAI_API_KEY"), model="gpt-4o-mini")
+ llm = OpenAILLMService(api_key=os.getenv("OPENAI_API_KEY"), model="gpt-5-mini")
tts = OpenAITTSService(api_key=os.getenv("OPENAI_API_KEY"), voice="alloy")
# Supermemory memory service
diff --git a/apps/mcp/src/client.ts b/apps/mcp/src/client.ts
index 7112e451..cadfa734 100644
--- a/apps/mcp/src/client.ts
+++ b/apps/mcp/src/client.ts
@@ -76,7 +76,7 @@ export class SupermemoryClient {
content: string,
): Promise<{ id: string; status: string; containerTag: string }> {
try {
- const result = await this.client.memories.add({
+ const result = await this.client.add({
content,
containerTag: this.containerTag,
metadata: {