aboutsummaryrefslogtreecommitdiff
path: root/packages
diff options
context:
space:
mode:
authorCodeWithShreyans <[email protected]>2025-09-02 23:11:19 +0000
committerCodeWithShreyans <[email protected]>2025-09-02 23:11:19 +0000
commitcae7051d1a0547e78a8d32d865a89778456707ce (patch)
treeb5fbc018dc6e38b2618241046a8bf8129f34d4f2 /packages
parentux: support integration (#405) (diff)
downloadsupermemory-cae7051d1a0547e78a8d32d865a89778456707ce.tar.xz
supermemory-cae7051d1a0547e78a8d32d865a89778456707ce.zip
feat: new tools package (#407)
Diffstat (limited to 'packages')
-rw-r--r--packages/ai-sdk/package.json2
-rw-r--r--packages/ai-sdk/src/index.ts1
-rw-r--r--packages/ai-sdk/src/infinite-chat.test.ts365
-rw-r--r--packages/ai-sdk/src/infinite-chat.ts49
-rw-r--r--packages/openai-sdk-python/README.md131
-rw-r--r--packages/openai-sdk-python/pyproject.toml2
-rw-r--r--packages/openai-sdk-python/src/__init__.py20
-rw-r--r--packages/openai-sdk-python/src/infinite_chat.py268
-rw-r--r--packages/openai-sdk-python/tests/test_infinite_chat.py387
-rw-r--r--packages/openai-sdk-ts/README.md314
-rw-r--r--packages/openai-sdk-ts/src/index.ts2
-rw-r--r--packages/openai-sdk-ts/src/infinite-chat.test.ts338
-rw-r--r--packages/openai-sdk-ts/src/infinite-chat.ts133
-rw-r--r--packages/openai-sdk-ts/src/tools.test.ts297
-rw-r--r--packages/openai-sdk-ts/src/tools.ts299
-rw-r--r--packages/tools/.npmignore (renamed from packages/openai-sdk-ts/.npmignore)2
-rw-r--r--packages/tools/README.md155
-rw-r--r--packages/tools/package.json (renamed from packages/openai-sdk-ts/package.json)25
-rw-r--r--packages/tools/src/ai-sdk.ts121
-rw-r--r--packages/tools/src/index.ts2
-rw-r--r--packages/tools/src/openai.ts276
-rw-r--r--packages/tools/src/shared.ts47
-rw-r--r--packages/tools/src/tools.test.ts274
-rw-r--r--packages/tools/src/types.ts9
-rw-r--r--packages/tools/tsconfig.json (renamed from packages/openai-sdk-ts/tsconfig.json)0
-rw-r--r--packages/tools/tsdown.config.ts (renamed from packages/openai-sdk-ts/tsdown.config.ts)6
26 files changed, 928 insertions, 2597 deletions
diff --git a/packages/ai-sdk/package.json b/packages/ai-sdk/package.json
index 58a49198..64fd2114 100644
--- a/packages/ai-sdk/package.json
+++ b/packages/ai-sdk/package.json
@@ -1,7 +1,7 @@
{
"name": "@supermemory/ai-sdk",
"type": "module",
- "version": "1.0.7",
+ "version": "1.0.8",
"scripts": {
"build": "tsdown",
"dev": "tsdown --watch",
diff --git a/packages/ai-sdk/src/index.ts b/packages/ai-sdk/src/index.ts
index b6962318..e419075c 100644
--- a/packages/ai-sdk/src/index.ts
+++ b/packages/ai-sdk/src/index.ts
@@ -1,2 +1 @@
-export * from "./infinite-chat"
export * from "./tools"
diff --git a/packages/ai-sdk/src/infinite-chat.test.ts b/packages/ai-sdk/src/infinite-chat.test.ts
deleted file mode 100644
index 92562b2e..00000000
--- a/packages/ai-sdk/src/infinite-chat.test.ts
+++ /dev/null
@@ -1,365 +0,0 @@
-import { generateText } from "ai"
-import { describe, expect, it } from "vitest"
-import z from "zod"
-import {
- createSupermemoryInfiniteChat,
- type SupermemoryInfiniteChatConfig,
-} from "./infinite-chat"
-
-import "dotenv/config"
-
-const providers = z.enum([
- "openai",
- "anthropic",
- "openrouter",
- "deepinfra",
- "groq",
- "google",
- "cloudflare",
-] satisfies SupermemoryInfiniteChatConfig["providerName"][])
-
-describe("createSupermemoryInfiniteChat", () => {
- // Required API keys - tests will fail if not provided
- const testApiKey = process.env.SUPERMEMORY_API_KEY
- const testProviderApiKey = process.env.PROVIDER_API_KEY
-
- if (!testApiKey) {
- throw new Error(
- "SUPERMEMORY_API_KEY environment variable is required for tests",
- )
- }
- if (!testProviderApiKey) {
- throw new Error(
- "PROVIDER_API_KEY environment variable is required for tests",
- )
- }
-
- // Optional configuration with defaults
- const testProviderName = providers.parse(
- process.env.PROVIDER_NAME ?? "openai",
- )
- const testProviderUrl = process.env.PROVIDER_URL
- const testModelName = process.env.MODEL_NAME || "gpt-5-mini"
- const testHeaders = { "custom-header": "test-value" }
-
- // Validate provider configuration - either name OR URL, not both
- if (testProviderUrl && process.env.PROVIDER_NAME) {
- throw new Error(
- "Cannot specify both PROVIDER_NAME and PROVIDER_URL - use one or the other",
- )
- }
-
- // Test prompts and inputs
- const testPrompts = [
- "Hello, how are you?",
- "What is 2 + 2?",
- "Write a short poem about AI",
- "Explain quantum computing in simple terms",
- "What can you help me with today?",
- ]
-
- const testMessages = [
- [{ role: "user" as const, content: "Hello!" }],
- [
- { role: "system" as const, content: "You are a helpful assistant." },
- { role: "user" as const, content: "What is AI?" },
- ],
- [
- { role: "user" as const, content: "Tell me a joke" },
- {
- role: "assistant" as const,
- content:
- "Why don't scientists trust atoms? Because they make up everything!",
- },
- { role: "user" as const, content: "Tell me another one" },
- ],
- ]
-
- describe("client creation", () => {
- it("should create client with configured provider", () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: testHeaders,
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: testHeaders,
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- expect(client).toBeDefined()
- expect(typeof client).toBe("function")
- })
-
- it("should create client with openai provider configuration", () => {
- const config: SupermemoryInfiniteChatConfig = {
- providerName: "openai",
- providerApiKey: testProviderApiKey,
- headers: testHeaders,
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- expect(client).toBeDefined()
- expect(typeof client).toBe("function")
- })
-
- it("should create client with anthropic provider configuration", () => {
- const config: SupermemoryInfiniteChatConfig = {
- providerName: "anthropic",
- providerApiKey: testProviderApiKey,
- headers: testHeaders,
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- expect(client).toBeDefined()
- expect(typeof client).toBe("function")
- })
-
- it("should create client with custom provider URL", () => {
- const customUrl = "https://custom-provider.com/v1/chat"
- const config: SupermemoryInfiniteChatConfig = {
- providerUrl: customUrl,
- providerApiKey: testProviderApiKey,
- headers: testHeaders,
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- expect(client).toBeDefined()
- expect(typeof client).toBe("function")
- })
- })
-
- describe("AI SDK integration", () => {
- it("should generate text with simple prompt", async () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- console.log(client(testModelName))
-
- const result = await generateText({
- model: client(testModelName),
- prompt: testPrompts[0], // "Hello, how are you?"
- })
-
- expect(result).toBeDefined()
- expect(result.text).toBeDefined()
- expect(typeof result.text).toBe("string")
- expect(result.text.length).toBeGreaterThan(0)
- })
-
- it("should generate text with messages array", async () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- const result = await generateText({
- model: client(testModelName),
- messages: testMessages[1], // System + user messages
- })
-
- expect(result).toBeDefined()
- expect(result.text).toBeDefined()
- expect(typeof result.text).toBe("string")
- expect(result.text.length).toBeGreaterThan(0)
- })
-
- it("should handle conversation history", async () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- const result = await generateText({
- model: client(testModelName),
- messages: testMessages[2], // Multi-turn conversation
- })
-
- expect(result).toBeDefined()
- expect(result.text).toBeDefined()
- expect(typeof result.text).toBe("string")
- expect(result.text.length).toBeGreaterThan(0)
- })
-
- it("should work with different prompt variations", async () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- // Test multiple prompts
- for (const prompt of testPrompts.slice(0, 3)) {
- const result = await generateText({
- model: client(testModelName),
- prompt,
- })
-
- expect(result).toBeDefined()
- expect(result.text).toBeDefined()
- expect(typeof result.text).toBe("string")
- expect(result.text.length).toBeGreaterThan(0)
- }
- })
-
- it("should work with configured and alternate models", async () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- const modelsToTest = [testModelName]
- // Add alternate model for OpenAI
- if (testProviderName === "openai" && !testProviderUrl) {
- modelsToTest.push("gpt-4o-mini")
- }
-
- for (const modelName of modelsToTest) {
- const result = await generateText({
- model: client(modelName),
- prompt: "Say hello in one word",
- })
-
- expect(result).toBeDefined()
- expect(result.text).toBeDefined()
- expect(typeof result.text).toBe("string")
- }
- })
-
- it("should work with custom headers", async () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {
- "x-custom-header": "test-value",
- },
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {
- "x-custom-header": "test-value",
- },
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- const result = await generateText({
- model: client(testModelName),
- prompt: "Hello",
- })
-
- expect(result).toBeDefined()
- expect(result.text).toBeDefined()
- expect(typeof result.text).toBe("string")
- })
- })
-
- describe("configuration validation", () => {
- it("should handle empty headers object", () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- expect(client).toBeDefined()
- })
-
- it("should handle configuration with custom headers", () => {
- const customHeaders = {
- authorization: "Bearer custom-token",
- "x-custom": "custom-value",
- }
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: customHeaders,
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: customHeaders,
- }
-
- const client = createSupermemoryInfiniteChat(testApiKey, config)
-
- expect(client).toBeDefined()
- })
-
- it("should handle different API keys", () => {
- const config: SupermemoryInfiniteChatConfig = {
- providerName: "openai",
- providerApiKey: "different-provider-key",
- headers: {},
- }
-
- const client = createSupermemoryInfiniteChat("different-sm-key", config)
-
- expect(client).toBeDefined()
- })
- })
-})
diff --git a/packages/ai-sdk/src/infinite-chat.ts b/packages/ai-sdk/src/infinite-chat.ts
deleted file mode 100644
index 50e8bb0e..00000000
--- a/packages/ai-sdk/src/infinite-chat.ts
+++ /dev/null
@@ -1,49 +0,0 @@
-import { createOpenAI } from "@ai-sdk/openai"
-
-interface SupermemoryInfiniteChatConfigBase {
- providerApiKey: string
- headers: Record<string, string>
-}
-
-interface SupermemoryInfiniteChatConfigWithProviderName
- extends SupermemoryInfiniteChatConfigBase {
- providerName: keyof typeof providerMap
- providerUrl?: never
-}
-
-interface SupermemoryInfiniteChatConfigWithProviderUrl
- extends SupermemoryInfiniteChatConfigBase {
- providerUrl: string
- providerName?: never
-}
-
-export type SupermemoryInfiniteChatConfig =
- | SupermemoryInfiniteChatConfigWithProviderName
- | SupermemoryInfiniteChatConfigWithProviderUrl
-
-type SupermemoryApiKey = string
-
-const providerMap = {
- openai: "https://api.openai.com/v1",
- anthropic: "https://api.anthropic.com/v1",
- openrouter: "https://openrouter.ai/api/v1",
- deepinfra: "https://api.deepinfra.com/v1/openai",
- groq: "https://api.groq.com/openai/v1",
- google: "https://generativelanguage.googleapis.com/v1beta/openai",
- cloudflare: "https://gateway.ai.cloudflare.com/v1/*/unlimited-context/openai",
-} as const
-
-export const createSupermemoryInfiniteChat = (
- apiKey: SupermemoryApiKey,
- config?: SupermemoryInfiniteChatConfig,
-) =>
- createOpenAI({
- apiKey: config?.providerApiKey,
- baseURL: config?.providerName
- ? providerMap[config.providerName]
- : config?.providerUrl,
- headers: {
- "x-supermemory-api-key": apiKey,
- ...config?.headers,
- },
- }).chat
diff --git a/packages/openai-sdk-python/README.md b/packages/openai-sdk-python/README.md
index fdc03b54..1f4154bc 100644
--- a/packages/openai-sdk-python/README.md
+++ b/packages/openai-sdk-python/README.md
@@ -1,17 +1,8 @@
# Supermemory OpenAI Python SDK
-Enhanced OpenAI Python SDK with Supermemory infinite context integration.
+Memory tools for OpenAI function calling with Supermemory integration.
-This package extends the official [OpenAI Python SDK](https://github.com/openai/openai-python) with [Supermemory](https://supermemory.ai) capabilities, enabling infinite context chat completions and memory management tools.
-
-## Features
-
-- 🚀 **Infinite Context**: Chat completions with unlimited conversation history
-- 🧠 **Memory Tools**: Search, add, and fetch user memories seamlessly
-- 🔌 **Multiple Providers**: Support for OpenAI, Anthropic, Groq, and more
-- 🛠 **Function Calling**: Built-in memory tools for OpenAI function calling
-- 🔒 **Type Safe**: Full TypeScript-style type hints for Python
-- âš¡ **Async Support**: Full async/await support
+This package provides memory management tools for the official [OpenAI Python SDK](https://github.com/openai/openai-python) using [Supermemory](https://supermemory.ai) capabilities.
## Installation
@@ -29,58 +20,26 @@ pip install supermemory-openai
## Quick Start
-### Basic Chat Completion
+### Using Memory Tools with OpenAI
```python
import asyncio
-from supermemory_openai import SupermemoryOpenAI, SupermemoryInfiniteChatConfigWithProviderName
+import openai
+from supermemory_openai import SupermemoryTools, execute_memory_tool_calls
async def main():
- # Initialize client
- client = SupermemoryOpenAI(
- supermemory_api_key="your-supermemory-api-key",
- config=SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key="your-openai-api-key",
- )
- )
-
- # Create chat completion
- response = await client.chat_completion(
- messages=[
- {"role": "user", "content": "Hello, how are you?"}
- ],
- model="gpt-4o"
- )
-
- print(response.choices[0].message.content)
-
-asyncio.run(main())
-```
-
-### Using Memory Tools
-
-```python
-import asyncio
-from supermemory_openai import SupermemoryOpenAI, SupermemoryTools, SupermemoryInfiniteChatConfigWithProviderName
-
-async def main():
- # Initialize client and tools
- client = SupermemoryOpenAI(
- supermemory_api_key="your-supermemory-api-key",
- config=SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key="your-openai-api-key",
- )
- )
+ # Initialize OpenAI client
+ client = openai.AsyncOpenAI(api_key="your-openai-api-key")
+ # Initialize Supermemory tools
tools = SupermemoryTools(
api_key="your-supermemory-api-key",
config={"project_id": "my-project"}
)
# Chat with memory tools
- response = await client.chat_completion(
+ response = await client.chat.completions.create(
+ model="gpt-4o",
messages=[
{
"role": "system",
@@ -91,10 +50,18 @@ async def main():
"content": "Remember that I prefer tea over coffee"
}
],
- tools=tools.get_tool_definitions(),
- model="gpt-4o"
+ tools=tools.get_tool_definitions()
)
+ # Handle tool calls if present
+ if response.choices[0].message.tool_calls:
+ tool_results = await execute_memory_tool_calls(
+ api_key="your-supermemory-api-key",
+ tool_calls=response.choices[0].message.tool_calls,
+ config={"project_id": "my-project"}
+ )
+ print("Tool results:", tool_results)
+
print(response.choices[0].message.content)
asyncio.run(main())
@@ -102,42 +69,6 @@ asyncio.run(main())
## Configuration
-### Provider Configuration
-
-#### Using Provider Names
-
-```python
-from supermemory_openai import SupermemoryInfiniteChatConfigWithProviderName
-
-config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai", # or "anthropic", "groq", "openrouter", etc.
- provider_api_key="your-provider-api-key",
- headers={"custom-header": "value"} # optional
-)
-```
-
-#### Using Custom URLs
-
-```python
-from supermemory_openai import SupermemoryInfiniteChatConfigWithProviderUrl
-
-config = SupermemoryInfiniteChatConfigWithProviderUrl(
- provider_url="https://your-custom-endpoint.com/v1",
- provider_api_key="your-provider-api-key",
- headers={"custom-header": "value"} # optional
-)
-```
-
-### Supported Providers
-
-- `openai` - OpenAI API
-- `anthropic` - Anthropic Claude
-- `openrouter` - OpenRouter
-- `deepinfra` - DeepInfra
-- `groq` - Groq
-- `google` - Google AI
-- `cloudflare` - Cloudflare Workers AI
-
## Memory Tools
### SupermemoryTools Class
@@ -205,24 +136,6 @@ if response.choices[0].message.tool_calls:
## API Reference
-### SupermemoryOpenAI
-
-Enhanced OpenAI client with infinite context support.
-
-#### Constructor
-
-```python
-SupermemoryOpenAI(
- supermemory_api_key: str,
- config: Optional[SupermemoryInfiniteChatConfig] = None
-)
-```
-
-#### Methods
-
-- `chat_completion()` - Create chat completion with simplified interface
-- `create_chat_completion()` - Create chat completion with full OpenAI parameters
-
### SupermemoryTools
Memory management tools for function calling.
@@ -261,9 +174,7 @@ except Exception as e:
Set these environment variables for testing:
- `SUPERMEMORY_API_KEY` - Your Supermemory API key
-- `PROVIDER_API_KEY` - Your AI provider API key
-- `PROVIDER_NAME` - Provider name (default: "openai")
-- `PROVIDER_URL` - Custom provider URL (optional)
+- `OPENAI_API_KEY` - Your OpenAI API key
- `MODEL_NAME` - Model to use (default: "gpt-4o-mini")
- `SUPERMEMORY_BASE_URL` - Custom Supermemory base URL (optional)
diff --git a/packages/openai-sdk-python/pyproject.toml b/packages/openai-sdk-python/pyproject.toml
index 78ea3000..d674fec9 100644
--- a/packages/openai-sdk-python/pyproject.toml
+++ b/packages/openai-sdk-python/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "hatchling.build"
[project]
name = "supermemory-openai-sdk"
version = "1.0.0"
-description = "OpenAI SDK utilities for supermemory"
+description = "Memory tools for OpenAI function calling with supermemory"
readme = "README.md"
license = { text = "MIT" }
keywords = ["openai", "supermemory", "ai", "memory"]
diff --git a/packages/openai-sdk-python/src/__init__.py b/packages/openai-sdk-python/src/__init__.py
index 47a7569e..b8564471 100644
--- a/packages/openai-sdk-python/src/__init__.py
+++ b/packages/openai-sdk-python/src/__init__.py
@@ -1,14 +1,4 @@
-"""Supermemory OpenAI SDK - Enhanced OpenAI Python SDK with infinite context."""
-
-from .infinite_chat import (
- SupermemoryOpenAI,
- SupermemoryInfiniteChatConfig,
- SupermemoryInfiniteChatConfigWithProviderName,
- SupermemoryInfiniteChatConfigWithProviderUrl,
- ProviderName,
- PROVIDER_MAP,
- create_supermemory_openai,
-)
+"""Supermemory OpenAI SDK - Memory tools for OpenAI function calling."""
from .tools import (
SupermemoryTools,
@@ -29,14 +19,6 @@ from .tools import (
__version__ = "0.1.0"
__all__ = [
- # Infinite Chat
- "SupermemoryOpenAI",
- "SupermemoryInfiniteChatConfig",
- "SupermemoryInfiniteChatConfigWithProviderName",
- "SupermemoryInfiniteChatConfigWithProviderUrl",
- "ProviderName",
- "PROVIDER_MAP",
- "create_supermemory_openai",
# Tools
"SupermemoryTools",
"SupermemoryToolsConfig",
diff --git a/packages/openai-sdk-python/src/infinite_chat.py b/packages/openai-sdk-python/src/infinite_chat.py
deleted file mode 100644
index 1d3890ae..00000000
--- a/packages/openai-sdk-python/src/infinite_chat.py
+++ /dev/null
@@ -1,268 +0,0 @@
-"""Enhanced OpenAI client with Supermemory infinite context integration."""
-
-from typing import Dict, List, Optional, Union, overload, Unpack
-from typing_extensions import Literal
-
-from openai import OpenAI, AsyncStream
-from openai.types.chat import (
- ChatCompletion,
- ChatCompletionMessageParam,
- ChatCompletionToolParam,
- ChatCompletionToolChoiceOptionParam,
- CompletionCreateParams,
-)
-
-
-# Provider URL mapping
-PROVIDER_MAP = {
- "openai": "https://api.openai.com/v1",
- "anthropic": "https://api.anthropic.com/v1",
- "openrouter": "https://openrouter.ai/api/v1",
- "deepinfra": "https://api.deepinfra.com/v1/openai",
- "groq": "https://api.groq.com/openai/v1",
- "google": "https://generativelanguage.googleapis.com/v1beta/openai",
- "cloudflare": "https://gateway.ai.cloudflare.com/v1/*/unlimited-context/openai",
-}
-
-ProviderName = Literal[
- "openai", "anthropic", "openrouter", "deepinfra", "groq", "google", "cloudflare"
-]
-
-
-class SupermemoryInfiniteChatConfigBase:
- """Base configuration for Supermemory infinite chat."""
-
- def __init__(
- self,
- provider_api_key: str,
- headers: Optional[Dict[str, str]] = None,
- ):
- self.provider_api_key = provider_api_key
- self.headers = headers or {}
-
-
-class SupermemoryInfiniteChatConfigWithProviderName(SupermemoryInfiniteChatConfigBase):
- """Configuration using a predefined provider name."""
-
- def __init__(
- self,
- provider_name: ProviderName,
- provider_api_key: str,
- headers: Optional[Dict[str, str]] = None,
- ):
- super().__init__(provider_api_key, headers)
- self.provider_name = provider_name
- self.provider_url: None = None
-
-
-class SupermemoryInfiniteChatConfigWithProviderUrl(SupermemoryInfiniteChatConfigBase):
- """Configuration using a custom provider URL."""
-
- def __init__(
- self,
- provider_url: str,
- provider_api_key: str,
- headers: Optional[Dict[str, str]] = None,
- ):
- super().__init__(provider_api_key, headers)
- self.provider_url = provider_url
- self.provider_name: None = None
-
-
-SupermemoryInfiniteChatConfig = Union[
- SupermemoryInfiniteChatConfigWithProviderName,
- SupermemoryInfiniteChatConfigWithProviderUrl,
-]
-
-
-class SupermemoryOpenAI(OpenAI):
- """Enhanced OpenAI client with supermemory integration.
-
- Only chat completions are supported - all other OpenAI API endpoints are disabled.
- """
-
- def __init__(
- self,
- supermemory_api_key: str,
- config: Optional[SupermemoryInfiniteChatConfig] = None,
- ):
- """Initialize the SupermemoryOpenAI client.
-
- Args:
- supermemory_api_key: API key for Supermemory service
- config: Configuration for the AI provider
- """
- # Determine base URL
- if config is None:
- base_url = "https://api.openai.com/v1"
- api_key = None
- headers = {}
- elif hasattr(config, "provider_name") and config.provider_name:
- base_url = PROVIDER_MAP[config.provider_name]
- api_key = config.provider_api_key
- headers = config.headers
- else:
- base_url = config.provider_url
- api_key = config.provider_api_key
- headers = config.headers
-
- # Prepare default headers
- default_headers = {
- "x-supermemory-api-key": supermemory_api_key,
- **headers,
- }
-
- # Initialize the parent OpenAI client
- super().__init__(
- api_key=api_key,
- base_url=base_url,
- default_headers=default_headers,
- )
-
- self._supermemory_api_key = supermemory_api_key
-
- # Disable unsupported endpoints
- self._disable_unsupported_endpoints()
-
- def _disable_unsupported_endpoints(self) -> None:
- """Disable all OpenAI endpoints except chat completions."""
-
- def unsupported_error() -> None:
- raise RuntimeError(
- "Supermemory only supports chat completions. "
- "Use chat_completion() or chat.completions.create() instead."
- )
-
- # List of endpoints to disable
- endpoints = [
- "embeddings",
- "fine_tuning",
- "images",
- "audio",
- "models",
- "moderations",
- "files",
- "batches",
- "uploads",
- "beta",
- ]
-
- # Override endpoints with error function
- for endpoint in endpoints:
- setattr(self, endpoint, property(lambda self: unsupported_error()))
-
- async def create_chat_completion(
- self,
- **params: Unpack[CompletionCreateParams],
- ) -> ChatCompletion:
- """Create chat completions with infinite context support.
-
- Args:
- **params: Parameters for chat completion
-
- Returns:
- ChatCompletion response
- """
- return await self.chat.completions.create(**params)
-
- @overload
- async def chat_completion(
- self,
- messages: List[ChatCompletionMessageParam],
- *,
- model: Optional[str] = None,
- temperature: Optional[float] = None,
- max_tokens: Optional[int] = None,
- tools: Optional[List[ChatCompletionToolParam]] = None,
- tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
- stream: Literal[False] = False,
- **kwargs: Unpack[CompletionCreateParams],
- ) -> ChatCompletion: ...
-
- @overload
- async def chat_completion(
- self,
- messages: List[ChatCompletionMessageParam],
- *,
- model: Optional[str] = None,
- temperature: Optional[float] = None,
- max_tokens: Optional[int] = None,
- tools: Optional[List[ChatCompletionToolParam]] = None,
- tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
- stream: Literal[True],
- **kwargs: Unpack[CompletionCreateParams],
- ) -> AsyncStream[ChatCompletion]: ...
-
- async def chat_completion(
- self,
- messages: List[ChatCompletionMessageParam],
- *,
- model: Optional[str] = None,
- temperature: Optional[float] = None,
- max_tokens: Optional[int] = None,
- tools: Optional[List[ChatCompletionToolParam]] = None,
- tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
- stream: bool = False,
- **kwargs: Unpack[CompletionCreateParams],
- ) -> Union[ChatCompletion, AsyncStream[ChatCompletion]]:
- """Create chat completions with simplified interface.
-
- Args:
- messages: List of chat messages
- model: Model to use (defaults to gpt-4o)
- temperature: Sampling temperature
- max_tokens: Maximum tokens to generate
- tools: Available tools for function calling
- tool_choice: Tool choice strategy
- stream: Whether to stream the response
- **kwargs: Additional parameters
-
- Returns:
- ChatCompletion response or stream
- """
- params: Dict[
- str,
- Union[
- str,
- List[ChatCompletionMessageParam],
- List[ChatCompletionToolParam],
- ChatCompletionToolChoiceOptionParam,
- bool,
- float,
- int,
- ],
- ] = {
- "model": model or "gpt-4o",
- "messages": messages,
- **kwargs,
- }
-
- # Add optional parameters if provided
- if temperature is not None:
- params["temperature"] = temperature
- if max_tokens is not None:
- params["max_tokens"] = max_tokens
- if tools is not None:
- params["tools"] = tools
- if tool_choice is not None:
- params["tool_choice"] = tool_choice
- if stream is not None:
- params["stream"] = stream
-
- return await self.chat.completions.create(**params)
-
-
-def create_supermemory_openai(
- supermemory_api_key: str,
- config: Optional[SupermemoryInfiniteChatConfig] = None,
-) -> SupermemoryOpenAI:
- """Helper function to create a SupermemoryOpenAI instance.
-
- Args:
- supermemory_api_key: API key for Supermemory service
- config: Configuration for the AI provider
-
- Returns:
- SupermemoryOpenAI instance
- """
- return SupermemoryOpenAI(supermemory_api_key, config)
diff --git a/packages/openai-sdk-python/tests/test_infinite_chat.py b/packages/openai-sdk-python/tests/test_infinite_chat.py
deleted file mode 100644
index 9fdf52c5..00000000
--- a/packages/openai-sdk-python/tests/test_infinite_chat.py
+++ /dev/null
@@ -1,387 +0,0 @@
-"""Tests for infinite_chat module."""
-
-import os
-import pytest
-from typing import List
-
-from openai.types.chat import ChatCompletionMessageParam
-from ..src import (
- SupermemoryOpenAI,
- SupermemoryInfiniteChatConfigWithProviderName,
- SupermemoryInfiniteChatConfigWithProviderUrl,
- ProviderName,
-)
-
-
-# Test configuration
-PROVIDERS: List[ProviderName] = [
- "openai",
- "anthropic",
- "openrouter",
- "deepinfra",
- "groq",
- "google",
- "cloudflare",
-]
-
-
-def test_api_key() -> str:
- """Get test Supermemory API key from environment."""
- api_key = os.getenv("SUPERMEMORY_API_KEY")
- if not api_key:
- pytest.skip("SUPERMEMORY_API_KEY environment variable is required for tests")
- return api_key
-
-
-def test_provider_api_key() -> str:
- """Get test provider API key from environment."""
- api_key = os.getenv("PROVIDER_API_KEY")
- if not api_key:
- pytest.skip("PROVIDER_API_KEY environment variable is required for tests")
- return api_key
-
-
-def test_provider_name() -> ProviderName:
- """Get test provider name from environment."""
- provider_name = os.getenv("PROVIDER_NAME", "openai")
- if provider_name not in PROVIDERS:
- pytest.fail(f"Invalid provider name: {provider_name}")
- return provider_name # type: ignore
-
-
-def test_provider_url() -> str:
- """Get test provider URL from environment."""
- return os.getenv("PROVIDER_URL", "")
-
-
-def test_model_name() -> str:
- """Get test model name from environment."""
- return os.getenv("MODEL_NAME", "gpt-4o-mini")
-
-
-def test_headers() -> dict:
- """Get test headers."""
- return {"custom-header": "test-value"}
-
-
-def test_messages() -> List[List[ChatCompletionMessageParam]]:
- """Test message sets."""
- return [
- [{"role": "user", "content": "Hello!"}],
- [
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "What is AI?"},
- ],
- [
- {"role": "user", "content": "Tell me a joke"},
- {
- "role": "assistant",
- "content": "Why don't scientists trust atoms? Because they make up everything!",
- },
- {"role": "user", "content": "Tell me another one"},
- ],
- ]
-
-
-class TestClientCreation:
- """Test client creation."""
-
- def test_create_client_with_provider_name(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- test_headers: dict,
- ):
- """Test creating client with provider name configuration."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers=test_headers,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- assert client is not None
- assert client.chat is not None
-
- def test_create_client_with_openai_provider(
- self, test_api_key: str, test_provider_api_key: str, test_headers: dict
- ):
- """Test creating client with OpenAI provider configuration."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key=test_provider_api_key,
- headers=test_headers,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- assert client is not None
-
- def test_create_client_with_custom_provider_url(
- self, test_api_key: str, test_provider_api_key: str, test_headers: dict
- ):
- """Test creating client with custom provider URL."""
- custom_url = "https://custom-provider.com/v1"
- config = SupermemoryInfiniteChatConfigWithProviderUrl(
- provider_url=custom_url,
- provider_api_key=test_provider_api_key,
- headers=test_headers,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- assert client is not None
-
-
-class TestChatCompletions:
- """Test chat completions functionality."""
-
- @pytest.mark.asyncio
- async def test_create_chat_completion_simple_message(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- test_model_name: str,
- test_messages: List[List[ChatCompletionMessageParam]],
- ):
- """Test creating chat completion with simple message."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers={},
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- result = await client.create_chat_completion(
- model=test_model_name,
- messages=test_messages[0], # "Hello!"
- )
-
- assert result is not None
- assert hasattr(result, "choices")
- assert len(result.choices) > 0
- assert result.choices[0].message.content is not None
-
- @pytest.mark.asyncio
- async def test_chat_completion_convenience_method(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- test_model_name: str,
- test_messages: List[List[ChatCompletionMessageParam]],
- ):
- """Test chat completion using convenience method."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers={},
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- result = await client.chat_completion(
- messages=test_messages[1], # System + user messages
- model=test_model_name,
- temperature=0.7,
- )
-
- assert result is not None
- assert hasattr(result, "choices")
- assert len(result.choices) > 0
- assert result.choices[0].message.content is not None
-
- @pytest.mark.asyncio
- async def test_handle_conversation_history(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- test_model_name: str,
- test_messages: List[List[ChatCompletionMessageParam]],
- ):
- """Test handling conversation history."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers={},
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- result = await client.chat_completion(
- messages=test_messages[2], # Multi-turn conversation
- model=test_model_name,
- )
-
- assert result is not None
- assert hasattr(result, "choices")
- assert len(result.choices) > 0
- assert result.choices[0].message.content is not None
-
- @pytest.mark.asyncio
- async def test_custom_headers(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- test_model_name: str,
- ):
- """Test working with custom headers."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers={"x-custom-header": "test-value"},
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- result = await client.chat_completion(
- messages=[{"role": "user", "content": "Hello"}],
- model=test_model_name,
- )
-
- assert result is not None
- assert hasattr(result, "choices")
-
-
-class TestConfigurationValidation:
- """Test configuration validation."""
-
- def test_handle_empty_headers_object(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- ):
- """Test handling empty headers object."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers={},
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- assert client is not None
-
- def test_handle_configuration_without_headers(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- ):
- """Test handling configuration without headers."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- assert client is not None
-
- def test_handle_different_api_keys(self):
- """Test handling different API keys."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key="different-provider-key",
- )
-
- client = SupermemoryOpenAI("different-sm-key", config)
-
- assert client is not None
-
-
-class TestDisabledEndpoints:
- """Test that non-chat endpoints are disabled."""
-
- def test_disabled_endpoints_throw_errors(
- self, test_api_key: str, test_provider_api_key: str
- ):
- """Test that all disabled endpoints throw appropriate errors."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key=test_provider_api_key,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- # Test that all disabled endpoints throw appropriate errors
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.embeddings
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.fine_tuning
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.images
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.audio
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.models
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.moderations
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.files
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.batches
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.uploads
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.beta
-
- def test_chat_completions_still_work(
- self, test_api_key: str, test_provider_api_key: str
- ):
- """Test that chat completions still work after disabling other endpoints."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key=test_provider_api_key,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- # Chat completions should still be accessible
- assert client.chat is not None
- assert client.chat.completions is not None
- assert callable(client.create_chat_completion)
- assert callable(client.chat_completion)
diff --git a/packages/openai-sdk-ts/README.md b/packages/openai-sdk-ts/README.md
deleted file mode 100644
index 5cf8e8bc..00000000
--- a/packages/openai-sdk-ts/README.md
+++ /dev/null
@@ -1,314 +0,0 @@
-# supermemory OpenAI SDK Utilities
-
-OpenAI JS/TS SDK utilities for supermemory
-
-## Installation
-
-```bash
-npm install @supermemory/openai-sdk
-# or
-bun add @supermemory/openai-sdk
-# or
-pnpm add @supermemory/openai-sdk
-# or
-yarn add @supermemory/openai-sdk
-```
-
-## Features
-
-Choose **one** of the following approaches (they cannot be used together):
-
-- **Infinite Chat Client**: Enhanced OpenAI client with unlimited context support
-- **Memory Tools**: Search, add, and fetch memories from supermemory using OpenAI function calling
-
-## Infinite Chat Client
-
-The infinite chat client provides an enhanced OpenAI client with supermemory's context management.
-
-```typescript
-import { SupermemoryOpenAI } from '@supermemory/openai-sdk'
-
-// Using a named provider
-const client = new SupermemoryOpenAI('your-supermemory-api-key', {
- providerName: 'openai',
- providerApiKey: 'your-openai-api-key',
- headers: {
- // Optional additional headers
- }
-})
-
-// Using a custom provider URL
-const client = new SupermemoryOpenAI('your-supermemory-api-key', {
- providerUrl: 'https://your-custom-provider.com/v1',
- providerApiKey: 'your-provider-api-key',
- headers: {
- // Optional additional headers
- }
-})
-
-const response = await client.chat.completions.create({
- model: 'gpt-4o',
- messages: [
- { role: 'user', content: 'Hello, how are you?' }
- ]
-})
-```
-
-### Complete Infinite Chat Example
-
-```typescript
-import { SupermemoryOpenAI } from '@supermemory/openai-sdk'
-
-const supermemoryApiKey = process.env.SUPERMEMORY_API_KEY!
-const openaiApiKey = process.env.OPENAI_API_KEY!
-
-// Initialize infinite chat client
-const client = new SupermemoryOpenAI(supermemoryApiKey, {
- providerName: 'openai',
- providerApiKey: openaiApiKey
-})
-
-async function chat(userMessage: string) {
- const response = await client.chatCompletion([
- {
- role: 'system',
- content: 'You are a helpful assistant with unlimited context.'
- },
- {
- role: 'user',
- content: userMessage
- }
- ], {
- model: 'gpt-4o'
- })
-
- return response.choices[0].message.content
-}
-```
-
-### Configuration
-
-```typescript
-// Option 1: Use a named provider
-interface ConfigWithProviderName {
- providerName: 'openai' | 'anthropic' | 'openrouter' | 'deepinfra' | 'groq' | 'google' | 'cloudflare'
- providerApiKey: string
- headers?: Record<string, string>
-}
-
-// Option 2: Use a custom provider URL
-interface ConfigWithProviderUrl {
- providerUrl: string
- providerApiKey: string
- headers?: Record<string, string>
-}
-```
-
-## Memory Tools
-
-supermemory tools allow OpenAI function calling to interact with user memories for enhanced context and personalization.
-
-```typescript
-import { SupermemoryTools, executeMemoryToolCalls } from '@supermemory/openai-sdk'
-import OpenAI from 'openai'
-
-const openai = new OpenAI({ apiKey: 'your-openai-api-key' })
-
-const response = await openai.chat.completions.create({
- model: 'gpt-4o',
- messages: [
- { role: 'user', content: 'What do you remember about my preferences?' }
- ],
- tools: new SupermemoryTools('your-supermemory-api-key', {
- // Optional: specify a base URL for self-hosted instances
- baseUrl: 'https://api.supermemory.com',
-
- // Use either projectId OR containerTags, not both
- projectId: 'your-project-id',
- // OR
- containerTags: ['tag1', 'tag2']
-}).getToolDefinitions()
-})
-```
-
-### Complete Memory Tools Example
-
-```typescript
-import { SupermemoryTools, executeMemoryToolCalls } from '@supermemory/openai-sdk'
-import OpenAI from 'openai'
-
-const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY! })
-const supermemoryApiKey = process.env.SUPERMEMORY_API_KEY!
-
-async function chatWithTools(userMessage: string) {
- const tools = new SupermemoryTools(supermemoryApiKey, {
- projectId: 'my-project'
- })
-
- const response = await openai.chat.completions.create({
- model: 'gpt-4o',
- messages: [
- {
- role: 'system',
- content: 'You are a helpful assistant with access to user memories.'
- },
- {
- role: 'user',
- content: userMessage
- }
- ],
- tools: tools.getToolDefinitions()
- })
-
- // Handle tool calls if present
- if (response.choices[0].message.tool_calls) {
- const toolResults = await executeMemoryToolCalls(
- supermemoryApiKey,
- response.choices[0].message.tool_calls,
- { projectId: 'my-project' }
- )
-
- // Continue conversation with tool results...
- }
-
- return response.choices[0].message.content
-}
-```
-
-### Configuration
-
-```typescript
-interface SupermemoryToolsConfig {
- // Optional: Base URL for API calls (default: https://api.supermemory.com)
- baseUrl?: string
-
- // Container tags for organizing memories (cannot be used with projectId)
- containerTags?: string[]
-
- // Project ID for scoping memories (cannot be used with containerTags)
- projectId?: string
-}
-```
-
-### Self-Hosted supermemory
-
-If you're running a self-hosted supermemory instance:
-
-```typescript
-const tools = new SupermemoryTools('your-api-key', {
- baseUrl: 'https://your-supermemory-instance.com',
- containerTags: ['production', 'user-memories']
-})
-```
-
-### Available Tools
-
-##### Search Memories
-
-Search through user memories using semantic matching.
-
-```typescript
-const searchResult = await tools.searchMemories({
- informationToGet: 'user preferences about coffee'
-})
-```
-
-##### Add Memory
-
-Add new memories to the user's memory store.
-
-```typescript
-const addResult = await tools.addMemory({
- memory: 'User prefers dark roast coffee in the morning'
-})
-```
-
-##### Fetch Memory
-
-Retrieve a specific memory by its ID.
-
-```typescript
-const fetchResult = await tools.fetchMemory({
- memoryId: 'memory-id-123'
-})
-```
-
-### Using Individual Tools
-
-For more flexibility, you can import and use individual tools:
-
-```typescript
-import {
- createSearchMemoriesTool,
- createAddMemoryTool,
- createFetchMemoryTool
-} from '@supermemory/openai-sdk'
-
-const searchTool = createSearchMemoriesTool('your-api-key', {
- projectId: 'your-project-id'
-})
-
-// Use only the search tool
-const response = await openai.chat.completions.create({
- model: 'gpt-4o',
- messages: [...],
- tools: [searchTool.definition]
-})
-```
-
-### Error Handling
-
-All tool executions return a result object with a `success` field:
-
-```typescript
-const result = await tools.searchMemories({
- informationToGet: 'user preferences'
-})
-
-if (result.success) {
- console.log('Found memories:', result.results)
- console.log('Total count:', result.count)
-} else {
- console.error('Error searching memories:', result.error)
-}
-```
-
-## Development
-
-### Running Tests
-
-```bash
-# Run all tests
-bun test
-
-# Run tests in watch mode
-bun test --watch
-```
-
-#### Environment Variables for Tests
-
-All tests require API keys to run. Copy `.env.example` to `.env` and set the required values:
-
-```bash
-cp .env.example .env
-```
-
-**Required:**
-- `SUPERMEMORY_API_KEY`: Your Supermemory API key
-- `PROVIDER_API_KEY`: Your AI provider API key (OpenAI, etc.)
-
-**Optional:**
-- `SUPERMEMORY_BASE_URL`: Custom Supermemory base URL (defaults to `https://api.supermemory.ai`)
-- `PROVIDER_NAME`: Provider name (defaults to `openai`) - one of: `openai`, `anthropic`, `openrouter`, `deepinfra`, `groq`, `google`, `cloudflare`
-- `PROVIDER_URL`: Custom provider URL (use instead of `PROVIDER_NAME`)
-- `MODEL_NAME`: Model to use in tests (defaults to `gpt-4o-mini`)
-
-Tests will fail if required API keys are not provided.
-
-## License
-
-MIT
-
-## Support
-
-Email our [24/7 Founder/CEO/Support Executive](mailto:[email protected]) \ No newline at end of file
diff --git a/packages/openai-sdk-ts/src/index.ts b/packages/openai-sdk-ts/src/index.ts
deleted file mode 100644
index b6962318..00000000
--- a/packages/openai-sdk-ts/src/index.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-export * from "./infinite-chat"
-export * from "./tools"
diff --git a/packages/openai-sdk-ts/src/infinite-chat.test.ts b/packages/openai-sdk-ts/src/infinite-chat.test.ts
deleted file mode 100644
index f77d5285..00000000
--- a/packages/openai-sdk-ts/src/infinite-chat.test.ts
+++ /dev/null
@@ -1,338 +0,0 @@
-import { describe, expect, it } from "vitest"
-import z from "zod"
-import type OpenAI from "openai"
-import {
- SupermemoryOpenAI,
- type SupermemoryInfiniteChatConfig,
-} from "./infinite-chat"
-
-import "dotenv/config"
-
-const providers = z.enum([
- "openai",
- "anthropic",
- "openrouter",
- "deepinfra",
- "groq",
- "google",
- "cloudflare",
-] satisfies SupermemoryInfiniteChatConfig["providerName"][])
-
-describe("SupermemoryOpenAI", () => {
- // Required API keys - tests will fail if not provided
- const testApiKey = process.env.SUPERMEMORY_API_KEY
- const testProviderApiKey = process.env.PROVIDER_API_KEY
-
- if (!testApiKey) {
- throw new Error(
- "SUPERMEMORY_API_KEY environment variable is required for tests",
- )
- }
- if (!testProviderApiKey) {
- throw new Error(
- "PROVIDER_API_KEY environment variable is required for tests",
- )
- }
-
- // Optional configuration with defaults
- const testProviderName = providers.parse(
- process.env.PROVIDER_NAME ?? "openai",
- )
- const testProviderUrl = process.env.PROVIDER_URL
- const testModelName = process.env.MODEL_NAME || "gpt-4o-mini"
- const testHeaders = { "custom-header": "test-value" }
-
- // Validate provider configuration - either name OR URL, not both
- if (testProviderUrl && process.env.PROVIDER_NAME) {
- throw new Error(
- "Cannot specify both PROVIDER_NAME and PROVIDER_URL - use one or the other",
- )
- }
-
- // Test prompts
- const testMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[][] = [
- [{ role: "user", content: "Hello!" }],
- [
- { role: "system", content: "You are a helpful assistant." },
- { role: "user", content: "What is AI?" },
- ],
- [
- { role: "user", content: "Tell me a joke" },
- {
- role: "assistant",
- content:
- "Why don't scientists trust atoms? Because they make up everything!",
- },
- { role: "user", content: "Tell me another one" },
- ],
- ]
-
- describe("client creation", () => {
- it("should create client with SupermemoryOpenAI class", () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: testHeaders,
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: testHeaders,
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- expect(client).toBeDefined()
- expect(client.chat).toBeDefined()
- })
-
- it("should create client with openai provider configuration", () => {
- const config: SupermemoryInfiniteChatConfig = {
- providerName: "openai",
- providerApiKey: testProviderApiKey,
- headers: testHeaders,
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- expect(client).toBeDefined()
- })
-
- it("should create client with custom provider URL", () => {
- const customUrl = "https://custom-provider.com/v1"
- const config: SupermemoryInfiniteChatConfig = {
- providerUrl: customUrl,
- providerApiKey: testProviderApiKey,
- headers: testHeaders,
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- expect(client).toBeDefined()
- })
- })
-
- describe("chat completions", () => {
- it("should create chat completion with simple message", async () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- const result = await client.createChatCompletion({
- model: testModelName,
- messages: testMessages[0]!, // "Hello!"
- })
-
- expect(result).toBeDefined()
- expect("choices" in result).toBe(true)
- if ("choices" in result) {
- expect(result.choices).toBeDefined()
- expect(result.choices.length).toBeGreaterThan(0)
- expect(result.choices[0]!.message.content).toBeDefined()
- }
- })
-
- it("should create chat completion using convenience method", async () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- const result = await client.chatCompletion(testMessages[1]!, {
- model: testModelName,
- temperature: 0.7,
- })
-
- expect(result).toBeDefined()
- expect("choices" in result).toBe(true)
- if ("choices" in result) {
- expect(result.choices).toBeDefined()
- expect(result.choices.length).toBeGreaterThan(0)
- expect(result.choices[0]!.message.content).toBeDefined()
- }
- })
-
- it("should handle conversation history", async () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- const result = await client.chatCompletion(testMessages[2]!, {
- model: testModelName,
- })
-
- expect(result).toBeDefined()
- expect("choices" in result).toBe(true)
- if ("choices" in result) {
- expect(result.choices).toBeDefined()
- expect(result.choices.length).toBeGreaterThan(0)
- expect(result.choices[0]!.message.content).toBeDefined()
- }
- })
-
- it("should work with custom headers", async () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {
- "x-custom-header": "test-value",
- },
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {
- "x-custom-header": "test-value",
- },
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- const result = await client.chatCompletion(
- [{ role: "user", content: "Hello" }],
- {
- model: testModelName,
- },
- )
-
- expect(result).toBeDefined()
- expect("choices" in result).toBe(true)
- })
- })
-
- describe("configuration validation", () => {
- it("should handle empty headers object", () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- headers: {},
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- expect(client).toBeDefined()
- })
-
- it("should handle configuration without headers", () => {
- const config: SupermemoryInfiniteChatConfig = testProviderUrl
- ? {
- providerUrl: testProviderUrl,
- providerApiKey: testProviderApiKey,
- }
- : {
- providerName: testProviderName,
- providerApiKey: testProviderApiKey,
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- expect(client).toBeDefined()
- })
-
- it("should handle different API keys", () => {
- const config: SupermemoryInfiniteChatConfig = {
- providerName: "openai",
- providerApiKey: "different-provider-key",
- }
-
- const client = new SupermemoryOpenAI("different-sm-key", config)
-
- expect(client).toBeDefined()
- })
- })
-
- describe("disabled endpoints", () => {
- it("should throw errors for disabled OpenAI endpoints", () => {
- const config: SupermemoryInfiniteChatConfig = {
- providerName: "openai",
- providerApiKey: testProviderApiKey,
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- // Test that all disabled endpoints throw appropriate errors
- expect(() => client.embeddings).toThrow(
- "Supermemory only supports chat completions",
- )
- expect(() => client.fineTuning).toThrow(
- "Supermemory only supports chat completions",
- )
- expect(() => client.images).toThrow(
- "Supermemory only supports chat completions",
- )
- expect(() => client.audio).toThrow(
- "Supermemory only supports chat completions",
- )
- expect(() => client.models).toThrow(
- "Supermemory only supports chat completions",
- )
- expect(() => client.moderations).toThrow(
- "Supermemory only supports chat completions",
- )
- expect(() => client.files).toThrow(
- "Supermemory only supports chat completions",
- )
- expect(() => client.batches).toThrow(
- "Supermemory only supports chat completions",
- )
- expect(() => client.uploads).toThrow(
- "Supermemory only supports chat completions",
- )
- expect(() => client.beta).toThrow(
- "Supermemory only supports chat completions",
- )
- })
-
- it("should still allow chat completions to work", () => {
- const config: SupermemoryInfiniteChatConfig = {
- providerName: "openai",
- providerApiKey: testProviderApiKey,
- }
-
- const client = new SupermemoryOpenAI(testApiKey, config)
-
- // Chat completions should still be accessible
- expect(client.chat).toBeDefined()
- expect(client.chat.completions).toBeDefined()
- expect(client.createChatCompletion).toBeDefined()
- expect(client.chatCompletion).toBeDefined()
- })
- })
-})
diff --git a/packages/openai-sdk-ts/src/infinite-chat.ts b/packages/openai-sdk-ts/src/infinite-chat.ts
deleted file mode 100644
index ed335d82..00000000
--- a/packages/openai-sdk-ts/src/infinite-chat.ts
+++ /dev/null
@@ -1,133 +0,0 @@
-import OpenAI from "openai"
-
-interface SupermemoryInfiniteChatConfigBase {
- providerApiKey: string
- headers?: Record<string, string>
-}
-
-interface SupermemoryInfiniteChatConfigWithProviderName
- extends SupermemoryInfiniteChatConfigBase {
- providerName: keyof typeof providerMap
- providerUrl?: never
-}
-
-interface SupermemoryInfiniteChatConfigWithProviderUrl
- extends SupermemoryInfiniteChatConfigBase {
- providerUrl: string
- providerName?: never
-}
-
-export type SupermemoryInfiniteChatConfig =
- | SupermemoryInfiniteChatConfigWithProviderName
- | SupermemoryInfiniteChatConfigWithProviderUrl
-
-type SupermemoryApiKey = string
-
-const providerMap = {
- openai: "https://api.openai.com/v1",
- anthropic: "https://api.anthropic.com/v1",
- openrouter: "https://openrouter.ai/api/v1",
- deepinfra: "https://api.deepinfra.com/v1/openai",
- groq: "https://api.groq.com/openai/v1",
- google: "https://generativelanguage.googleapis.com/v1beta/openai",
- cloudflare: "https://gateway.ai.cloudflare.com/v1/*/unlimited-context/openai",
-} as const
-
-/**
- * Enhanced OpenAI client with supermemory integration
- * Only chat completions are supported - all other OpenAI API endpoints are disabled
- */
-export class SupermemoryOpenAI extends OpenAI {
- private supermemoryApiKey: string
-
- constructor(
- supermemoryApiKey: SupermemoryApiKey,
- config?: SupermemoryInfiniteChatConfig,
- ) {
- const baseURL = config?.providerName
- ? providerMap[config.providerName]
- : (config?.providerUrl ?? "https://api.openai.com/v1")
-
- super({
- apiKey: config?.providerApiKey,
- baseURL,
- defaultHeaders: {
- "x-supermemory-api-key": supermemoryApiKey,
- ...config?.headers,
- },
- })
-
- this.supermemoryApiKey = supermemoryApiKey
-
- // Disable all non-chat completion endpoints
- this.disableUnsupportedEndpoints()
- }
-
- /**
- * Disable all OpenAI endpoints except chat completions
- */
- private disableUnsupportedEndpoints() {
- const unsupportedError = (): never => {
- throw new Error(
- "Supermemory only supports chat completions. Use chatCompletion() or chat.completions.create() instead.",
- )
- }
-
- // Override all other OpenAI API endpoints using Object.defineProperty
- const endpoints = [
- "embeddings",
- "fineTuning",
- "images",
- "audio",
- "models",
- "moderations",
- "files",
- "batches",
- "uploads",
- "beta",
- ]
-
- for (const endpoint of endpoints) {
- Object.defineProperty(this, endpoint, {
- get: unsupportedError,
- configurable: true,
- })
- }
- }
-
- /**
- * Create chat completions with infinite context support
- */
- async createChatCompletion<
- T extends OpenAI.Chat.Completions.ChatCompletionCreateParams,
- >(params: T) {
- return this.chat.completions.create(params)
- }
-
- /**
- * Create chat completions with simplified interface
- */
- async chatCompletion(
- messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
- options?: {
- model?: string
- temperature?: number
- maxTokens?: number
- tools?: OpenAI.Chat.Completions.ChatCompletionTool[]
- toolChoice?: OpenAI.Chat.Completions.ChatCompletionToolChoiceOption
- stream?: boolean
- },
- ) {
- const params = {
- model: options?.model ?? "gpt-4o",
- messages,
- temperature: options?.temperature,
- max_tokens: options?.maxTokens,
- tools: options?.tools,
- tool_choice: options?.toolChoice,
- stream: options?.stream,
- } satisfies OpenAI.Chat.Completions.ChatCompletionCreateParams
-
- return this.chat.completions.create(params)
- }
-}
diff --git a/packages/openai-sdk-ts/src/tools.test.ts b/packages/openai-sdk-ts/src/tools.test.ts
deleted file mode 100644
index 6e334c73..00000000
--- a/packages/openai-sdk-ts/src/tools.test.ts
+++ /dev/null
@@ -1,297 +0,0 @@
-import { describe, expect, it } from "vitest"
-import {
- SupermemoryTools,
- createSupermemoryTools,
- getMemoryToolDefinitions,
- executeMemoryToolCalls,
- createSearchMemoriesTool,
- createAddMemoryTool,
- type SupermemoryToolsConfig,
-} from "./tools"
-import { SupermemoryOpenAI } from "./infinite-chat"
-
-import "dotenv/config"
-
-describe("SupermemoryTools", () => {
- // Required API keys - tests will fail if not provided
- const testApiKey = process.env.SUPERMEMORY_API_KEY
- const testProviderApiKey = process.env.PROVIDER_API_KEY
-
- if (!testApiKey) {
- throw new Error(
- "SUPERMEMORY_API_KEY environment variable is required for tests",
- )
- }
- if (!testProviderApiKey) {
- throw new Error(
- "PROVIDER_API_KEY environment variable is required for tests",
- )
- }
-
- // Optional configuration with defaults
- const testBaseUrl = process.env.SUPERMEMORY_BASE_URL ?? undefined
- const testModelName = process.env.MODEL_NAME || "gpt-4o-mini"
-
- describe("tool initialization", () => {
- it("should create tools with default configuration", () => {
- const config: SupermemoryToolsConfig = {}
- const tools = new SupermemoryTools(testApiKey, config)
-
- expect(tools).toBeDefined()
- expect(tools.getToolDefinitions()).toBeDefined()
- expect(tools.getToolDefinitions().length).toBe(3)
- })
-
- it("should create tools with createSupermemoryTools helper", () => {
- const tools = createSupermemoryTools(testApiKey, {
- projectId: "test-project",
- })
-
- expect(tools).toBeDefined()
- expect(tools.getToolDefinitions()).toBeDefined()
- })
-
- it("should create tools with custom baseUrl", () => {
- const config: SupermemoryToolsConfig = {
- baseUrl: testBaseUrl,
- }
- const tools = new SupermemoryTools(testApiKey, config)
-
- expect(tools).toBeDefined()
- expect(tools.getToolDefinitions().length).toBe(3)
- })
-
- it("should create tools with projectId configuration", () => {
- const config: SupermemoryToolsConfig = {
- projectId: "test-project-123",
- }
- const tools = new SupermemoryTools(testApiKey, config)
-
- expect(tools).toBeDefined()
- expect(tools.getToolDefinitions().length).toBe(3)
- })
-
- it("should create tools with custom container tags", () => {
- const config: SupermemoryToolsConfig = {
- containerTags: ["custom-tag-1", "custom-tag-2"],
- }
- const tools = new SupermemoryTools(testApiKey, config)
-
- expect(tools).toBeDefined()
- expect(tools.getToolDefinitions().length).toBe(3)
- })
- })
-
- describe("tool definitions", () => {
- it("should return proper OpenAI function definitions", () => {
- const definitions = getMemoryToolDefinitions()
-
- expect(definitions).toBeDefined()
- expect(definitions.length).toBe(3)
-
- // Check searchMemories
- const searchTool = definitions.find(
- (d) => d.function.name === "searchMemories",
- )
- expect(searchTool).toBeDefined()
- expect(searchTool!.type).toBe("function")
- expect(searchTool!.function.parameters?.required).toContain(
- "informationToGet",
- )
-
- // Check addMemory
- const addTool = definitions.find((d) => d.function.name === "addMemory")
- expect(addTool).toBeDefined()
- expect(addTool!.type).toBe("function")
- expect(addTool!.function.parameters?.required).toContain("memory")
- })
-
- it("should have consistent tool definitions from class and helper", () => {
- const tools = new SupermemoryTools(testApiKey)
- const classDefinitions = tools.getToolDefinitions()
- const helperDefinitions = getMemoryToolDefinitions()
-
- expect(classDefinitions).toEqual(helperDefinitions)
- })
- })
-
- describe("memory operations", () => {
- it("should search memories", async () => {
- const tools = new SupermemoryTools(testApiKey, {
- projectId: "test-search",
- baseUrl: testBaseUrl,
- })
-
- const result = await tools.searchMemories({
- informationToGet: "test preferences",
- limit: 5,
- })
-
- expect(result).toBeDefined()
- expect(result.success).toBeDefined()
- expect(typeof result.success).toBe("boolean")
-
- if (result.success) {
- expect(result.results).toBeDefined()
- expect(result.count).toBeDefined()
- expect(typeof result.count).toBe("number")
- } else {
- expect(result.error).toBeDefined()
- }
- })
-
- it("should add memory", async () => {
- const tools = new SupermemoryTools(testApiKey, {
- containerTags: ["test-add-memory"],
- baseUrl: testBaseUrl,
- })
-
- const result = await tools.addMemory({
- memory: "User prefers dark roast coffee in the morning - test memory",
- })
-
- expect(result).toBeDefined()
- expect(result.success).toBeDefined()
- expect(typeof result.success).toBe("boolean")
-
- if (result.success) {
- expect(result.memory).toBeDefined()
- } else {
- expect(result.error).toBeDefined()
- }
- })
- })
-
- describe("individual tool creators", () => {
- it("should create individual search tool", () => {
- const searchTool = createSearchMemoriesTool(testApiKey, {
- projectId: "test-individual",
- })
-
- expect(searchTool).toBeDefined()
- expect(searchTool.definition).toBeDefined()
- expect(searchTool.execute).toBeDefined()
- expect(searchTool.definition.function.name).toBe("searchMemories")
- })
-
- it("should create individual add tool", () => {
- const addTool = createAddMemoryTool(testApiKey, {
- projectId: "test-individual",
- })
-
- expect(addTool).toBeDefined()
- expect(addTool.definition).toBeDefined()
- expect(addTool.execute).toBeDefined()
- expect(addTool.definition.function.name).toBe("addMemory")
- })
- })
-
- describe("OpenAI integration", () => {
- it("should work with SupermemoryOpenAI for function calling", async () => {
- const client = new SupermemoryOpenAI(testApiKey, {
- providerName: "openai",
- providerApiKey: testProviderApiKey,
- })
-
- const tools = new SupermemoryTools(testApiKey, {
- projectId: "test-openai-integration",
- baseUrl: testBaseUrl,
- })
-
- const response = await client.chatCompletion(
- [
- {
- role: "system",
- content:
- "You are a helpful assistant with access to user memories. When the user asks you to remember something, use the addMemory tool.",
- },
- {
- role: "user",
- content: "Please remember that I prefer tea over coffee",
- },
- ],
- {
- model: testModelName,
- tools: tools.getToolDefinitions(),
- },
- )
-
- expect(response).toBeDefined()
- expect("choices" in response).toBe(true)
-
- if ("choices" in response) {
- const choice = response.choices[0]!
- expect(choice.message).toBeDefined()
-
- // If the model decided to use function calling, test the execution
- if (choice.message.tool_calls && choice.message.tool_calls.length > 0) {
- const toolResults = await executeMemoryToolCalls(
- testApiKey,
- choice.message.tool_calls,
- {
- projectId: "test-openai-integration",
- baseUrl: testBaseUrl,
- },
- )
-
- expect(toolResults).toBeDefined()
- expect(toolResults.length).toBe(choice.message.tool_calls.length)
-
- for (const result of toolResults) {
- expect(result.role).toBe("tool")
- expect(result.content).toBeDefined()
- expect(result.tool_call_id).toBeDefined()
- }
- }
- }
- })
-
- it("should handle multiple tool calls", async () => {
- const tools = new SupermemoryTools(testApiKey, {
- containerTags: ["test-multi-tools"],
- baseUrl: testBaseUrl,
- })
-
- // Simulate tool calls (normally these would come from OpenAI)
- const mockToolCalls = [
- {
- id: "call_1",
- type: "function" as const,
- function: {
- name: "searchMemories",
- arguments: JSON.stringify({ informationToGet: "preferences" }),
- },
- },
- {
- id: "call_2",
- type: "function" as const,
- function: {
- name: "addMemory",
- arguments: JSON.stringify({
- memory: "Test memory for multiple calls",
- }),
- },
- },
- ]
-
- const results = await executeMemoryToolCalls(testApiKey, mockToolCalls, {
- containerTags: ["test-multi-tools"],
- baseUrl: testBaseUrl,
- })
-
- expect(results).toBeDefined()
- expect(results.length).toBe(2)
-
- expect(results[0]!.tool_call_id).toBe("call_1")
- expect(results[1]!.tool_call_id).toBe("call_2")
-
- for (const result of results) {
- expect(result.role).toBe("tool")
- expect(result.content).toBeDefined()
-
- const content = JSON.parse(result.content as string)
- expect(content.success).toBeDefined()
- }
- })
- })
-})
diff --git a/packages/openai-sdk-ts/src/tools.ts b/packages/openai-sdk-ts/src/tools.ts
deleted file mode 100644
index ccba6968..00000000
--- a/packages/openai-sdk-ts/src/tools.ts
+++ /dev/null
@@ -1,299 +0,0 @@
-import type OpenAI from "openai"
-import Supermemory from "supermemory"
-
-/**
- * Supermemory configuration
- * Only one of `projectId` or `containerTags` can be provided.
- */
-export interface SupermemoryToolsConfig {
- baseUrl?: string
- containerTags?: string[]
- projectId?: string
-}
-
-/**
- * Result types for memory operations
- */
-export interface MemorySearchResult {
- success: boolean
- results?: Awaited<ReturnType<Supermemory["search"]["execute"]>>["results"]
- count?: number
- error?: string
-}
-
-export interface MemoryAddResult {
- success: boolean
- memory?: Awaited<ReturnType<Supermemory["memories"]["add"]>>
- error?: string
-}
-
-export interface MemoryFetchResult {
- success: boolean
- memory?: Awaited<ReturnType<Supermemory["memories"]["get"]>>
- error?: string
-}
-
-/**
- * Function schemas for OpenAI function calling
- */
-export const memoryToolSchemas = {
- searchMemories: {
- name: "searchMemories",
- description:
- "Search (recall) memories/details/information about the user or other facts or entities. Run when explicitly asked or when context about user's past choices would be helpful.",
- parameters: {
- type: "object",
- properties: {
- informationToGet: {
- type: "string",
- description: "Terms to search for in the user's memories",
- },
- includeFullDocs: {
- type: "boolean",
- description:
- "Whether to include the full document content in the response. Defaults to true for better AI context.",
- default: true,
- },
- limit: {
- type: "number",
- description: "Maximum number of results to return",
- default: 10,
- },
- },
- required: ["informationToGet"],
- },
- } satisfies OpenAI.FunctionDefinition,
-
- addMemory: {
- name: "addMemory",
- description:
- "Add (remember) memories/details/information about the user or other facts or entities. Run when explicitly asked or when the user mentions any information generalizable beyond the context of the current conversation.",
- parameters: {
- type: "object",
- properties: {
- memory: {
- type: "string",
- description:
- "The text content of the memory to add. This should be a single sentence or a short paragraph.",
- },
- },
- required: ["memory"],
- },
- } satisfies OpenAI.FunctionDefinition,
-} as const
-
-/**
- * Create memory tool handlers for OpenAI function calling
- */
-export class SupermemoryTools {
- private client: Supermemory
- private containerTags: string[]
-
- constructor(apiKey: string, config?: SupermemoryToolsConfig) {
- this.client = new Supermemory({
- apiKey,
- ...(config?.baseUrl && { baseURL: config.baseUrl }),
- })
-
- this.containerTags = config?.projectId
- ? [`sm_project_${config.projectId}`]
- : (config?.containerTags ?? ["sm_project_default"])
- }
-
- /**
- * Get OpenAI function definitions for all memory tools
- */
- getToolDefinitions(): OpenAI.Chat.Completions.ChatCompletionTool[] {
- return [
- { type: "function", function: memoryToolSchemas.searchMemories },
- { type: "function", function: memoryToolSchemas.addMemory },
- ]
- }
-
- /**
- * Execute a tool call based on the function name and arguments
- */
- async executeToolCall(
- toolCall: OpenAI.Chat.Completions.ChatCompletionMessageToolCall,
- ): Promise<string> {
- const functionName = toolCall.function.name
- const args = JSON.parse(toolCall.function.arguments)
-
- switch (functionName) {
- case "searchMemories":
- return JSON.stringify(await this.searchMemories(args))
- case "addMemory":
- return JSON.stringify(await this.addMemory(args))
- default:
- return JSON.stringify({
- success: false,
- error: `Unknown function: ${functionName}`,
- })
- }
- }
-
- /**
- * Search memories
- */
- async searchMemories({
- informationToGet,
- includeFullDocs = true,
- limit = 10,
- }: {
- informationToGet: string
- includeFullDocs?: boolean
- limit?: number
- }): Promise<MemorySearchResult> {
- try {
- const response = await this.client.search.execute({
- q: informationToGet,
- containerTags: this.containerTags,
- limit,
- chunkThreshold: 0.6,
- includeFullDocs,
- })
-
- return {
- success: true,
- results: response.results,
- count: response.results?.length || 0,
- }
- } catch (error) {
- return {
- success: false,
- error: error instanceof Error ? error.message : "Unknown error",
- }
- }
- }
-
- /**
- * Add a memory
- */
- async addMemory({ memory }: { memory: string }): Promise<MemoryAddResult> {
- try {
- const metadata: Record<string, string | number | boolean> = {}
-
- const response = await this.client.memories.add({
- content: memory,
- containerTags: this.containerTags,
- ...(Object.keys(metadata).length > 0 && { metadata }),
- })
-
- return {
- success: true,
- memory: response,
- }
- } catch (error) {
- return {
- success: false,
- error: error instanceof Error ? error.message : "Unknown error",
- }
- }
- }
-
- /**
- * Fetch a specific memory by ID
- */
- async fetchMemory({
- memoryId,
- }: {
- memoryId: string
- }): Promise<MemoryFetchResult> {
- try {
- const response = await this.client.memories.get(memoryId)
-
- return {
- success: true,
- memory: response,
- }
- } catch (error) {
- return {
- success: false,
- error: error instanceof Error ? error.message : "Unknown error",
- }
- }
- }
-}
-
-/**
- * Helper function to create SupermemoryTools instance
- */
-export function createSupermemoryTools(
- apiKey: string,
- config?: SupermemoryToolsConfig,
-): SupermemoryTools {
- return new SupermemoryTools(apiKey, config)
-}
-
-/**
- * Get OpenAI function definitions for memory tools
- */
-export function getMemoryToolDefinitions(): OpenAI.Chat.Completions.ChatCompletionTool[] {
- return [
- { type: "function", function: memoryToolSchemas.searchMemories },
- { type: "function", function: memoryToolSchemas.addMemory },
- ]
-}
-
-/**
- * Execute tool calls from OpenAI function calling
- */
-export async function executeMemoryToolCalls(
- apiKey: string,
- toolCalls: OpenAI.Chat.Completions.ChatCompletionMessageToolCall[],
- config?: SupermemoryToolsConfig,
-): Promise<OpenAI.Chat.Completions.ChatCompletionToolMessageParam[]> {
- const tools = new SupermemoryTools(apiKey, config)
-
- const results = await Promise.all(
- toolCalls.map(async (toolCall) => {
- const result = await tools.executeToolCall(toolCall)
- return {
- tool_call_id: toolCall.id,
- role: "tool" as const,
- content: result,
- }
- }),
- )
-
- return results
-}
-
-/**
- * Individual tool creators for more granular control
- */
-export function createSearchMemoriesTool(
- apiKey: string,
- config?: SupermemoryToolsConfig,
-) {
- return {
- definition: {
- type: "function" as const,
- function: memoryToolSchemas.searchMemories,
- },
- execute: (args: {
- informationToGet: string
- includeFullDocs?: boolean
- limit?: number
- }) => {
- const tools = new SupermemoryTools(apiKey, config)
- return tools.searchMemories(args)
- },
- }
-}
-
-export function createAddMemoryTool(
- apiKey: string,
- config?: SupermemoryToolsConfig,
-) {
- return {
- definition: {
- type: "function" as const,
- function: memoryToolSchemas.addMemory,
- },
- execute: (args: { memory: string }) => {
- const tools = new SupermemoryTools(apiKey, config)
- return tools.addMemory(args)
- },
- }
-}
diff --git a/packages/openai-sdk-ts/.npmignore b/packages/tools/.npmignore
index aadf7e98..1e44e42f 100644
--- a/packages/openai-sdk-ts/.npmignore
+++ b/packages/tools/.npmignore
@@ -2,4 +2,4 @@ src/
.turbo/
.env
tsdown.config.ts
-tsconfig.json
+tsconfig.json \ No newline at end of file
diff --git a/packages/tools/README.md b/packages/tools/README.md
new file mode 100644
index 00000000..dfbf472c
--- /dev/null
+++ b/packages/tools/README.md
@@ -0,0 +1,155 @@
+# @supermemory/tools
+
+Memory tools for AI SDK and OpenAI function calling with supermemory.
+
+This package provides supermemory tools for both AI SDK and OpenAI function calling through dedicated submodule exports, each with function-based architectures optimized for their respective use cases.
+
+## Installation
+
+```bash
+npm install @supermemory/tools
+# or
+bun add @supermemory/tools
+# or
+pnpm add @supermemory/tools
+# or
+yarn add @supermemory/tools
+```
+
+## Usage
+
+The package provides two submodule imports:
+- `@supermemory/tools/ai-sdk` - For use with the AI SDK framework
+- `@supermemory/tools/openai` - For use with OpenAI's function calling
+
+### AI SDK Usage
+
+```typescript
+import { supermemoryTools, searchMemoriesTool, addMemoryTool } from "@supermemory/tools/ai-sdk"
+import { createOpenAI } from "@ai-sdk/openai"
+import { generateText } from "ai"
+
+const openai = createOpenAI({
+ apiKey: process.env.OPENAI_API_KEY!,
+})
+
+// Create all tools
+const tools = supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
+ projectId: "your-project-id",
+})
+
+// Use with AI SDK
+const result = await generateText({
+ model: openai("gpt-4"),
+ messages: [
+ {
+ role: "user",
+ content: "What do you remember about my preferences?",
+ },
+ ],
+ tools,
+})
+
+// Or create individual tools
+const searchTool = searchMemoriesTool(process.env.SUPERMEMORY_API_KEY!, {
+ projectId: "your-project-id",
+})
+
+const addTool = addMemoryTool(process.env.SUPERMEMORY_API_KEY!, {
+ projectId: "your-project-id",
+})
+```
+
+### OpenAI Function Calling Usage
+
+```typescript
+import { supermemoryTools, getToolDefinitions, createToolCallExecutor } from "@supermemory/tools/openai"
+import OpenAI from "openai"
+
+const client = new OpenAI({
+ apiKey: process.env.OPENAI_API_KEY!,
+})
+
+// Get tool definitions for OpenAI
+const toolDefinitions = getToolDefinitions()
+
+// Create tool executor
+const executeToolCall = createToolCallExecutor(process.env.SUPERMEMORY_API_KEY!, {
+ projectId: "your-project-id",
+})
+
+// Use with OpenAI Chat Completions
+const completion = await client.chat.completions.create({
+ model: "gpt-4",
+ messages: [
+ {
+ role: "user",
+ content: "What do you remember about my preferences?",
+ },
+ ],
+ tools: toolDefinitions,
+})
+
+// Execute tool calls if any
+if (completion.choices[0]?.message.tool_calls) {
+ for (const toolCall of completion.choices[0].message.tool_calls) {
+ const result = await executeToolCall(toolCall)
+ console.log(result)
+ }
+}
+
+// Or create individual function-based tools
+const tools = supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
+ projectId: "your-project-id",
+})
+
+const searchResult = await tools.searchMemories({
+ informationToGet: "user preferences",
+ limit: 10,
+})
+
+const addResult = await tools.addMemory({
+ memory: "User prefers dark roast coffee",
+})
+```
+
+## Configuration
+
+Both modules accept the same configuration interface:
+
+```typescript
+interface SupermemoryToolsConfig {
+ baseUrl?: string
+ containerTags?: string[]
+ projectId?: string
+}
+```
+
+- **baseUrl**: Custom base URL for the supermemory API
+- **containerTags**: Array of custom container tags (mutually exclusive with projectId)
+- **projectId**: Project ID which gets converted to container tag format (mutually exclusive with containerTags)
+
+## Available Tools
+
+### Search Memories
+Searches through stored memories based on a query string.
+
+**Parameters:**
+- `informationToGet` (string): Terms to search for
+- `includeFullDocs` (boolean, optional): Whether to include full document content (default: true)
+- `limit` (number, optional): Maximum number of results (default: 10)
+
+### Add Memory
+Adds a new memory to the system.
+
+**Parameters:**
+- `memory` (string): The content to remember
+
+
+
+## Environment Variables
+
+```env
+SUPERMEMORY_API_KEY=your_supermemory_api_key
+SUPERMEMORY_BASE_URL=https://your-custom-url # optional
+``` \ No newline at end of file
diff --git a/packages/openai-sdk-ts/package.json b/packages/tools/package.json
index 63074e0b..c7091029 100644
--- a/packages/openai-sdk-ts/package.json
+++ b/packages/tools/package.json
@@ -1,19 +1,22 @@
{
- "name": "@supermemory/openai-sdk",
- "version": "1.0.0",
+ "name": "@supermemory/tools",
"type": "module",
- "description": "OpenAI SDK utilities for supermemory",
+ "version": "1.0.4",
+ "description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
"scripts": {
"build": "tsdown",
"dev": "tsdown --watch",
"check-types": "tsc --noEmit",
- "test": "vitest",
- "test:watch": "vitest --watch"
+ "test": "vitest --testTimeout 100000",
+ "test:watch": "vitest --watch --testTimeout 100000"
},
"dependencies": {
+ "@ai-sdk/openai": "^2.0.23",
+ "@ai-sdk/provider": "^2.0.0",
+ "ai": "^5.0.29",
"openai": "^4.104.0",
"supermemory": "^3.0.0-alpha.26",
- "zod": "^4.1.4"
+ "zod": "^4.1.5"
},
"devDependencies": {
"@total-typescript/tsconfig": "^1.0.4",
@@ -28,19 +31,23 @@
"types": "./dist/index.d.ts",
"exports": {
".": "./dist/index.js",
+ "./ai-sdk": "./dist/ai-sdk.js",
+ "./openai": "./dist/openai.js",
"./package.json": "./package.json"
},
"repository": {
"url": "https://github.com/supermemoryai/supermemory",
- "directory": "packages/openai-sdk-ts"
+ "directory": "packages/tools"
},
"keywords": [
+ "ai",
+ "sdk",
"openai",
"typescript",
"supermemory",
- "ai",
"memory",
- "context"
+ "context",
+ "tools"
],
"license": "MIT"
}
diff --git a/packages/tools/src/ai-sdk.ts b/packages/tools/src/ai-sdk.ts
new file mode 100644
index 00000000..703175e7
--- /dev/null
+++ b/packages/tools/src/ai-sdk.ts
@@ -0,0 +1,121 @@
+import Supermemory from "supermemory"
+import { tool } from "ai"
+import { z } from "zod"
+import {
+ DEFAULT_VALUES,
+ PARAMETER_DESCRIPTIONS,
+ TOOL_DESCRIPTIONS,
+ getContainerTags,
+} from "./shared"
+import type { SupermemoryToolsConfig } from "./types"
+
+// Export individual tool creators
+export const searchMemoriesTool = (
+ apiKey: string,
+ config?: SupermemoryToolsConfig,
+) => {
+ const client = new Supermemory({
+ apiKey,
+ ...(config?.baseUrl ? { baseURL: config.baseUrl } : {}),
+ })
+
+ const containerTags = getContainerTags(config)
+
+ return tool({
+ description: TOOL_DESCRIPTIONS.searchMemories,
+ inputSchema: z.object({
+ informationToGet: z
+ .string()
+ .describe(PARAMETER_DESCRIPTIONS.informationToGet),
+ includeFullDocs: z
+ .boolean()
+ .optional()
+ .default(DEFAULT_VALUES.includeFullDocs)
+ .describe(PARAMETER_DESCRIPTIONS.includeFullDocs),
+ limit: z
+ .number()
+ .optional()
+ .default(DEFAULT_VALUES.limit)
+ .describe(PARAMETER_DESCRIPTIONS.limit),
+ }),
+ execute: async ({
+ informationToGet,
+ includeFullDocs = DEFAULT_VALUES.includeFullDocs,
+ limit = DEFAULT_VALUES.limit,
+ }) => {
+ try {
+ const response = await client.search.execute({
+ q: informationToGet,
+ containerTags,
+ limit,
+ chunkThreshold: DEFAULT_VALUES.chunkThreshold,
+ includeFullDocs,
+ })
+
+ return {
+ success: true,
+ results: response.results,
+ count: response.results?.length || 0,
+ }
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : "Unknown error",
+ }
+ }
+ },
+ })
+}
+
+export const addMemoryTool = (
+ apiKey: string,
+ config?: SupermemoryToolsConfig,
+) => {
+ const client = new Supermemory({
+ apiKey,
+ ...(config?.baseUrl ? { baseURL: config.baseUrl } : {}),
+ })
+
+ const containerTags = getContainerTags(config)
+
+ return tool({
+ description: TOOL_DESCRIPTIONS.addMemory,
+ inputSchema: z.object({
+ memory: z.string().describe(PARAMETER_DESCRIPTIONS.memory),
+ }),
+ execute: async ({ memory }) => {
+ try {
+ const metadata: Record<string, string | number | boolean> = {}
+
+ const response = await client.memories.add({
+ content: memory,
+ containerTags,
+ ...(Object.keys(metadata).length > 0 && { metadata }),
+ })
+
+ return {
+ success: true,
+ memory: response,
+ }
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : "Unknown error",
+ }
+ }
+ },
+ })
+}
+
+/**
+ * Create Supermemory tools for AI SDK
+ */
+export function supermemoryTools(
+ apiKey: string,
+ config?: SupermemoryToolsConfig,
+) {
+ return {
+ searchMemories: searchMemoriesTool(apiKey, config),
+ addMemory: addMemoryTool(apiKey, config),
+ }
+}
diff --git a/packages/tools/src/index.ts b/packages/tools/src/index.ts
new file mode 100644
index 00000000..4f21246e
--- /dev/null
+++ b/packages/tools/src/index.ts
@@ -0,0 +1,2 @@
+// Export shared types and utilities
+export type { SupermemoryToolsConfig } from "./types"
diff --git a/packages/tools/src/openai.ts b/packages/tools/src/openai.ts
new file mode 100644
index 00000000..5c79a9c1
--- /dev/null
+++ b/packages/tools/src/openai.ts
@@ -0,0 +1,276 @@
+import type OpenAI from "openai"
+import Supermemory from "supermemory"
+import {
+ DEFAULT_VALUES,
+ PARAMETER_DESCRIPTIONS,
+ TOOL_DESCRIPTIONS,
+ getContainerTags,
+} from "./shared"
+import type { SupermemoryToolsConfig } from "./types"
+
+/**
+ * Result types for memory operations
+ */
+export interface MemorySearchResult {
+ success: boolean
+ results?: Awaited<ReturnType<Supermemory["search"]["execute"]>>["results"]
+ count?: number
+ error?: string
+}
+
+export interface MemoryAddResult {
+ success: boolean
+ memory?: Awaited<ReturnType<Supermemory["memories"]["add"]>>
+ error?: string
+}
+
+/**
+ * Function schemas for OpenAI function calling
+ */
+export const memoryToolSchemas = {
+ searchMemories: {
+ name: "searchMemories",
+ description: TOOL_DESCRIPTIONS.searchMemories,
+ parameters: {
+ type: "object",
+ properties: {
+ informationToGet: {
+ type: "string",
+ description: PARAMETER_DESCRIPTIONS.informationToGet,
+ },
+ includeFullDocs: {
+ type: "boolean",
+ description: PARAMETER_DESCRIPTIONS.includeFullDocs,
+ default: DEFAULT_VALUES.includeFullDocs,
+ },
+ limit: {
+ type: "number",
+ description: PARAMETER_DESCRIPTIONS.limit,
+ default: DEFAULT_VALUES.limit,
+ },
+ },
+ required: ["informationToGet"],
+ },
+ } satisfies OpenAI.FunctionDefinition,
+
+ addMemory: {
+ name: "addMemory",
+ description: TOOL_DESCRIPTIONS.addMemory,
+ parameters: {
+ type: "object",
+ properties: {
+ memory: {
+ type: "string",
+ description: PARAMETER_DESCRIPTIONS.memory,
+ },
+ },
+ required: ["memory"],
+ },
+ } satisfies OpenAI.FunctionDefinition,
+} as const
+
+/**
+ * Create a Supermemory client with configuration
+ */
+function createClient(apiKey: string, config?: SupermemoryToolsConfig) {
+ const client = new Supermemory({
+ apiKey,
+ ...(config?.baseUrl && { baseURL: config.baseUrl }),
+ })
+
+ const containerTags = getContainerTags(config)
+
+ return { client, containerTags }
+}
+
+/**
+ * Search memories function
+ */
+export function createSearchMemoriesFunction(
+ apiKey: string,
+ config?: SupermemoryToolsConfig,
+) {
+ const { client, containerTags } = createClient(apiKey, config)
+
+ return async function searchMemories({
+ informationToGet,
+ includeFullDocs = DEFAULT_VALUES.includeFullDocs,
+ limit = DEFAULT_VALUES.limit,
+ }: {
+ informationToGet: string
+ includeFullDocs?: boolean
+ limit?: number
+ }): Promise<MemorySearchResult> {
+ try {
+ const response = await client.search.execute({
+ q: informationToGet,
+ containerTags,
+ limit,
+ chunkThreshold: DEFAULT_VALUES.chunkThreshold,
+ includeFullDocs,
+ })
+
+ return {
+ success: true,
+ results: response.results,
+ count: response.results?.length || 0,
+ }
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : "Unknown error",
+ }
+ }
+ }
+}
+
+/**
+ * Add memory function
+ */
+export function createAddMemoryFunction(
+ apiKey: string,
+ config?: SupermemoryToolsConfig,
+) {
+ const { client, containerTags } = createClient(apiKey, config)
+
+ return async function addMemory({
+ memory,
+ }: {
+ memory: string
+ }): Promise<MemoryAddResult> {
+ try {
+ const metadata: Record<string, string | number | boolean> = {}
+
+ const response = await client.memories.add({
+ content: memory,
+ containerTags,
+ ...(Object.keys(metadata).length > 0 && { metadata }),
+ })
+
+ return {
+ success: true,
+ memory: response,
+ }
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : "Unknown error",
+ }
+ }
+ }
+}
+
+/**
+ * Create all memory tools functions
+ */
+export function supermemoryTools(
+ apiKey: string,
+ config?: SupermemoryToolsConfig,
+) {
+ const searchMemories = createSearchMemoriesFunction(apiKey, config)
+ const addMemory = createAddMemoryFunction(apiKey, config)
+
+ return {
+ searchMemories,
+ addMemory,
+ }
+}
+
+/**
+ * Get OpenAI function definitions for all memory tools
+ */
+export function getToolDefinitions(): OpenAI.Chat.Completions.ChatCompletionTool[] {
+ return [
+ { type: "function", function: memoryToolSchemas.searchMemories },
+ { type: "function", function: memoryToolSchemas.addMemory },
+ ]
+}
+
+/**
+ * Execute a tool call based on the function name and arguments
+ */
+export function createToolCallExecutor(
+ apiKey: string,
+ config?: SupermemoryToolsConfig,
+) {
+ const tools = supermemoryTools(apiKey, config)
+
+ return async function executeToolCall(
+ toolCall: OpenAI.Chat.Completions.ChatCompletionMessageToolCall,
+ ): Promise<string> {
+ const functionName = toolCall.function.name
+ const args = JSON.parse(toolCall.function.arguments)
+
+ switch (functionName) {
+ case "searchMemories":
+ return JSON.stringify(await tools.searchMemories(args))
+ case "addMemory":
+ return JSON.stringify(await tools.addMemory(args))
+ default:
+ return JSON.stringify({
+ success: false,
+ error: `Unknown function: ${functionName}`,
+ })
+ }
+ }
+}
+
+/**
+ * Execute tool calls from OpenAI function calling
+ */
+export function createToolCallsExecutor(
+ apiKey: string,
+ config?: SupermemoryToolsConfig,
+) {
+ const executeToolCall = createToolCallExecutor(apiKey, config)
+
+ return async function executeToolCalls(
+ toolCalls: OpenAI.Chat.Completions.ChatCompletionMessageToolCall[],
+ ): Promise<OpenAI.Chat.Completions.ChatCompletionToolMessageParam[]> {
+ const results = await Promise.all(
+ toolCalls.map(async (toolCall) => {
+ const result = await executeToolCall(toolCall)
+ return {
+ tool_call_id: toolCall.id,
+ role: "tool" as const,
+ content: result,
+ }
+ }),
+ )
+
+ return results
+ }
+}
+
+/**
+ * Individual tool creators for more granular control
+ */
+export function createSearchMemoriesTool(
+ apiKey: string,
+ config?: SupermemoryToolsConfig,
+) {
+ const searchMemories = createSearchMemoriesFunction(apiKey, config)
+
+ return {
+ definition: {
+ type: "function" as const,
+ function: memoryToolSchemas.searchMemories,
+ },
+ execute: searchMemories,
+ }
+}
+
+export function createAddMemoryTool(
+ apiKey: string,
+ config?: SupermemoryToolsConfig,
+) {
+ const addMemory = createAddMemoryFunction(apiKey, config)
+
+ return {
+ definition: {
+ type: "function" as const,
+ function: memoryToolSchemas.addMemory,
+ },
+ execute: addMemory,
+ }
+}
diff --git a/packages/tools/src/shared.ts b/packages/tools/src/shared.ts
new file mode 100644
index 00000000..0ff14e86
--- /dev/null
+++ b/packages/tools/src/shared.ts
@@ -0,0 +1,47 @@
+/**
+ * Shared constants and descriptions for Supermemory tools
+ */
+
+// Tool descriptions surfaced to the LLM. The wording steers when the model
+// decides to invoke each tool, so keep the "Run when..." guidance intact.
+export const TOOL_DESCRIPTIONS = {
+	searchMemories:
+		"Search (recall) memories/details/information about the user or other facts or entities. Run when explicitly asked or when context about user's past choices would be helpful.",
+	addMemory:
+		"Add (remember) memories/details/information about the user or other facts or entities. Run when explicitly asked or when the user mentions any information generalizable beyond the context of the current conversation.",
+} as const
+
+// Parameter descriptions shared by every schema flavor of the tools.
+export const PARAMETER_DESCRIPTIONS = {
+	informationToGet: "Terms to search for in the user's memories",
+	includeFullDocs:
+		"Whether to include the full document content in the response. Defaults to true for better AI context.",
+	limit: "Maximum number of results to return",
+	memory:
+		"The text content of the memory to add. This should be a single sentence or a short paragraph.",
+} as const
+
+// Default values applied when a tool call omits the optional parameters.
+export const DEFAULT_VALUES = {
+	includeFullDocs: true,
+	limit: 10,
+	chunkThreshold: 0.6,
+} as const
+
+// Container tag constants used by getContainerTags below.
+// NOTE(review): the inner `as string[]` widens defaultTags to a mutable
+// string[], opting it out of the surrounding `as const` readonly inference.
+export const CONTAINER_TAG_CONSTANTS = {
+	projectPrefix: "sm_project_",
+	defaultTags: ["sm_project_default"] as string[],
+} as const
+
+/**
+ * Helper function to generate container tags based on config.
+ *
+ * Precedence: a truthy `projectId` always wins and expands to a single
+ * prefixed tag; otherwise any explicit `containerTags` are used, falling
+ * back to the package-wide default tag.
+ */
+export function getContainerTags(config?: {
+	projectId?: string
+	containerTags?: string[]
+}): string[] {
+	return config?.projectId
+		? [`${CONTAINER_TAG_CONSTANTS.projectPrefix}${config.projectId}`]
+		: (config?.containerTags ?? CONTAINER_TAG_CONSTANTS.defaultTags)
+}
diff --git a/packages/tools/src/tools.test.ts b/packages/tools/src/tools.test.ts
new file mode 100644
index 00000000..5cde729d
--- /dev/null
+++ b/packages/tools/src/tools.test.ts
@@ -0,0 +1,274 @@
+import { createOpenAI } from "@ai-sdk/openai"
+import { generateText } from "ai"
+import { describe, expect, it } from "vitest"
+import * as aiSdk from "./ai-sdk"
+import * as openAi from "./openai"
+import type { SupermemoryToolsConfig } from "./types"
+
+import "dotenv/config"
+
+// Integration suite covering both tool flavors (Vercel AI SDK and raw
+// OpenAI function calling). Several cases hit the live Supermemory and
+// OpenAI APIs, so both keys below are mandatory.
+describe("@supermemory/tools", () => {
+	// Required API keys - tests will fail if not provided
+	const testApiKey = process.env.SUPERMEMORY_API_KEY
+	const testOpenAIKey = process.env.OPENAI_API_KEY
+
+	if (!testApiKey) {
+		throw new Error(
+			"SUPERMEMORY_API_KEY environment variable is required for tests",
+		)
+	}
+	if (!testOpenAIKey) {
+		throw new Error("OPENAI_API_KEY environment variable is required for tests")
+	}
+
+	// Optional configuration with defaults
+	const testBaseUrl = process.env.SUPERMEMORY_BASE_URL ?? undefined
+	const testModelName = process.env.MODEL_NAME || "gpt-4o-mini"
+
+	describe("aiSdk module", () => {
+		describe("client initialization", () => {
+			// Shape-only checks: no network calls, just factory output.
+			it("should create tools with default configuration", () => {
+				const config: SupermemoryToolsConfig = {}
+				const tools = aiSdk.supermemoryTools(testApiKey, config)
+
+				expect(tools).toBeDefined()
+				expect(tools.searchMemories).toBeDefined()
+				expect(tools.addMemory).toBeDefined()
+			})
+
+			it("should create tools with custom baseUrl", () => {
+				const config: SupermemoryToolsConfig = {
+					baseUrl: testBaseUrl,
+				}
+				const tools = aiSdk.supermemoryTools(testApiKey, config)
+
+				expect(tools).toBeDefined()
+				expect(tools.searchMemories).toBeDefined()
+				expect(tools.addMemory).toBeDefined()
+			})
+
+			it("should create individual tools", () => {
+				const searchTool = aiSdk.searchMemoriesTool(testApiKey, {
+					projectId: "test-project-123",
+				})
+				const addTool = aiSdk.addMemoryTool(testApiKey, {
+					projectId: "test-project-123",
+				})
+
+				expect(searchTool).toBeDefined()
+				expect(addTool).toBeDefined()
+			})
+		})
+
+		describe("AI SDK integration", () => {
+			// NOTE(review): live integration test — calls OpenAI (and possibly
+			// Supermemory if the model decides to use a tool).
+			it("should work with AI SDK generateText", async () => {
+				const openai = createOpenAI({
+					apiKey: testOpenAIKey,
+				})
+
+				const result = await generateText({
+					model: openai(testModelName),
+					messages: [
+						{
+							role: "system",
+							content:
+								"You are a helpful assistant with access to user memories. Use the search tool when the user asks about preferences or past information.",
+						},
+						{
+							role: "user",
+							content: "What do you remember about my preferences?",
+						},
+					],
+					tools: {
+						...aiSdk.supermemoryTools(testApiKey, {
+							projectId: "test-ai-integration",
+							baseUrl: testBaseUrl,
+						}),
+					},
+				})
+
+				expect(result).toBeDefined()
+				expect(result.text).toBeDefined()
+				expect(typeof result.text).toBe("string")
+			})
+
+			// NOTE(review): non-deterministic — relies on the model choosing to
+			// call addMemory; only the final text response is asserted.
+			it("should use tools when prompted", async () => {
+				const openai = createOpenAI({
+					apiKey: testOpenAIKey,
+				})
+
+				const tools = aiSdk.supermemoryTools(testApiKey, {
+					projectId: "test-tool-usage",
+					baseUrl: testBaseUrl,
+				})
+
+				const result = await generateText({
+					model: openai(testModelName),
+					messages: [
+						{
+							role: "system",
+							content:
+								"You are a helpful assistant. When the user asks you to remember something, use the addMemory tool.",
+						},
+						{
+							role: "user",
+							content: "Please remember that I prefer dark roast coffee",
+						},
+					],
+					tools: {
+						addMemory: tools.addMemory,
+					},
+				})
+
+				expect(result).toBeDefined()
+				expect(result.text).toBeDefined()
+			})
+		})
+	})
+
+	describe("openAi module", () => {
+		describe("function-based tools", () => {
+			it("should create function-based tools", () => {
+				const tools = openAi.supermemoryTools(testApiKey, {
+					projectId: "test-openai-functions",
+				})
+
+				expect(tools).toBeDefined()
+				expect(tools.searchMemories).toBeDefined()
+				expect(tools.addMemory).toBeDefined()
+			})
+
+			it("should create individual tool functions", () => {
+				const searchFunction = openAi.createSearchMemoriesFunction(testApiKey, {
+					projectId: "test-individual",
+				})
+				const addFunction = openAi.createAddMemoryFunction(testApiKey, {
+					projectId: "test-individual",
+				})
+
+				expect(searchFunction).toBeDefined()
+				expect(addFunction).toBeDefined()
+				expect(typeof searchFunction).toBe("function")
+				expect(typeof addFunction).toBe("function")
+			})
+		})
+
+		describe("tool definitions", () => {
+			// Verifies the static JSON schemas exposed to OpenAI function calling.
+			it("should return proper OpenAI function definitions", () => {
+				const definitions = openAi.getToolDefinitions()
+
+				expect(definitions).toBeDefined()
+				expect(definitions.length).toBe(2)
+
+				// Check searchMemories
+				const searchTool = definitions.find(
+					(d) => d.function.name === "searchMemories",
+				)
+				expect(searchTool).toBeDefined()
+				expect(searchTool!.type).toBe("function")
+				expect(searchTool!.function.parameters?.required).toContain(
+					"informationToGet",
+				)
+
+				// Check addMemory
+				const addTool = definitions.find((d) => d.function.name === "addMemory")
+				expect(addTool).toBeDefined()
+				expect(addTool!.type).toBe("function")
+				expect(addTool!.function.parameters?.required).toContain("memory")
+			})
+		})
+
+		describe("tool execution", () => {
+			// Factory-shape checks only; executors are not invoked here.
+			it("should create tool call executor", () => {
+				const executor = openAi.createToolCallExecutor(testApiKey, {
+					containerTags: ["test-executor"],
+					baseUrl: testBaseUrl,
+				})
+
+				expect(executor).toBeDefined()
+				expect(typeof executor).toBe("function")
+			})
+
+			it("should create tool calls executor", () => {
+				const executor = openAi.createToolCallsExecutor(testApiKey, {
+					containerTags: ["test-executors"],
+					baseUrl: testBaseUrl,
+				})
+
+				expect(executor).toBeDefined()
+				expect(typeof executor).toBe("function")
+			})
+		})
+
+		describe("individual tool creators", () => {
+			it("should create individual search tool", () => {
+				const searchTool = openAi.createSearchMemoriesTool(testApiKey, {
+					projectId: "test-individual",
+				})
+
+				expect(searchTool).toBeDefined()
+				expect(searchTool.definition).toBeDefined()
+				expect(searchTool.execute).toBeDefined()
+				expect(searchTool.definition.function.name).toBe("searchMemories")
+			})
+
+			it("should create individual add tool", () => {
+				const addTool = openAi.createAddMemoryTool(testApiKey, {
+					projectId: "test-individual",
+				})
+
+				expect(addTool).toBeDefined()
+				expect(addTool.definition).toBeDefined()
+				expect(addTool.execute).toBeDefined()
+				expect(addTool.definition.function.name).toBe("addMemory")
+			})
+		})
+
+		describe("memory operations", () => {
+			// NOTE(review): live Supermemory calls; both branches of the success
+			// union are accepted so transient API failures still exercise the shape.
+			it("should search memories", async () => {
+				const searchFunction = openAi.createSearchMemoriesFunction(testApiKey, {
+					projectId: "test-search",
+					baseUrl: testBaseUrl,
+				})
+
+				const result = await searchFunction({
+					informationToGet: "test preferences",
+					limit: 5,
+				})
+
+				expect(result).toBeDefined()
+				expect(result.success).toBeDefined()
+				expect(typeof result.success).toBe("boolean")
+
+				if (result.success) {
+					expect(result.results).toBeDefined()
+					expect(result.count).toBeDefined()
+					expect(typeof result.count).toBe("number")
+				} else {
+					expect(result.error).toBeDefined()
+				}
+			})
+
+			it("should add memory", async () => {
+				const addFunction = openAi.createAddMemoryFunction(testApiKey, {
+					containerTags: ["test-add-memory"],
+					baseUrl: testBaseUrl,
+				})
+
+				const result = await addFunction({
+					memory: "User prefers dark roast coffee in the morning - test memory",
+				})
+
+				expect(result).toBeDefined()
+				expect(result.success).toBeDefined()
+				expect(typeof result.success).toBe("boolean")
+
+				if (result.success) {
+					expect(result.memory).toBeDefined()
+				} else {
+					expect(result.error).toBeDefined()
+				}
+			})
+		})
+	})
+})
diff --git a/packages/tools/src/types.ts b/packages/tools/src/types.ts
new file mode 100644
index 00000000..dfff0f00
--- /dev/null
+++ b/packages/tools/src/types.ts
@@ -0,0 +1,9 @@
+/**
+ * Supermemory configuration
+ * Only one of `projectId` or `containerTags` can be provided.
+ */
+export interface SupermemoryToolsConfig {
+	// Override for the Supermemory API base URL; omit to use the default endpoint.
+	baseUrl?: string
+	// Explicit container tags scoping memory reads/writes.
+	// Ignored when `projectId` is set (projectId takes precedence in getContainerTags).
+	containerTags?: string[]
+	// Convenience alternative: expands to a single `sm_project_<id>` container tag.
+	projectId?: string
+}
diff --git a/packages/openai-sdk-ts/tsconfig.json b/packages/tools/tsconfig.json
index d40ba072..d40ba072 100644
--- a/packages/openai-sdk-ts/tsconfig.json
+++ b/packages/tools/tsconfig.json
diff --git a/packages/openai-sdk-ts/tsdown.config.ts b/packages/tools/tsdown.config.ts
index f587b211..59be1b93 100644
--- a/packages/openai-sdk-ts/tsdown.config.ts
+++ b/packages/tools/tsdown.config.ts
@@ -1,15 +1,15 @@
import { defineConfig } from "tsdown"
export default defineConfig({
- entry: ["src/index.ts"],
+ entry: ["src/index.ts", "src/ai-sdk.ts", "src/openai.ts"],
format: "esm",
- sourcemap: true,
+ sourcemap: false,
target: "es2020",
tsconfig: "./tsconfig.json",
clean: true,
minify: true,
dts: {
- sourcemap: true,
+ sourcemap: false,
},
exports: true,
})