+ Want to self-host? See our Self-Hosting Guide for enterprise deployment options.
+
diff --git a/apps/docs/cookbook/personal-assistant.mdx b/apps/docs/cookbook/personal-assistant.mdx
index c13b7471f..7d5f256b4 100644
--- a/apps/docs/cookbook/personal-assistant.mdx
+++ b/apps/docs/cookbook/personal-assistant.mdx
@@ -3,764 +3,862 @@ title: "Personal AI Assistant"
description: "Build an AI assistant that remembers user preferences, habits, and context across conversations"
---
-Build a personal AI assistant that learns and remembers everything about the user - their preferences, habits, work context, and conversation history. This recipe shows how to create a truly personalized AI experience using Supermemory's memory tools.
+Build a personal AI assistant that learns and remembers everything about the user - their preferences, habits, work context, and conversation history.
## What You'll Build
A personal AI assistant that:
- **Remembers user preferences** (dietary restrictions, work schedule, communication style)
-- **Learns from conversations** and improves responses over time
-- **Maintains context** across multiple chat sessions
+- **Maintains context** across multiple chat sessions
- **Provides personalized recommendations** based on user history
- **Handles multiple conversation topics** while maintaining context
+## Choose Your Implementation
+
+
+
+ Thoroughly tested, production-ready. Uses FastAPI + Streamlit + OpenAI.
+
+
+ Modern React approach. Uses Next.js + Vercel AI SDK + Supermemory tools.
+
+
+
## Prerequisites
-- Node.js 18+ or Python 3.8+
-- Supermemory API key
-- OpenAI or Anthropic API key
-- Basic understanding of chat applications
+- **Python 3.8+** or **Node.js 18+**
+- **Supermemory API key** ([get one here](https://console.supermemory.ai))
+- **OpenAI API key** ([get one here](https://platform.openai.com/api-keys))
-## Implementation
+
+Never hardcode API keys in your code. Use environment variables.
+
+
+---
+
+## Python Implementation
### Step 1: Project Setup
-
-
- ```bash
- npx create-next-app@latest personal-ai --typescript --tailwind --eslint
- cd personal-ai
- npm install @supermemory/tools ai openai
- ```
-
- Create your environment variables:
- ```bash .env.local
- SUPERMEMORY_API_KEY=your_supermemory_key
- OPENAI_API_KEY=your_openai_key
- ```
-
-
-
- ```bash
- mkdir personal-ai && cd personal-ai
- python -m venv venv
- source venv/bin/activate # On Windows: venv\Scripts\activate
- pip install supermemory openai fastapi uvicorn python-multipart
- ```
-
- Create your environment variables:
- ```bash .env
- SUPERMEMORY_API_KEY=your_supermemory_key
- OPENAI_API_KEY=your_openai_key
- ```
-
-
-
-### Step 2: Core Assistant Logic
-
-
-
- ```typescript app/api/chat/route.ts
- import { streamText } from 'ai'
- import { createOpenAI } from '@ai-sdk/openai'
- import { supermemoryTools } from '@supermemory/tools/ai-sdk'
-
- const openai = createOpenAI({
- apiKey: process.env.OPENAI_API_KEY!
- })
+```bash
+mkdir personal-ai && cd personal-ai
+python -m venv venv
+source venv/bin/activate # On Windows: venv\Scripts\activate
+pip install supermemory openai fastapi uvicorn python-dotenv streamlit requests
+```
- export async function POST(request: Request) {
- const { messages, userId = 'default-user' } = await request.json()
-
- const result = await streamText({
- model: openai('gpt-5'),
- messages,
- tools: supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
- containerTags: [userId]
- }),
- system: `You are a highly personalized AI assistant. Your primary goal is to learn about the user and provide increasingly personalized help over time.
-
- MEMORY MANAGEMENT:
- 1. When users share personal information, preferences, or context, immediately use addMemory to store it
- 2. Before responding to requests, search your memories for relevant context about the user
- 3. Use past conversations to inform current responses
- 4. Remember user's communication style, preferences, and frequently discussed topics
-
- PERSONALITY:
- - Adapt your communication style to match the user's preferences
- - Reference past conversations naturally when relevant
- - Proactively offer help based on learned patterns
- - Be genuinely helpful while respecting privacy
-
- EXAMPLES OF WHAT TO REMEMBER:
- - Work schedule and role
- - Dietary preferences/restrictions
- - Communication preferences (formal/casual)
- - Frequent topics of interest
- - Goals and projects they're working on
- - Family/personal context they share
- - Preferred tools and workflows
- - Time zone and availability
-
- Always search memories before responding to provide personalized, contextual help.`
- })
-
- return result.toAIStreamResponse()
- }
- ```
-
-
-
- ```python main.py
- from fastapi import FastAPI, HTTPException
- from fastapi.responses import StreamingResponse
- import openai
- from supermemory import Supermemory
- import json
- import os
- from typing import List, Dict, Any
- import asyncio
-
- app = FastAPI()
-
- openai_client = openai.AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
- supermemory_client = Supermemory(api_key=os.getenv("SUPERMEMORY_API_KEY"))
-
- SYSTEM_PROMPT = """You are a highly personalized AI assistant. Your primary goal is to learn about the user and provide increasingly personalized help over time.
-
- MEMORY MANAGEMENT:
- 1. When users share personal information, preferences, or context, immediately store it
- 2. Before responding to requests, search for relevant context about the user
- 3. Use past conversations to inform current responses
- 4. Remember user's communication style, preferences, and frequently discussed topics
-
- PERSONALITY:
- - Adapt your communication style to match the user's preferences
- - Reference past conversations naturally when relevant
- - Proactively offer help based on learned patterns
- - Be genuinely helpful while respecting privacy
-
- Always search memories before responding to provide personalized, contextual help."""
-
- async def search_user_memories(query: str, user_id: str) -> str:
- """Search user's memories for relevant context"""
- try:
- results = supermemory_client.search.memories(
- q=query,
- container_tag=f"user_{user_id}",
- limit=5
- )
-
- if results.results:
- context = "\n".join([r.memory for r in results.results])
- return f"Relevant memories about the user:\n{context}"
- return "No relevant memories found."
- except Exception as e:
- return f"Error searching memories: {e}"
-
- async def add_user_memory(content: str, user_id: str):
- """Add new information to user's memory"""
- try:
- supermemory_client.memories.add(
- content=content,
- container_tag=f"user_{user_id}",
- metadata={"type": "personal_info", "timestamp": "auto"}
- )
- except Exception as e:
- print(f"Error adding memory: {e}")
-
- @app.post("/chat")
- async def chat_endpoint(data: dict):
- messages = data.get("messages", [])
- user_id = data.get("userId", "default-user")
-
- if not messages:
- raise HTTPException(status_code=400, detail="No messages provided")
-
- # Get user's last message for memory search
- user_message = messages[-1]["content"] if messages else ""
-
- # Search for relevant memories
- memory_context = await search_user_memories(user_message, user_id)
-
- # Add system message with memory context
- enhanced_messages = [
- {"role": "system", "content": f"{SYSTEM_PROMPT}\n\n{memory_context}"}
- ] + messages
-
- try:
- response = await openai_client.chat.completions.create(
- model="gpt-5",
- messages=enhanced_messages,
- stream=True,
- temperature=0.7
- )
-
- async def generate():
- full_response = ""
+Create a `.env` file:
+
+```bash
+SUPERMEMORY_API_KEY=your_supermemory_key_here
+OPENAI_API_KEY=your_openai_key_here
+```
+
+### Step 2: Backend (FastAPI)
+
+Create `main.py`. Let's build it step by step:
+
+#### Import Dependencies
+
+```python
+from fastapi import FastAPI, HTTPException
+from fastapi.responses import StreamingResponse
+from openai import AsyncOpenAI
+from supermemory import Supermemory
+import json
+import os
+import uuid
+from dotenv import load_dotenv
+```
+
+- **FastAPI**: Web framework for building the API endpoint
+- **StreamingResponse**: Enables real-time response streaming (words appear as they're generated)
+- **AsyncOpenAI**: OpenAI client that supports async/await for non-blocking operations
+- **Supermemory**: Client for storing and retrieving long-term memories
+- **uuid**: Creates stable, deterministic user IDs from emails
+
+#### Initialize Application and Clients
+
+```python
+load_dotenv()
+app = FastAPI()
+
+openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+supermemory_client = Supermemory(api_key=os.getenv("SUPERMEMORY_API_KEY"))
+```
+
+`load_dotenv()` loads API keys from your `.env` file into environment variables. We create two clients:
+- **OpenAI client**: Handles conversations and generates responses
+- **Supermemory client**: Stores and retrieves user-specific memories
+
+These are separate because you can swap providers independently (e.g., switch from OpenAI to Anthropic without changing memory logic).
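+
+For example, swapping the chat provider is a one-line change that leaves the memory client untouched. A sketch, assuming the `anthropic` package (not part of this recipe):
+
+```python
+# swap the chat provider; supermemory_client stays exactly as-is
+from anthropic import AsyncAnthropic
+
+chat_client = AsyncAnthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
+```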
+
+#### Define System Prompt
+
+```python
+SYSTEM_PROMPT = """You are a highly personalized AI assistant.
+
+MEMORY MANAGEMENT:
+1. When users share personal information, store it immediately
+2. Search for relevant context before responding
+3. Use past conversations to inform current responses
+
+Always be helpful while respecting privacy."""
+```
+
+This prompt guides the assistant's behavior. It tells the AI to:
+- Be proactive about learning user preferences
+- Always search memory before responding
+- Respect privacy boundaries
+
+The system prompt is injected at the start of every conversation, so the AI consistently follows these rules.
+
+#### Create Identity Helpers
+
+```python
+def normalize_email(email: str) -> str:
+ return (email or "").strip().lower()
+
+def stable_user_id_from_email(email: str) -> str:
+ norm = normalize_email(email)
+ if not norm:
+ raise ValueError("Email is required")
+ return uuid.uuid5(uuid.NAMESPACE_DNS, norm).hex
+```
+
+**Why normalize?** `"User@Mail.com"` and `" user@mail.com "` should map to the same person. We trim whitespace and lowercase to ensure consistency.
+
+**Why UUIDv5?** It's deterministic: the same email always produces the same ID. This means:
+- User memories persist across sessions
+- No raw emails in logs or database tags
+- Privacy-preserving yet stable identity
+
+We use `uuid.NAMESPACE_DNS` as a fixed namespace so the email-to-ID mapping stays stable across runs and machines.
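+
+A quick determinism check (hypothetical emails; both lines print the same hex string):
+
+```python
+# same person, different capitalization/whitespace -> identical stable ID
+print(stable_user_id_from_email("User@Mail.com"))
+print(stable_user_id_from_email(" user@mail.com "))
+```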
+
+#### Memory Search Function
+
+```python
+async def search_user_memories(query: str, container_tag: str) -> str:
+ try:
+ results = supermemory_client.search.memories(
+ q=query,
+ container_tag=container_tag,
+ limit=5
+ )
+ if results.results:
+ context = "\n".join([r.memory for r in results.results])
+ return f"Relevant memories:\n{context}"
+ return "No relevant memories found."
+ except Exception as e:
+ return f"Error searching memories: {e}"
+```
+
+This searches the user's memory store for context relevant to their current message.
+
+**Parameters:**
+- `q`: The search query (usually the user's latest message)
+- `container_tag`: Isolates memories per user (e.g., `user_abc123`)
+- `limit=5`: Returns top 5 most relevant memories
+
+**Why search before responding?** The AI can provide personalized answers based on what it knows about the user (e.g., dietary preferences, work context, communication style).
+
+**Error handling:** If memory search fails, we return a fallback message instead of crashing. The conversation continues even if memory has a hiccup.
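+
+You can exercise the helper directly, e.g. in a REPL after `from main import search_user_memories` (the container tag below is hypothetical; the endpoint derives the real one from the email):
+
+```python
+import asyncio
+
+# prints either "Relevant memories:..." or the fallback message
+print(asyncio.run(search_user_memories("breakfast ideas", "user_abc123")))
+```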
+
+#### Memory Storage Function
+
+```python
+async def add_user_memory(content: str, container_tag: str, email: str = None):
+ try:
+ supermemory_client.memories.add(
+ content=content,
+ container_tag=container_tag,
+ metadata={"type": "personal_info", "email": normalize_email(email) if email else None}
+ )
+ except Exception as e:
+ print(f"Error adding memory: {e}")
+```
+
+Stores new information about the user.
+
+**Parameters:**
+- `content`: The text to remember
+- `container_tag`: User isolation tag
+- `metadata`: Additional context (type of info, associated email)
+
+**Why metadata?** Makes it easier to filter and organize memories later (e.g., "show me all personal_info memories").
+
+**Error handling:** We log errors but don't crash. Failing to save one memory shouldn't break the entire conversation.
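+
+A direct call looks like this (hypothetical values; the endpoint supplies real ones):
+
+```python
+import asyncio
+
+# store one fact for a (hypothetical) user
+asyncio.run(add_user_memory("User is vegetarian", "user_abc123", email="you@example.com"))
+```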
+
+#### Main Chat Endpoint
+
+```python
+@app.post("/chat")
+async def chat_endpoint(data: dict):
+ messages = data.get("messages", [])
+ email = data.get("email")
+
+ if not messages:
+ raise HTTPException(status_code=400, detail="No messages provided")
+ if not email:
+ raise HTTPException(status_code=400, detail="Email required")
+```
+
+This endpoint receives the chat request. It expects:
+- `messages`: Full conversation history `[{role: "user", content: "..."}]`
+- `email`: User's email for identity
+
+**Why require email?** Without it, we can't create a stable user ID, meaning no persistent personalization.
+
+#### Derive User Identity
+
+```python
+ try:
+ user_id = stable_user_id_from_email(email)
+ except ValueError as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+ container_tag = f"user_{user_id}"
+```
+
+Convert email → stable user ID → container tag.
+
+The container tag (`user_abc123`) isolates this user's memories from everyone else's. Each user has their own "memory box."
+
+#### Search and Inject Memories
+
+```python
+ user_message = messages[-1]["content"]
+ memory_context = await search_user_memories(user_message, container_tag)
+
+ enhanced_messages = [
+ {"role": "system", "content": f"{SYSTEM_PROMPT}\n\n{memory_context}"}
+ ] + messages
+```
+
+We take the user's latest message, search for relevant memories, then inject them into the system prompt.
+
+**Example:**
+```
+Original: "What should I eat for breakfast?"
+
+Enhanced system message:
+"You are a helpful assistant... [system prompt]
+
+Relevant memories:
+- User is vegetarian
+- User works out at 6 AM
+- User prefers quick meals"
+```
+
+Now the AI can answer: "Try overnight oats with plant-based protein—perfect for post-workout!"
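+
+As a user accumulates memories, you may also want to cap how much context gets injected. A minimal sketch (the character budget is an arbitrary assumption):
+
+```python
+def clamp_context(memory_context: str, max_chars: int = 1000) -> str:
+    # keep the injected memory block within a rough prompt budget
+    return memory_context[:max_chars]
+```
+
+Call it as `memory_context = clamp_context(memory_context)` before building `enhanced_messages`.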
+
+#### Stream OpenAI Response
+
+```python
+ try:
+ response = await openai_client.chat.completions.create(
+ model="gpt-4o",
+ messages=enhanced_messages,
+ temperature=0.7,
+ stream=True
+ )
+```
+
+**Key parameters:**
+- `model="gpt-4o"`: Fast, capable model
+- `messages`: Full conversation + memory context
+- `temperature=0.7`: Balanced output (lower values are more predictable, higher more varied)
+- `stream=True`: Enables word-by-word streaming
+
+**Why stream?** Users see responses appear in real-time instead of waiting for the complete answer. Much better UX.
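+
+For comparison, the non-streaming variant is simpler but makes the user wait for the complete answer:
+
+```python
+# non-streaming variant: one request, one complete response
+response = await openai_client.chat.completions.create(
+    model="gpt-4o",
+    messages=enhanced_messages,
+    temperature=0.7,
+)
+text = response.choices[0].message.content
+```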
+
+#### Handle Streaming
+
+```python
+ async def generate():
+ try:
async for chunk in response:
if chunk.choices[0].delta.content:
content = chunk.choices[0].delta.content
- full_response += content
yield f"data: {json.dumps({'content': content})}\n\n"
+ except Exception as e:
+ yield f"data: {json.dumps({'error': str(e)})}\n\n"
+```
- # After response is complete, analyze for memory-worthy content
- if "remember" in user_message.lower() or any(word in user_message.lower() for word in ["prefer", "like", "dislike", "work", "schedule", "diet"]):
- await add_user_memory(user_message, user_id)
-
- return StreamingResponse(generate(), media_type="text/plain")
-
- except Exception as e:
- raise HTTPException(status_code=500, detail=str(e))
-
- if __name__ == "__main__":
- import uvicorn
- uvicorn.run(app, host="0.0.0.0", port=8000)
- ```
-
-
-
-### Step 3: Frontend Interface
-
-
-
- ```tsx app/page.tsx
- 'use client'
-
- import { useChat } from 'ai/react'
- import { useState, useEffect } from 'react'
-
- export default function PersonalAssistant() {
- const [userId, setUserId] = useState('')
- const [userName, setUserName] = useState('')
-
- const { messages, input, handleInputChange, handleSubmit, isLoading } = useChat({
- api: '/api/chat',
- body: {
- userId
- }
- })
-
- // Generate or retrieve user ID
- useEffect(() => {
- const storedUserId = localStorage.getItem('personal-ai-user-id')
- const storedUserName = localStorage.getItem('personal-ai-user-name')
-
- if (storedUserId) {
- setUserId(storedUserId)
- setUserName(storedUserName || '')
- } else {
- const newUserId = `user_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`
- localStorage.setItem('personal-ai-user-id', newUserId)
- setUserId(newUserId)
- }
- }, [])
-
- const handleNameSubmit = (e: React.FormEvent) => {
- e.preventDefault()
- if (userName.trim()) {
- localStorage.setItem('personal-ai-user-name', userName)
- // Send introduction message
- handleSubmit(e, {
- data: {
- content: `Hi! My name is ${userName}. I'm looking for a personal AI assistant that can learn about me and help me with various tasks.`
- }
- })
- }
- }
-
- return (
-
- {/* Header */}
-
-
Personal AI Assistant
-
- {userName ? `Hello ${userName}!` : 'Your AI that learns and remembers'}
-
-
+This async generator:
+1. Receives chunks from OpenAI as they're generated
+2. Extracts the text content from each chunk
+3. Formats it as Server-Sent Events (SSE): `data: {...}\n\n`
+4. Yields it to the client
- {/* Name Setup */}
- {!userName && (
-
-
-
- )}
-
- {/* Messages */}
-
- {messages.length === 0 && userName && (
-
-
- Hi {userName}! I'm your personal AI assistant. I'll learn about your preferences,
- work style, and interests as we chat. Feel free to share anything you'd like me to remember!
-
-
-
Try saying:
-
-
"I work as a software engineer and prefer concise responses"
-
"Remember that I'm vegetarian and allergic to nuts"
-
"I usually work from 9-5 EST and take lunch at noon"
-
-
-
- )}
-
- {messages.map((message) => (
-
-
- {message.role === 'assistant' && (
-
- AI
-
- )}
-
-
{message.content}
-
-
-
- ))}
-
- {isLoading && (
-
-
-
- AI
-
-
-
-
-
-
-
-
- )}
-
+**SSE format** is a web standard for server→client streaming. The frontend can process each chunk as it arrives.
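+
+To sanity-check the stream without a frontend, here is a minimal client sketch (assumes the backend is running locally, per Step 4; the email is a placeholder):
+
+```python
+# minimal SSE client: print the assistant's reply as it streams
+import json
+import requests
+
+resp = requests.post(
+    "http://localhost:8000/chat",
+    json={"messages": [{"role": "user", "content": "Hi!"}], "email": "you@example.com"},
+    stream=True,
+)
+for line in resp.iter_lines():
+    if line.startswith(b"data: "):
+        chunk = json.loads(line[len(b"data: "):])
+        print(chunk.get("content", ""), end="", flush=True)
+```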
- {/* Input */}
- {userName && (
-
- )}
-
- )
- }
- ```
-
-
-
- ```python streamlit_app.py
- import streamlit as st
- import requests
- import json
- import uuid
-
- st.set_page_config(page_title="Personal AI Assistant", page_icon="🤖", layout="wide")
-
- # Initialize session state
- if 'messages' not in st.session_state:
- st.session_state.messages = []
- if 'user_id' not in st.session_state:
- st.session_state.user_id = f"user_{uuid.uuid4().hex[:8]}"
- if 'user_name' not in st.session_state:
- st.session_state.user_name = None
-
- # Header
- st.title("🤖 Personal AI Assistant")
- st.markdown("*Your AI that learns and remembers*")
-
- # Sidebar for user info
- with st.sidebar:
- st.header("👤 User Profile")
-
- if not st.session_state.user_name:
- name = st.text_input("What should I call you?")
- if st.button("Get Started") and name:
+#### Optional Memory Storage
+
+```python
+ if "remember this" in user_message.lower():
+ await add_user_memory(user_message, container_tag, email=email)
+```
+
+Before the response is returned, check whether the user explicitly asked to remember something, and store it if so. (To capture memories only after the stream finishes, move this check inside `generate()`.)
+
+**Why opt-in?** Gives users control over what gets remembered. You could also make this automatic based on content analysis.
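+
+If you prefer automatic capture, a simple keyword heuristic is one option (a sketch; the keyword list is an assumption to tune for your users):
+
+```python
+# naive auto-capture heuristic: store messages that look like preferences or context
+MEMORY_HINTS = ("remember", "prefer", "i like", "i dislike", "i work", "my schedule", "diet")
+
+def looks_memory_worthy(text: str) -> bool:
+    lowered = text.lower()
+    return any(hint in lowered for hint in MEMORY_HINTS)
+```
+
+You could then call `looks_memory_worthy(user_message)` in place of the `"remember this"` check.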
+
+#### Return Streaming Response
+
+```python
+        return StreamingResponse(generate(), media_type="text/event-stream")
+
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+```
+
+`StreamingResponse` keeps the HTTP connection open and sends chunks as they're generated, and the `text/event-stream` media type matches the SSE frames we emit. The frontend receives them in real-time.
+
+#### Local Development Server
+
+```python
+if __name__ == "__main__":
+ import uvicorn
+ uvicorn.run(app, host="0.0.0.0", port=8000)
+```
+
+Run with `python main.py` and the server starts on port 8000. Binding to `0.0.0.0` makes it listen on all network interfaces, so other devices on your network can reach it (useful for testing).
+
+### Step 3: Frontend (Streamlit)
+
+Create `streamlit_app.py`:
+
+
+
+```python
+import streamlit as st
+import requests
+import json
+import uuid
+
+st.set_page_config(page_title="Personal AI Assistant", page_icon="🤖", layout="wide")
+
+def normalize_email(email: str) -> str:
+ return (email or "").strip().lower()
+
+def stable_user_id_from_email(email: str) -> str:
+ return uuid.uuid5(uuid.NAMESPACE_DNS, normalize_email(email)).hex
+
+# Session state
+if 'messages' not in st.session_state:
+ st.session_state.messages = []
+if 'user_name' not in st.session_state:
+ st.session_state.user_name = None
+if 'email' not in st.session_state:
+ st.session_state.email = None
+if 'user_id' not in st.session_state:
+ st.session_state.user_id = None
+
+st.title("🤖 Personal AI Assistant")
+st.markdown("*Your AI that learns and remembers*")
+
+with st.sidebar:
+ st.header("👤 User Profile")
+
+ if not st.session_state.user_name or not st.session_state.email:
+ name = st.text_input("What should I call you?")
+ email = st.text_input("Email", placeholder="you@example.com")
+
+ if st.button("Get Started"):
+ if name and email:
st.session_state.user_name = name
+ st.session_state.email = normalize_email(email)
+ st.session_state.user_id = stable_user_id_from_email(st.session_state.email)
st.session_state.messages.append({
"role": "user",
- "content": f"Hi! My name is {name}. I'm looking for a personal AI assistant."
+ "content": f"Hi! My name is {name}."
})
st.rerun()
- else:
- st.write(f"**Name:** {st.session_state.user_name}")
- st.write(f"**User ID:** {st.session_state.user_id[:12]}...")
+ else:
+ st.warning("Please enter both fields.")
+ else:
+ st.write(f"**Name:** {st.session_state.user_name}")
+ st.write(f"**Email:** {st.session_state.email}")
+ if st.button("Reset Conversation"):
+ st.session_state.messages = []
+ st.rerun()
+
+if st.session_state.user_name and st.session_state.email:
+ for message in st.session_state.messages:
+ with st.chat_message(message["role"]):
+ st.markdown(message["content"])
+
+ if prompt := st.chat_input("Message..."):
+ st.session_state.messages.append({"role": "user", "content": prompt})
+ with st.chat_message("user"):
+ st.markdown(prompt)
+
+ with st.chat_message("assistant"):
+ try:
+ response = requests.post(
+ "http://localhost:8000/chat",
+ json={
+ "messages": st.session_state.messages,
+ "email": st.session_state.email
+ },
+ stream=True,
+ timeout=30
+ )
+
+ if response.status_code == 200:
+ full_response = ""
+ for line in response.iter_lines():
+ if line:
+                        try:
+                            # strip the leading "data: " SSE prefix (first occurrence only)
+                            data = json.loads(line.decode('utf-8').replace('data: ', '', 1))
+                            if 'content' in data:
+                                full_response += data['content']
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            continue
+
+ st.markdown(full_response)
+ st.session_state.messages.append({"role": "assistant", "content": full_response})
+ else:
+ st.error(f"Error: {response.status_code}")
+ except Exception as e:
+ st.error(f"Error: {e}")
+else:
+ st.info("Please enter your profile in the sidebar")
+```
- if st.button("Reset Conversation"):
- st.session_state.messages = []
- st.rerun()
+
- st.markdown("---")
- st.markdown("""
- ### 💡 Try saying:
- - "I work as a software engineer and prefer concise responses"
- - "Remember that I'm vegetarian"
- - "I usually work from 9-5 EST"
- """)
-
- # Main chat interface
- if st.session_state.user_name:
- # Display messages
- for message in st.session_state.messages:
- with st.chat_message(message["role"]):
- st.markdown(message["content"])
-
- # Chat input
- if prompt := st.chat_input("Tell me something about yourself, or ask for help..."):
- # Add user message
- st.session_state.messages.append({"role": "user", "content": prompt})
-
- with st.chat_message("user"):
- st.markdown(prompt)
-
- # Get AI response
- with st.chat_message("assistant"):
- with st.spinner("Thinking..."):
- try:
- response = requests.post(
- "http://localhost:8000/chat",
- json={
- "messages": st.session_state.messages,
- "userId": st.session_state.user_id
- },
- timeout=30
- )
-
- if response.status_code == 200:
- # Handle streaming response
- full_response = ""
- for line in response.iter_lines():
- if line:
- try:
- data = json.loads(line.decode('utf-8').replace('data: ', ''))
- if 'content' in data:
- full_response += data['content']
- except:
- continue
-
- st.markdown(full_response)
- st.session_state.messages.append({
- "role": "assistant",
- "content": full_response
- })
- else:
- st.error(f"Error: {response.status_code}")
- except Exception as e:
- st.error(f"Connection error: {e}")
+### Step 4: Run It
- else:
- st.info("👆 Please enter your name in the sidebar to get started!")
+Terminal 1 - Start backend:
+```bash
+python main.py
+```
- # Run with: streamlit run streamlit_app.py
- ```
-
-
+Terminal 2 - Start frontend:
+```bash
+streamlit run streamlit_app.py
+```
-## Testing Your Assistant
+Open `http://localhost:8501` in your browser.
-### Step 4: Test Memory Formation
+---
-Try these conversation flows to test memory capabilities:
+## TypeScript Implementation
-1. **Personal Preferences**:
- ```
- User: "Hi! I'm Sarah, a product manager at a tech startup. I prefer brief, actionable responses and I'm always busy with user research."
+### Step 1: Project Setup
- Assistant: [Should remember name, role, communication preference]
+```bash
+npx create-next-app@latest personal-ai --typescript --tailwind --app
+cd personal-ai
+npm install @supermemory/tools ai @ai-sdk/openai
+```
+
+Create `.env.local`:
+
+```bash
+SUPERMEMORY_API_KEY=your_supermemory_key_here
+OPENAI_API_KEY=your_openai_key_here
+```
+
+### Step 2: API Route
- User: "What's a good way to prioritize features?"
+Create `app/api/chat/route.ts`. Let's break it down:
- Assistant: [Should reference that you're a PM and prefer brief responses]
- ```
+#### Import Dependencies
-2. **Dietary & Lifestyle**:
- ```
- User: "Remember that I'm vegan and I work out every morning at 6 AM."
+```typescript
+import { streamText } from 'ai'
+import { createOpenAI } from '@ai-sdk/openai'
+import { supermemoryTools } from '@supermemory/tools/ai-sdk'
+```
- User: "Suggest a quick breakfast for tomorrow."
+- **streamText**: Vercel AI SDK function that handles streaming responses and tool calling
+- **createOpenAI**: Factory function to create an OpenAI provider
+- **supermemoryTools**: Pre-built tools for memory search and storage
- Assistant: [Should suggest vegan options that work for pre/post workout]
- ```
+#### Initialize OpenAI Provider
-3. **Work Context**:
- ```
- User: "I'm working on a React project and I prefer TypeScript over JavaScript."
+```typescript
+const openai = createOpenAI({
+ apiKey: process.env.OPENAI_API_KEY!
+})
+```
- User: "Help me with state management."
+Creates an OpenAI provider configured with your API key. The `!` tells TypeScript "this definitely exists" (because we set it in `.env.local`).
- Assistant: [Should suggest TypeScript-specific solutions]
- ```
+This provider object will be passed to `streamText` to specify which AI model to use.
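+
+The factory accepts further options as well; for instance, routing through an OpenAI-compatible gateway (the URL is a placeholder assumption):
+
+```typescript
+// optional: point the provider at an OpenAI-compatible endpoint
+const gatewayOpenai = createOpenAI({
+  apiKey: process.env.OPENAI_API_KEY!,
+  baseURL: 'https://your-gateway.example.com/v1',
+})
+```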
-### Step 5: Verify Memory Storage
+#### Define System Prompt
-Check that memories are being stored properly:
+```typescript
+const SYSTEM_PROMPT = `You are a highly personalized AI assistant.
-
-
- ```typescript scripts/check-memories.ts
- import { Supermemory } from '@supermemory/tools'
+When users share personal information, remember it using the addMemory tool.
+Before responding, search your memories using searchMemories to provide personalized help.
+Always be helpful while respecting privacy.`
+```
- const client = new Supermemory({
- apiKey: process.env.SUPERMEMORY_API_KEY!
- })
+This guides the AI's behavior and tells it:
+- **When to use tools**: Search memories before responding, add memories when users share info
+- **Personality**: Be helpful and personalized
+- **Boundaries**: Respect privacy
- async function checkUserMemories(userId: string) {
- try {
- const memories = await client.memories.list({
- containerTags: [userId],
- limit: 20,
- sort: 'updatedAt',
- order: 'desc'
- })
-
- console.log(`Found ${memories.memories.length} memories for ${userId}:`)
- memories.memories.forEach((memory, i) => {
- console.log(`${i + 1}. ${memory.content.substring(0, 100)}...`)
- })
-
- // Test search
- const searchResults = await client.search.memories({
- q: "preferences work",
- containerTag: userId,
- limit: 5
- })
-
- console.log('\nSearch Results:')
- searchResults.results.forEach((result, i) => {
- console.log(`${i + 1}. (${result.similarity}) ${result.memory.substring(0, 100)}...`)
- })
-
- } catch (error) {
- console.error('Error:', error)
- }
- }
+The AI SDK uses this to decide when to call `searchMemories` and `addMemory` tools automatically.
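+
+You can inspect the tool set before wiring it in (a sketch; we assume the exported tool names match those referenced in the prompt):
+
+```typescript
+// list the tool names supermemoryTools exposes
+const tools = supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
+  containerTags: ['user_demo'],
+})
+console.log(Object.keys(tools)) // expected to include searchMemories and addMemory
+```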
- // Run: npx ts-node scripts/check-memories.ts USER_ID_HERE
- checkUserMemories(process.argv[2] || 'default-user')
- ```
-
-
-
- ```python check_memories.py
- from supermemory import Supermemory
- import os
- import sys
-
- client = Supermemory(api_key=os.getenv("SUPERMEMORY_API_KEY"))
-
- def check_user_memories(user_id):
- try:
- # List all memories for user
- memories = client.memories.list(
- container_tags=[user_id],
- limit=20,
- sort="updatedAt",
- order="desc"
- )
-
- print(f"Found {len(memories.memories)} memories for {user_id}:")
- for i, memory in enumerate(memories.memories):
- print(f"{i + 1}. {memory.content[:100]}...")
-
- # Test search
- search_results = client.search.memories(
- q="preferences work",
- container_tag=user_id,
- limit=5
- )
-
- print('\nSearch Results:')
- for i, result in enumerate(search_results.results):
- print(f"{i + 1}. ({result.similarity}) {result.memory[:100]}...")
-
- except Exception as error:
- print(f'Error: {error}')
-
- # Run: python check_memories.py USER_ID_HERE
- user_id = sys.argv[1] if len(sys.argv) > 1 else 'default-user'
- check_user_memories(user_id)
- ```
-
-
-
-## Production Considerations
-
-### Security & Privacy
-
-1. **User Isolation**:
- ```typescript
- // Always use user-specific container tags
- const tools = supermemoryTools(apiKey, {
- containerTags: [userId]
- })
- ```
-
-2. **Memory Encryption**:
- ```typescript
- // For sensitive data, consider client-side encryption
- const encryptedContent = encrypt(sensitiveData, userKey)
- await client.memories.add({
- content: encryptedContent,
- containerTag: userId,
- metadata: { encrypted: true }
- })
- ```
-
-### Performance Optimization
-
-1. **Memory Search Optimization**:
- ```typescript
- // Use appropriate thresholds for speed vs accuracy
- const quickSearch = await client.search.memories({
- q: userQuery,
- containerTag: userId,
- threshold: 0.6, // Balanced
- rerank: false, // Skip for speed
- limit: 3 // Fewer results
- })
- ```
-
-2. **Caching Strategy**:
- ```typescript
- // Cache frequently accessed user context
- const userContext = await redis.get(`user_context:${userId}`)
- if (!userContext) {
- const memories = await client.search.memories({
- q: "user preferences work style",
- containerTag: userId,
- limit: 10
- })
- await redis.setex(`user_context:${userId}`, 300, JSON.stringify(memories))
- }
- ```
-
-### Monitoring & Analytics
+#### Create POST Handler
```typescript
-// Track memory formation and retrieval
-const analytics = {
- memoriesCreated: await redis.incr(`memories_created:${userId}`),
- searchesPerformed: await redis.incr(`searches:${userId}`),
- conversationLength: messages.length
-}
+export async function POST(req: Request) {
+ try {
+ const { messages, email } = await req.json()
+```
-// Log for analysis
-console.log('User Interaction:', {
- userId,
- action: 'chat_response',
- memoriesFound: searchResults.results.length,
- responseTime: Date.now() - startTime,
- ...analytics
-})
+Next.js App Router convention: export an async function named after the HTTP method. This handles POST requests to `/api/chat`.
+
+We extract:
+- `messages`: Chat history array `[{role, content}]`
+- `email`: User identifier
+
+#### Validate Input
+
+```typescript
+ if (!messages?.length) {
+ return new Response('No messages provided', { status: 400 })
+ }
+ if (!email) {
+ return new Response('Email required', { status: 400 })
+ }
```
-## Extensions & Customization
+**Why validate?** Prevents crashes from malformed requests. We need:
+- At least one message to respond to
+- An email to isolate user memories
-### 1. Add Personality Profiles
+Without email, we can't maintain personalization across sessions.
+
+#### Create Container Tag
```typescript
-const personalityProfiles = {
- professional: "Respond in a formal, business-appropriate tone",
- casual: "Use a friendly, conversational tone with occasional humor",
- technical: "Provide detailed technical explanations with examples",
- concise: "Keep responses brief and to the point"
-}
+ const containerTag = `user_${email.toLowerCase().trim()}`
+```
+
+Convert email to a container tag for memory isolation.
+
+**Simpler than Python**: We skip UUID generation here for simplicity. In production, you might want to hash the email for privacy:
+
+```typescript
+// Optional: More privacy-preserving approach
+import crypto from 'crypto'
+const containerTag = `user_${crypto.createHash('sha256').update(email).digest('hex').slice(0, 16)}`
+```
+
+#### Call streamText with Tools
+
+```typescript
+ const result = streamText({
+ model: openai('gpt-4o'),
+ messages,
+ tools: supermemoryTools(process.env.SUPERMEMORY_API_KEY!, {
+ containerTags: [containerTag]
+ }),
+ system: SYSTEM_PROMPT
+ })
+```
+
+This is where the magic happens! Let's break down each parameter:
+
+**`model: openai('gpt-4o')`**
+- Specifies which AI model to use
+- The AI SDK handles the API calls
+
+**`messages`**
+- Full conversation history
+- Format: `[{role: "user"|"assistant", content: "..."}]`
+
+**`tools: supermemoryTools(...)`**
+- Gives the AI access to memory operations
+- The AI SDK automatically:
+ - Decides when to call tools based on the conversation
+ - Calls `searchMemories` when it needs context
+ - Calls `addMemory` when users share information
+ - Handles tool execution and error handling
+
+**`containerTags: [containerTag]`**
+- Scopes all memory operations to this specific user
+- Ensures User A can't access User B's memories
+
+**`system: SYSTEM_PROMPT`**
+- Guides the AI's behavior and tool usage
+
+**How tools work:**
+1. User: "Remember that I'm vegetarian"
+2. AI SDK detects this is memory-worthy
+3. Automatically calls `addMemory("User is vegetarian")`
+4. Stores in Supermemory with the user's container tag
+5. Responds: "Got it, I'll remember that!"
+
+Later:
+1. User: "What should I eat?"
+2. AI SDK calls `searchMemories("food preferences")`
+3. Retrieves: "User is vegetarian"
+4. Responds: "How about a delicious veggie stir-fry?"
-// Add to system prompt based on user preference
-const userProfile = await getUserProfile(userId)
-const systemPrompt = `${basePrompt}\n\nCommunication Style: ${personalityProfiles[userProfile.style]}`
+**No manual tool handling needed!** The AI SDK manages the entire flow.
+
+#### Return Streaming Response
+
+```typescript
+ return result.toAIStreamResponse()
```
-### 2. Smart Notifications
+`toAIStreamResponse()` converts the streaming result into a format the frontend can consume. It:
+- Sets appropriate headers for streaming
+- Formats data for the `useChat` hook
+- Handles errors gracefully
+
+This returns immediately (doesn't wait for completion), and chunks stream to the client as they're generated.
+
+#### Error Handling
```typescript
-// Proactive suggestions based on user patterns
-const shouldSuggest = await analyzeUserPatterns(userId)
-if (shouldSuggest.type === 'daily_standup') {
- return {
- message: "Based on your schedule, would you like me to help prepare for your 9 AM standup?",
- suggestedActions: ["Review yesterday's progress", "Prepare today's goals"]
+ } catch (error: any) {
+ console.error('Chat error:', error)
+ return new Response(error.message, { status: 500 })
}
}
```
-### 3. Multi-Modal Memory
+Catches any errors (API failures, tool errors, etc.) and returns a clean error response.
+
+**Why log to console?** In production, you'd send this to a monitoring service (Sentry, DataDog, etc.) to track issues.
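+
+A small step up from `console.error` is structured logging, which log aggregators can index. A generic sketch, not tied to any particular service:
+
+```typescript
+// structured error log: easier to search than free-form text
+function logChatError(error: unknown, email?: string) {
+  console.error(
+    JSON.stringify({
+      at: 'api/chat',
+      email,
+      message: error instanceof Error ? error.message : String(error),
+      ts: new Date().toISOString(),
+    }),
+  )
+}
+```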
+
+---
+
+**Key Differences from Python:**
+
+| Aspect | Python | TypeScript |
+|--------|--------|------------|
+| **Memory Search** | Manual `search_user_memories()` call | AI SDK calls `searchMemories` tool automatically |
+| **Memory Add** | Manual `add_user_memory()` call | AI SDK calls `addMemory` tool automatically |
+| **Tool Decision** | You decide when to search/add | AI decides based on conversation context |
+| **Streaming** | Manual SSE formatting | `toAIStreamResponse()` handles it |
+| **Error Handling** | Try/catch in each function | AI SDK handles tool errors |
+
+**Python = Manual Control**
+You explicitly search and add memories. More control, more code.
+
+**TypeScript = AI-Driven**
+The AI decides when to use tools. Less code, more "magic."
+
+### Step 3: Chat UI
+
+Replace `app/page.tsx`:
+
+
```typescript
-// Handle images and documents
-if (message.attachments) {
- for (const attachment of message.attachments) {
- await client.memories.uploadFile({
- file: attachment,
- containerTag: userId,
- metadata: {
- type: 'user_shared',
- context: message.content
- }
- })
+'use client'
+import { useChat } from 'ai/react'
+import { useState } from 'react'
+
+export default function ChatPage() {
+ const [email, setEmail] = useState('')
+ const [userName, setUserName] = useState('')
+ const [tempEmail, setTempEmail] = useState('')
+ const [tempName, setTempName] = useState('')
+
+ const { messages, input, handleInputChange, handleSubmit } = useChat({
+ api: '/api/chat',
+ body: { email }
+ })
+
+  if (!email) {
+    return (
+      <form onSubmit={(e) => {
+        e.preventDefault()
+        if (tempName && tempEmail) {
+          setUserName(tempName)
+          setEmail(tempEmail.trim().toLowerCase())
+        }
+      }}>
+        {/* original styling was lost in extraction; minimal structure reconstructed */}
+        <input value={tempName} onChange={(e) => setTempName(e.target.value)} placeholder="Your name" />
+        <input value={tempEmail} onChange={(e) => setTempEmail(e.target.value)} placeholder="you@example.com" />
+        <button type="submit">Get Started</button>
+      </form>
+    )
+  }
+
+  return (
+    <div>
+      <h1>Personal AI Assistant</h1>
+      <p>Hello {userName}!</p>
+      {messages.map((m) => (
+        <div key={m.id}>
+          <strong>{m.role === 'user' ? 'You' : 'AI'}:</strong> {m.content}
+        </div>
+      ))}
+      <form onSubmit={handleSubmit}>
+        <input value={input} onChange={handleInputChange} placeholder="Message..." />
+        <button type="submit">Send</button>
+      </form>
+    </div>
+  )
+}
+```
diff --git a/apps/web/components/chrome-extension-detail-view.tsx b/apps/web/components/chrome-extension-detail-view.tsx
new file mode 100644
index 000000000..5b4170954
--- /dev/null
+++ b/apps/web/components/chrome-extension-detail-view.tsx
@@ -0,0 +1,113 @@
+"use client"
+
+import { Button } from "@ui/components/button"
+import { cn } from "@lib/utils"
+import { dmSansClassName } from "@/utils/fonts"
+import Image from "next/image"
+
+interface ChromeExtensionDetailViewProps {
+ onBack: () => void
+}
+
+const steps = [
+ {
+ number: 1,
+ title: "Install the Chrome Extension and login with your supermemory",
+ image: "/onboarding/chrome-ext-1.png",
+ },
+ {
+ number: 2,
+ title: "Visit the bookmarks tab on X and one-click import your bookmarks",
+ image: "/onboarding/chrome-ext-2.png",
+ },
+ {
+ number: 3,
+ title: "Talk to your bookmarks via Nova & see it in your memory graph",
+ image: "/onboarding/chrome-ext-3.png",
+ },
+]
+
+export function ChromeExtensionDetailView({
+ onBack,
+}: ChromeExtensionDetailViewProps) {
+ const handleInstall = () => {
+ window.open(
+ "https://chromewebstore.google.com/detail/supermemory/afpgkkipfdpeaflnangednailhoegogi",
+ "_blank",
+ )
+ }
+
+  return (
+    <div className={cn("flex flex-col gap-6", dmSansClassName)}>
+      {/* NOTE: the original layout classes were lost in extraction; minimal structure reconstructed */}
+      <Button onClick={onBack}>← Back</Button>
+
+      <h2>Import your X bookmarks via the Chrome Extension</h2>
+
+      <p>
+        Bring your X bookmarks into Supermemory in just a few clicks.
+        They'll be automatically embedded so you can easily find what you
+        need, right when you need it.
+      </p>
+
+      <div className="flex flex-col gap-4">
+        {steps.map((step) => (
+          <div key={step.number}>
+            {/* step screenshot; dimensions are assumed */}
+            <Image src={step.image} alt={step.title} width={640} height={360} />
+            <span>Step {step.number}</span>
+            <p>{step.title}</p>
+          </div>
+        ))}
+      </div>
+
+      <Button onClick={handleInstall}>Install Chrome Extension →</Button>
+    </div>
+  )
+}
diff --git a/apps/web/components/initial-header.tsx b/apps/web/components/initial-header.tsx
new file mode 100644
index 000000000..59fb7a756
--- /dev/null
+++ b/apps/web/components/initial-header.tsx
@@ -0,0 +1,33 @@
+import { Logo } from "@ui/assets/Logo"
+import { Button } from "@ui/components/button"
+
+export function InitialHeader({
+ showUserSupermemory,
+ name,
+}: {
+ showUserSupermemory?: boolean
+ name?: string
+}) {
+ return (
+