diff --git a/README.md b/README.md index a69b1f965..a8c8176ad 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ Raycast Extension   - + Discord

@@ -27,6 +27,9 @@

Building with Supermemory? Check out the Developer Console and Documentation for API access.

+

+ Want to self-host? See our Self-Hosting Guide for enterprise deployment options. +


diff --git a/apps/docs/cookbook/personal-assistant.mdx b/apps/docs/cookbook/personal-assistant.mdx index c13b7471f..7d5f256b4 100644 --- a/apps/docs/cookbook/personal-assistant.mdx +++ b/apps/docs/cookbook/personal-assistant.mdx @@ -3,764 +3,862 @@ title: "Personal AI Assistant" description: "Build an AI assistant that remembers user preferences, habits, and context across conversations" --- -Build a personal AI assistant that learns and remembers everything about the user - their preferences, habits, work context, and conversation history. This recipe shows how to create a truly personalized AI experience using Supermemory's memory tools. +Build a personal AI assistant that learns and remembers everything about the user - their preferences, habits, work context, and conversation history. ## What You'll Build A personal AI assistant that: - **Remembers user preferences** (dietary restrictions, work schedule, communication style) -- **Learns from conversations** and improves responses over time -- **Maintains context** across multiple chat sessions +- **Maintains context** across multiple chat sessions - **Provides personalized recommendations** based on user history - **Handles multiple conversation topics** while maintaining context +## Choose Your Implementation + + + + Thoroughly tested, production-ready. Uses FastAPI + Streamlit + OpenAI. + + + Modern React approach. Uses Next.js + Vercel AI SDK + Supermemory tools. + + + ## Prerequisites -- Node.js 18+ or Python 3.8+ -- Supermemory API key -- OpenAI or Anthropic API key -- Basic understanding of chat applications +- **Python 3.8+** or **Node.js 18+** +- **Supermemory API key** ([get one here](https://console.supermemory.ai)) +- **OpenAI API key** ([get one here](https://platform.openai.com/api-keys)) -## Implementation + +Never hardcode API keys in your code. Use environment variables. + + +--- + +## Python Implementation ### Step 1: Project Setup - - - ```bash - npx create-next-app@latest personal-ai --typescript --tailwind --eslint - cd personal-ai - npm install @supermemory/tools ai openai - ``` - - Create your environment variables: - ```bash .env.local - SUPERMEMORY_API_KEY=your_supermemory_key - OPENAI_API_KEY=your_openai_key - ``` - - - - ```bash - mkdir personal-ai && cd personal-ai - python -m venv venv - source venv/bin/activate # On Windows: venv\Scripts\activate - pip install supermemory openai fastapi uvicorn python-multipart - ``` - - Create your environment variables: - ```bash .env - SUPERMEMORY_API_KEY=your_supermemory_key - OPENAI_API_KEY=your_openai_key - ``` - - - -### Step 2: Core Assistant Logic - - - - ```typescript app/api/chat/route.ts - import { streamText } from 'ai' - import { createOpenAI } from '@ai-sdk/openai' - import { supermemoryTools } from '@supermemory/tools/ai-sdk' - - const openai = createOpenAI({ - apiKey: process.env.OPENAI_API_KEY! - }) +```bash +mkdir personal-ai && cd personal-ai +python -m venv venv +source venv/bin/activate # On Windows: venv\Scripts\activate +pip install supermemory openai fastapi uvicorn python-dotenv streamlit requests +``` - export async function POST(request: Request) { - const { messages, userId = 'default-user' } = await request.json() - - const result = await streamText({ - model: openai('gpt-5'), - messages, - tools: supermemoryTools(process.env.SUPERMEMORY_API_KEY!, { - containerTags: [userId] - }), - system: `You are a highly personalized AI assistant. Your primary goal is to learn about the user and provide increasingly personalized help over time. 
- - MEMORY MANAGEMENT: - 1. When users share personal information, preferences, or context, immediately use addMemory to store it - 2. Before responding to requests, search your memories for relevant context about the user - 3. Use past conversations to inform current responses - 4. Remember user's communication style, preferences, and frequently discussed topics - - PERSONALITY: - - Adapt your communication style to match the user's preferences - - Reference past conversations naturally when relevant - - Proactively offer help based on learned patterns - - Be genuinely helpful while respecting privacy - - EXAMPLES OF WHAT TO REMEMBER: - - Work schedule and role - - Dietary preferences/restrictions - - Communication preferences (formal/casual) - - Frequent topics of interest - - Goals and projects they're working on - - Family/personal context they share - - Preferred tools and workflows - - Time zone and availability - - Always search memories before responding to provide personalized, contextual help.` - }) - - return result.toAIStreamResponse() - } - ``` - - - - ```python main.py - from fastapi import FastAPI, HTTPException - from fastapi.responses import StreamingResponse - import openai - from supermemory import Supermemory - import json - import os - from typing import List, Dict, Any - import asyncio - - app = FastAPI() - - openai_client = openai.AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY")) - supermemory_client = Supermemory(api_key=os.getenv("SUPERMEMORY_API_KEY")) - - SYSTEM_PROMPT = """You are a highly personalized AI assistant. Your primary goal is to learn about the user and provide increasingly personalized help over time. - - MEMORY MANAGEMENT: - 1. When users share personal information, preferences, or context, immediately store it - 2. Before responding to requests, search for relevant context about the user - 3. Use past conversations to inform current responses - 4. Remember user's communication style, preferences, and frequently discussed topics - - PERSONALITY: - - Adapt your communication style to match the user's preferences - - Reference past conversations naturally when relevant - - Proactively offer help based on learned patterns - - Be genuinely helpful while respecting privacy - - Always search memories before responding to provide personalized, contextual help.""" - - async def search_user_memories(query: str, user_id: str) -> str: - """Search user's memories for relevant context""" - try: - results = supermemory_client.search.memories( - q=query, - container_tag=f"user_{user_id}", - limit=5 - ) - - if results.results: - context = "\n".join([r.memory for r in results.results]) - return f"Relevant memories about the user:\n{context}" - return "No relevant memories found." 
- except Exception as e: - return f"Error searching memories: {e}" - - async def add_user_memory(content: str, user_id: str): - """Add new information to user's memory""" - try: - supermemory_client.memories.add( - content=content, - container_tag=f"user_{user_id}", - metadata={"type": "personal_info", "timestamp": "auto"} - ) - except Exception as e: - print(f"Error adding memory: {e}") - - @app.post("/chat") - async def chat_endpoint(data: dict): - messages = data.get("messages", []) - user_id = data.get("userId", "default-user") - - if not messages: - raise HTTPException(status_code=400, detail="No messages provided") - - # Get user's last message for memory search - user_message = messages[-1]["content"] if messages else "" - - # Search for relevant memories - memory_context = await search_user_memories(user_message, user_id) - - # Add system message with memory context - enhanced_messages = [ - {"role": "system", "content": f"{SYSTEM_PROMPT}\n\n{memory_context}"} - ] + messages - - try: - response = await openai_client.chat.completions.create( - model="gpt-5", - messages=enhanced_messages, - stream=True, - temperature=0.7 - ) - - async def generate(): - full_response = "" +Create a `.env` file: + +```bash +SUPERMEMORY_API_KEY=your_supermemory_key_here +OPENAI_API_KEY=your_openai_key_here +``` + +### Step 2: Backend (FastAPI) + +Create `main.py`. Let's build it step by step: + +#### Import Dependencies + +```python +from fastapi import FastAPI, HTTPException +from fastapi.responses import StreamingResponse +from openai import AsyncOpenAI +from supermemory import Supermemory +import json +import os +import uuid +from dotenv import load_dotenv +``` + +- **FastAPI**: Web framework for building the API endpoint +- **StreamingResponse**: Enables real-time response streaming (words appear as they're generated) +- **AsyncOpenAI**: OpenAI client that supports async/await for non-blocking operations +- **Supermemory**: Client for storing and retrieving long-term memories +- **uuid**: Creates stable, deterministic user IDs from emails + +#### Initialize Application and Clients + +```python +load_dotenv() +app = FastAPI() + +openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY")) +supermemory_client = Supermemory(api_key=os.getenv("SUPERMEMORY_API_KEY")) +``` + +`load_dotenv()` loads API keys from your `.env` file into environment variables. We create two clients: +- **OpenAI client**: Handles conversations and generates responses +- **Supermemory client**: Stores and retrieves user-specific memories + +These are separate because you can swap providers independently (e.g., switch from OpenAI to Anthropic without changing memory logic). + +#### Define System Prompt + +```python +SYSTEM_PROMPT = """You are a highly personalized AI assistant. + +MEMORY MANAGEMENT: +1. When users share personal information, store it immediately +2. Search for relevant context before responding +3. Use past conversations to inform current responses + +Always be helpful while respecting privacy.""" +``` + +This prompt guides the assistant's behavior. It tells the AI to: +- Be proactive about learning user preferences +- Always search memory before responding +- Respect privacy boundaries + +The system prompt is injected at the start of every conversation, so the AI consistently follows these rules. 
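+One optional hardening step before moving on: fail fast at startup if either key is missing, rather than failing later mid-request. A minimal sketch, reusing the same `.env` variable names as above:
+
+```python
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+# Abort early with a clear message if a key was never configured
+for key in ("SUPERMEMORY_API_KEY", "OPENAI_API_KEY"):
+    if not os.getenv(key):
+        raise RuntimeError(f"Missing required environment variable: {key}")
+```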
+ +#### Create Identity Helpers + +```python +def normalize_email(email: str) -> str: + return (email or "").strip().lower() + +def stable_user_id_from_email(email: str) -> str: + norm = normalize_email(email) + if not norm: + raise ValueError("Email is required") + return uuid.uuid5(uuid.NAMESPACE_DNS, norm).hex +``` + +**Why normalize?** `"User@Mail.com"` and `" user@mail.com "` should map to the same person. We trim whitespace and lowercase to ensure consistency. + +**Why UUIDv5?** It's deterministic—same email always produces the same ID. This means: +- User memories persist across sessions +- No raw emails in logs or database tags +- Privacy-preserving yet stable identity + +We use `uuid.NAMESPACE_DNS` as the namespace to ensure uniqueness. + +#### Memory Search Function + +```python +async def search_user_memories(query: str, container_tag: str) -> str: + try: + results = supermemory_client.search.memories( + q=query, + container_tag=container_tag, + limit=5 + ) + if results.results: + context = "\n".join([r.memory for r in results.results]) + return f"Relevant memories:\n{context}" + return "No relevant memories found." + except Exception as e: + return f"Error searching memories: {e}" +``` + +This searches the user's memory store for context relevant to their current message. + +**Parameters:** +- `q`: The search query (usually the user's latest message) +- `container_tag`: Isolates memories per user (e.g., `user_abc123`) +- `limit=5`: Returns top 5 most relevant memories + +**Why search before responding?** The AI can provide personalized answers based on what it knows about the user (e.g., dietary preferences, work context, communication style). + +**Error handling:** If memory search fails, we return a fallback message instead of crashing. The conversation continues even if memory has a hiccup. + +#### Memory Storage Function + +```python +async def add_user_memory(content: str, container_tag: str, email: str = None): + try: + supermemory_client.memories.add( + content=content, + container_tag=container_tag, + metadata={"type": "personal_info", "email": normalize_email(email) if email else None} + ) + except Exception as e: + print(f"Error adding memory: {e}") +``` + +Stores new information about the user. + +**Parameters:** +- `content`: The text to remember +- `container_tag`: User isolation tag +- `metadata`: Additional context (type of info, associated email) + +**Why metadata?** Makes it easier to filter and organize memories later (e.g., "show me all personal_info memories"). + +**Error handling:** We log errors but don't crash. Failing to save one memory shouldn't break the entire conversation. + +#### Main Chat Endpoint + +```python +@app.post("/chat") +async def chat_endpoint(data: dict): + messages = data.get("messages", []) + email = data.get("email") + + if not messages: + raise HTTPException(status_code=400, detail="No messages provided") + if not email: + raise HTTPException(status_code=400, detail="Email required") +``` + +This endpoint receives the chat request. It expects: +- `messages`: Full conversation history `[{role: "user", content: "..."}]` +- `email`: User's email for identity + +**Why require email?** Without it, we can't create a stable user ID, meaning no persistent personalization. + +#### Derive User Identity + +```python + try: + user_id = stable_user_id_from_email(email) + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + + container_tag = f"user_{user_id}" +``` + +Convert email → stable user ID → container tag. 
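+Because the mapping is deterministic, a quick check in a REPL shows that differently formatted versions of the same address collapse to a single ID:
+
+```python
+# Illustrative check: same person, same ID, regardless of formatting
+assert stable_user_id_from_email("User@Mail.com") == stable_user_id_from_email(" user@mail.com ")
+```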
+ +The container tag (`user_abc123`) isolates this user's memories from everyone else's. Each user has their own "memory box." + +#### Search and Inject Memories + +```python + user_message = messages[-1]["content"] + memory_context = await search_user_memories(user_message, container_tag) + + enhanced_messages = [ + {"role": "system", "content": f"{SYSTEM_PROMPT}\n\n{memory_context}"} + ] + messages +``` + +We take the user's latest message, search for relevant memories, then inject them into the system prompt. + +**Example:** +``` +Original: "What should I eat for breakfast?" + +Enhanced system message: +"You are a helpful assistant... [system prompt] + +Relevant memories: +- User is vegetarian +- User works out at 6 AM +- User prefers quick meals" +``` + +Now the AI can answer: "Try overnight oats with plant-based protein—perfect for post-workout!" + +#### Stream OpenAI Response + +```python + try: + response = await openai_client.chat.completions.create( + model="gpt-4o", + messages=enhanced_messages, + temperature=0.7, + stream=True + ) +``` + +**Key parameters:** +- `model="gpt-4o"`: Fast, capable model +- `messages`: Full conversation + memory context +- `temperature=0.7`: Balanced creativity (0=deterministic, 1=creative) +- `stream=True`: Enables word-by-word streaming + +**Why stream?** Users see responses appear in real-time instead of waiting for the complete answer. Much better UX. + +#### Handle Streaming + +```python + async def generate(): + try: async for chunk in response: if chunk.choices[0].delta.content: content = chunk.choices[0].delta.content - full_response += content yield f"data: {json.dumps({'content': content})}\n\n" + except Exception as e: + yield f"data: {json.dumps({'error': str(e)})}\n\n" +``` - # After response is complete, analyze for memory-worthy content - if "remember" in user_message.lower() or any(word in user_message.lower() for word in ["prefer", "like", "dislike", "work", "schedule", "diet"]): - await add_user_memory(user_message, user_id) - - return StreamingResponse(generate(), media_type="text/plain") - - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - if __name__ == "__main__": - import uvicorn - uvicorn.run(app, host="0.0.0.0", port=8000) - ``` - - - -### Step 3: Frontend Interface - - - - ```tsx app/page.tsx - 'use client' - - import { useChat } from 'ai/react' - import { useState, useEffect } from 'react' - - export default function PersonalAssistant() { - const [userId, setUserId] = useState('') - const [userName, setUserName] = useState('') - - const { messages, input, handleInputChange, handleSubmit, isLoading } = useChat({ - api: '/api/chat', - body: { - userId - } - }) - - // Generate or retrieve user ID - useEffect(() => { - const storedUserId = localStorage.getItem('personal-ai-user-id') - const storedUserName = localStorage.getItem('personal-ai-user-name') - - if (storedUserId) { - setUserId(storedUserId) - setUserName(storedUserName || '') - } else { - const newUserId = `user_${Date.now()}_${Math.random().toString(36).substr(2, 9)}` - localStorage.setItem('personal-ai-user-id', newUserId) - setUserId(newUserId) - } - }, []) - - const handleNameSubmit = (e: React.FormEvent) => { - e.preventDefault() - if (userName.trim()) { - localStorage.setItem('personal-ai-user-name', userName) - // Send introduction message - handleSubmit(e, { - data: { - content: `Hi! My name is ${userName}. 
I'm looking for a personal AI assistant that can learn about me and help me with various tasks.` - } - }) - } - } - - return ( -
- {/* Header */} -
-

Personal AI Assistant

-

- {userName ? `Hello ${userName}!` : 'Your AI that learns and remembers'} -

-
+This async generator: +1. Receives chunks from OpenAI as they're generated +2. Extracts the text content from each chunk +3. Formats it as Server-Sent Events (SSE): `data: {...}\n\n` +4. Yields it to the client - {/* Name Setup */} - {!userName && ( -
-
- setUserName(e.target.value)} - placeholder="What should I call you?" - className="flex-1 p-2 border border-gray-300 rounded focus:outline-none focus:ring-2 focus:ring-blue-500" - /> - -
-
- )} - - {/* Messages */} -
- {messages.length === 0 && userName && ( -
-

- Hi {userName}! I'm your personal AI assistant. I'll learn about your preferences, - work style, and interests as we chat. Feel free to share anything you'd like me to remember! -

-
-

Try saying:

-
    -
  • "I work as a software engineer and prefer concise responses"
  • -
  • "Remember that I'm vegetarian and allergic to nuts"
  • -
  • "I usually work from 9-5 EST and take lunch at noon"
  • -
-
-
- )} - - {messages.map((message) => ( -
-
- {message.role === 'assistant' && ( -
- AI -
- )} -
-

{message.content}

-
-
-
- ))} - - {isLoading && ( -
-
-
- AI -
-
-
-
-
-
-
-
- )} -
+**SSE format** is a web standard for server→client streaming. The frontend can process each chunk as it arrives. - {/* Input */} - {userName && ( -
- - -
- )} -
- ) - } - ``` -
- - - ```python streamlit_app.py - import streamlit as st - import requests - import json - import uuid - - st.set_page_config(page_title="Personal AI Assistant", page_icon="🤖", layout="wide") - - # Initialize session state - if 'messages' not in st.session_state: - st.session_state.messages = [] - if 'user_id' not in st.session_state: - st.session_state.user_id = f"user_{uuid.uuid4().hex[:8]}" - if 'user_name' not in st.session_state: - st.session_state.user_name = None - - # Header - st.title("🤖 Personal AI Assistant") - st.markdown("*Your AI that learns and remembers*") - - # Sidebar for user info - with st.sidebar: - st.header("👤 User Profile") - - if not st.session_state.user_name: - name = st.text_input("What should I call you?") - if st.button("Get Started") and name: +#### Optional Memory Storage + +```python + if "remember this" in user_message.lower(): + await add_user_memory(user_message, container_tag, email=email) +``` + +After streaming completes, check if the user explicitly asked to remember something. If yes, store it. + +**Why opt-in?** Gives users control over what gets remembered. You could also make this automatic based on content analysis. + +#### Return Streaming Response + +```python + return StreamingResponse(generate(), media_type="text/plain") + + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) +``` + +`StreamingResponse` keeps the HTTP connection open and sends chunks as they're generated. The frontend receives them in real-time. + +#### Local Development Server + +```python +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) +``` + +Run with `python main.py` and the server starts on port 8000. `0.0.0.0` means it accepts connections from any IP (useful for testing from other devices). + +### Step 3: Frontend (Streamlit) + +Create `streamlit_app.py`: + + + +```python +import streamlit as st +import requests +import json +import uuid + +st.set_page_config(page_title="Personal AI Assistant", page_icon="🤖", layout="wide") + +def normalize_email(email: str) -> str: + return (email or "").strip().lower() + +def stable_user_id_from_email(email: str) -> str: + return uuid.uuid5(uuid.NAMESPACE_DNS, normalize_email(email)).hex + +# Session state +if 'messages' not in st.session_state: + st.session_state.messages = [] +if 'user_name' not in st.session_state: + st.session_state.user_name = None +if 'email' not in st.session_state: + st.session_state.email = None +if 'user_id' not in st.session_state: + st.session_state.user_id = None + +st.title("🤖 Personal AI Assistant") +st.markdown("*Your AI that learns and remembers*") + +with st.sidebar: + st.header("👤 User Profile") + + if not st.session_state.user_name or not st.session_state.email: + name = st.text_input("What should I call you?") + email = st.text_input("Email", placeholder="you@example.com") + + if st.button("Get Started"): + if name and email: st.session_state.user_name = name + st.session_state.email = normalize_email(email) + st.session_state.user_id = stable_user_id_from_email(st.session_state.email) st.session_state.messages.append({ "role": "user", - "content": f"Hi! My name is {name}. I'm looking for a personal AI assistant." + "content": f"Hi! My name is {name}." 
}) st.rerun() - else: - st.write(f"**Name:** {st.session_state.user_name}") - st.write(f"**User ID:** {st.session_state.user_id[:12]}...") + else: + st.warning("Please enter both fields.") + else: + st.write(f"**Name:** {st.session_state.user_name}") + st.write(f"**Email:** {st.session_state.email}") + if st.button("Reset Conversation"): + st.session_state.messages = [] + st.rerun() + +if st.session_state.user_name and st.session_state.email: + for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + if prompt := st.chat_input("Message..."): + st.session_state.messages.append({"role": "user", "content": prompt}) + with st.chat_message("user"): + st.markdown(prompt) + + with st.chat_message("assistant"): + try: + response = requests.post( + "http://localhost:8000/chat", + json={ + "messages": st.session_state.messages, + "email": st.session_state.email + }, + stream=True, + timeout=30 + ) + + if response.status_code == 200: + full_response = "" + for line in response.iter_lines(): + if line: + try: + data = json.loads(line.decode('utf-8').replace('data: ', '')) + if 'content' in data: + full_response += data['content'] + except: + continue + + st.markdown(full_response) + st.session_state.messages.append({"role": "assistant", "content": full_response}) + else: + st.error(f"Error: {response.status_code}") + except Exception as e: + st.error(f"Error: {e}") +else: + st.info("Please enter your profile in the sidebar") +``` - if st.button("Reset Conversation"): - st.session_state.messages = [] - st.rerun() + - st.markdown("---") - st.markdown(""" - ### 💡 Try saying: - - "I work as a software engineer and prefer concise responses" - - "Remember that I'm vegetarian" - - "I usually work from 9-5 EST" - """) - - # Main chat interface - if st.session_state.user_name: - # Display messages - for message in st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - - # Chat input - if prompt := st.chat_input("Tell me something about yourself, or ask for help..."): - # Add user message - st.session_state.messages.append({"role": "user", "content": prompt}) - - with st.chat_message("user"): - st.markdown(prompt) - - # Get AI response - with st.chat_message("assistant"): - with st.spinner("Thinking..."): - try: - response = requests.post( - "http://localhost:8000/chat", - json={ - "messages": st.session_state.messages, - "userId": st.session_state.user_id - }, - timeout=30 - ) - - if response.status_code == 200: - # Handle streaming response - full_response = "" - for line in response.iter_lines(): - if line: - try: - data = json.loads(line.decode('utf-8').replace('data: ', '')) - if 'content' in data: - full_response += data['content'] - except: - continue - - st.markdown(full_response) - st.session_state.messages.append({ - "role": "assistant", - "content": full_response - }) - else: - st.error(f"Error: {response.status_code}") - except Exception as e: - st.error(f"Connection error: {e}") +### Step 4: Run It - else: - st.info("👆 Please enter your name in the sidebar to get started!") +Terminal 1 - Start backend: +```bash +python main.py +``` - # Run with: streamlit run streamlit_app.py - ``` - -
+Terminal 2 - Start frontend: +```bash +streamlit run streamlit_app.py +``` -## Testing Your Assistant +Open `http://localhost:8501` in your browser. -### Step 4: Test Memory Formation +--- -Try these conversation flows to test memory capabilities: +## TypeScript Implementation -1. **Personal Preferences**: - ``` - User: "Hi! I'm Sarah, a product manager at a tech startup. I prefer brief, actionable responses and I'm always busy with user research." +### Step 1: Project Setup - Assistant: [Should remember name, role, communication preference] +```bash +npx create-next-app@latest personal-ai --typescript --tailwind --app +cd personal-ai +npm install @supermemory/tools ai @ai-sdk/openai +``` + +Create `.env.local`: + +```bash +SUPERMEMORY_API_KEY=your_supermemory_key_here +OPENAI_API_KEY=your_openai_key_here +``` + +### Step 2: API Route - User: "What's a good way to prioritize features?" +Create `app/api/chat/route.ts`. Let's break it down: - Assistant: [Should reference that you're a PM and prefer brief responses] - ``` +#### Import Dependencies -2. **Dietary & Lifestyle**: - ``` - User: "Remember that I'm vegan and I work out every morning at 6 AM." +```typescript +import { streamText } from 'ai' +import { createOpenAI } from '@ai-sdk/openai' +import { supermemoryTools } from '@supermemory/tools/ai-sdk' +``` - User: "Suggest a quick breakfast for tomorrow." +- **streamText**: Vercel AI SDK function that handles streaming responses and tool calling +- **createOpenAI**: Factory function to create an OpenAI provider +- **supermemoryTools**: Pre-built tools for memory search and storage - Assistant: [Should suggest vegan options that work for pre/post workout] - ``` +#### Initialize OpenAI Provider -3. **Work Context**: - ``` - User: "I'm working on a React project and I prefer TypeScript over JavaScript." +```typescript +const openai = createOpenAI({ + apiKey: process.env.OPENAI_API_KEY! +}) +``` - User: "Help me with state management." +Creates an OpenAI provider configured with your API key. The `!` tells TypeScript "this definitely exists" (because we set it in `.env.local`). - Assistant: [Should suggest TypeScript-specific solutions] - ``` +This provider object will be passed to `streamText` to specify which AI model to use. -### Step 5: Verify Memory Storage +#### Define System Prompt -Check that memories are being stored properly: +```typescript +const SYSTEM_PROMPT = `You are a highly personalized AI assistant. - - - ```typescript scripts/check-memories.ts - import { Supermemory } from '@supermemory/tools' +When users share personal information, remember it using the addMemory tool. +Before responding, search your memories using searchMemories to provide personalized help. +Always be helpful while respecting privacy.` +``` - const client = new Supermemory({ - apiKey: process.env.SUPERMEMORY_API_KEY! - }) +This guides the AI's behavior and tells it: +- **When to use tools**: Search memories before responding, add memories when users share info +- **Personality**: Be helpful and personalized +- **Boundaries**: Respect privacy - async function checkUserMemories(userId: string) { - try { - const memories = await client.memories.list({ - containerTags: [userId], - limit: 20, - sort: 'updatedAt', - order: 'desc' - }) - - console.log(`Found ${memories.memories.length} memories for ${userId}:`) - memories.memories.forEach((memory, i) => { - console.log(`${i + 1}. 
${memory.content.substring(0, 100)}...`) - }) - - // Test search - const searchResults = await client.search.memories({ - q: "preferences work", - containerTag: userId, - limit: 5 - }) - - console.log('\nSearch Results:') - searchResults.results.forEach((result, i) => { - console.log(`${i + 1}. (${result.similarity}) ${result.memory.substring(0, 100)}...`) - }) - - } catch (error) { - console.error('Error:', error) - } - } +The AI SDK uses this to decide when to call `searchMemories` and `addMemory` tools automatically. - // Run: npx ts-node scripts/check-memories.ts USER_ID_HERE - checkUserMemories(process.argv[2] || 'default-user') - ``` - - - - ```python check_memories.py - from supermemory import Supermemory - import os - import sys - - client = Supermemory(api_key=os.getenv("SUPERMEMORY_API_KEY")) - - def check_user_memories(user_id): - try: - # List all memories for user - memories = client.memories.list( - container_tags=[user_id], - limit=20, - sort="updatedAt", - order="desc" - ) - - print(f"Found {len(memories.memories)} memories for {user_id}:") - for i, memory in enumerate(memories.memories): - print(f"{i + 1}. {memory.content[:100]}...") - - # Test search - search_results = client.search.memories( - q="preferences work", - container_tag=user_id, - limit=5 - ) - - print('\nSearch Results:') - for i, result in enumerate(search_results.results): - print(f"{i + 1}. ({result.similarity}) {result.memory[:100]}...") - - except Exception as error: - print(f'Error: {error}') - - # Run: python check_memories.py USER_ID_HERE - user_id = sys.argv[1] if len(sys.argv) > 1 else 'default-user' - check_user_memories(user_id) - ``` - - - -## Production Considerations - -### Security & Privacy - -1. **User Isolation**: - ```typescript - // Always use user-specific container tags - const tools = supermemoryTools(apiKey, { - containerTags: [userId] - }) - ``` - -2. **Memory Encryption**: - ```typescript - // For sensitive data, consider client-side encryption - const encryptedContent = encrypt(sensitiveData, userKey) - await client.memories.add({ - content: encryptedContent, - containerTag: userId, - metadata: { encrypted: true } - }) - ``` - -### Performance Optimization - -1. **Memory Search Optimization**: - ```typescript - // Use appropriate thresholds for speed vs accuracy - const quickSearch = await client.search.memories({ - q: userQuery, - containerTag: userId, - threshold: 0.6, // Balanced - rerank: false, // Skip for speed - limit: 3 // Fewer results - }) - ``` - -2. 
**Caching Strategy**: - ```typescript - // Cache frequently accessed user context - const userContext = await redis.get(`user_context:${userId}`) - if (!userContext) { - const memories = await client.search.memories({ - q: "user preferences work style", - containerTag: userId, - limit: 10 - }) - await redis.setex(`user_context:${userId}`, 300, JSON.stringify(memories)) - } - ``` - -### Monitoring & Analytics +#### Create POST Handler ```typescript -// Track memory formation and retrieval -const analytics = { - memoriesCreated: await redis.incr(`memories_created:${userId}`), - searchesPerformed: await redis.incr(`searches:${userId}`), - conversationLength: messages.length -} +export async function POST(req: Request) { + try { + const { messages, email } = await req.json() +``` -// Log for analysis -console.log('User Interaction:', { - userId, - action: 'chat_response', - memoriesFound: searchResults.results.length, - responseTime: Date.now() - startTime, - ...analytics -}) +Next.js App Router convention: export an async function named after the HTTP method. This handles POST requests to `/api/chat`. + +We extract: +- `messages`: Chat history array `[{role, content}]` +- `email`: User identifier + +#### Validate Input + +```typescript + if (!messages?.length) { + return new Response('No messages provided', { status: 400 }) + } + if (!email) { + return new Response('Email required', { status: 400 }) + } ``` -## Extensions & Customization +**Why validate?** Prevents crashes from malformed requests. We need: +- At least one message to respond to +- An email to isolate user memories -### 1. Add Personality Profiles +Without email, we can't maintain personalization across sessions. + +#### Create Container Tag ```typescript -const personalityProfiles = { - professional: "Respond in a formal, business-appropriate tone", - casual: "Use a friendly, conversational tone with occasional humor", - technical: "Provide detailed technical explanations with examples", - concise: "Keep responses brief and to the point" -} + const containerTag = `user_${email.toLowerCase().trim()}` +``` + +Convert email to a container tag for memory isolation. + +**Simpler than Python**: We skip UUID generation here for simplicity. In production, you might want to hash the email for privacy: + +```typescript +// Optional: More privacy-preserving approach +import crypto from 'crypto' +const containerTag = `user_${crypto.createHash('sha256').update(email).digest('hex').slice(0, 16)}` +``` + +#### Call streamText with Tools + +```typescript + const result = streamText({ + model: openai('gpt-4o'), + messages, + tools: supermemoryTools(process.env.SUPERMEMORY_API_KEY!, { + containerTags: [containerTag] + }), + system: SYSTEM_PROMPT + }) +``` + +This is where the magic happens! 
Let's break down each parameter: + +**`model: openai('gpt-4o')`** +- Specifies which AI model to use +- The AI SDK handles the API calls + +**`messages`** +- Full conversation history +- Format: `[{role: "user"|"assistant", content: "..."}]` + +**`tools: supermemoryTools(...)`** +- Gives the AI access to memory operations +- The AI SDK automatically: + - Decides when to call tools based on the conversation + - Calls `searchMemories` when it needs context + - Calls `addMemory` when users share information + - Handles tool execution and error handling + +**`containerTags: [containerTag]`** +- Scopes all memory operations to this specific user +- Ensures User A can't access User B's memories + +**`system: SYSTEM_PROMPT`** +- Guides the AI's behavior and tool usage + +**How tools work:** +1. User: "Remember that I'm vegetarian" +2. AI SDK detects this is memory-worthy +3. Automatically calls `addMemory("User is vegetarian")` +4. Stores in Supermemory with the user's container tag +5. Responds: "Got it, I'll remember that!" + +Later: +1. User: "What should I eat?" +2. AI SDK calls `searchMemories("food preferences")` +3. Retrieves: "User is vegetarian" +4. Responds: "How about a delicious veggie stir-fry?" -// Add to system prompt based on user preference -const userProfile = await getUserProfile(userId) -const systemPrompt = `${basePrompt}\n\nCommunication Style: ${personalityProfiles[userProfile.style]}` +**No manual tool handling needed!** The AI SDK manages the entire flow. + +#### Return Streaming Response + +```typescript + return result.toAIStreamResponse() ``` -### 2. Smart Notifications +`toAIStreamResponse()` converts the streaming result into a format the frontend can consume. It: +- Sets appropriate headers for streaming +- Formats data for the `useChat` hook +- Handles errors gracefully + +This returns immediately (doesn't wait for completion), and chunks stream to the client as they're generated. + +#### Error Handling ```typescript -// Proactive suggestions based on user patterns -const shouldSuggest = await analyzeUserPatterns(userId) -if (shouldSuggest.type === 'daily_standup') { - return { - message: "Based on your schedule, would you like me to help prepare for your 9 AM standup?", - suggestedActions: ["Review yesterday's progress", "Prepare today's goals"] + } catch (error: any) { + console.error('Chat error:', error) + return new Response(error.message, { status: 500 }) } } ``` -### 3. Multi-Modal Memory +Catches any errors (API failures, tool errors, etc.) and returns a clean error response. + +**Why log to console?** In production, you'd send this to a monitoring service (Sentry, DataDog, etc.) to track issues. + +--- + +**Key Differences from Python:** + +| Aspect | Python | TypeScript | +|--------|--------|------------| +| **Memory Search** | Manual `search_user_memories()` call | AI SDK calls `searchMemories` tool automatically | +| **Memory Add** | Manual `add_user_memory()` call | AI SDK calls `addMemory` tool automatically | +| **Tool Decision** | You decide when to search/add | AI decides based on conversation context | +| **Streaming** | Manual SSE formatting | `toAIStreamResponse()` handles it | +| **Error Handling** | Try/catch in each function | AI SDK handles tool errors | + +**Python = Manual Control** +You explicitly search and add memories. More control, more code. + +**TypeScript = AI-Driven** +The AI decides when to use tools. Less code, more "magic." 
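+If you ever want the Python-style manual control in TypeScript, you can skip the tool layer and query memories yourself before calling the model. The sketch below posts to the `/v4/search` endpoint mentioned in the search docs; the request fields (`q`, `containerTag`, `limit`) and the `results[].memory` response shape are assumed to mirror the Python client, so verify them against the API reference before relying on this:
+
+```typescript
+// Hypothetical manual-control variant: fetch memories directly,
+// then inject them into your system prompt yourself.
+async function searchMemories(query: string, containerTag: string): Promise<string> {
+  const res = await fetch('https://api.supermemory.ai/v4/search', {
+    method: 'POST',
+    headers: {
+      Authorization: `Bearer ${process.env.SUPERMEMORY_API_KEY}`,
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify({ q: query, containerTag, limit: 5 }),
+  })
+  if (!res.ok) return 'No relevant memories found.'
+  const data = (await res.json()) as { results?: { memory: string }[] }
+  return data.results?.map((r) => r.memory).join('\n') ?? 'No relevant memories found.'
+}
+```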
+
+### Step 3: Chat UI
+
+Replace `app/page.tsx`. The `useChat` wiring and the email gate below are the important parts; the Tailwind classes are a minimal sketch, so style them however you like:
+
+```typescript
+'use client'
+import { useChat } from 'ai/react'
+import { useState } from 'react'
+
+export default function ChatPage() {
+  const [email, setEmail] = useState('')
+  const [userName, setUserName] = useState('')
+  const [tempEmail, setTempEmail] = useState('')
+  const [tempName, setTempName] = useState('')
+
+  const { messages, input, handleInputChange, handleSubmit } = useChat({
+    api: '/api/chat',
+    body: { email }
+  })
+
+  if (!email) {
+    return (
+      <div className="flex min-h-screen items-center justify-center p-4">
+        <div className="w-full max-w-sm space-y-4">
+          <h1 className="text-xl font-semibold">🤖 Personal AI Assistant</h1>
+          <input
+            placeholder="Your name"
+            value={tempName}
+            onChange={(e) => setTempName(e.target.value)}
+            className="w-full px-4 py-2 border rounded-lg"
+          />
+          <input
+            placeholder="you@example.com"
+            value={tempEmail}
+            onChange={(e) => setTempEmail(e.target.value)}
+            className="w-full px-4 py-2 border rounded-lg"
+          />
+          <button
+            type="button"
+            onClick={() => {
+              setUserName(tempName)
+              setEmail(tempEmail.trim().toLowerCase())
+            }}
+            className="w-full px-4 py-2 border rounded-lg"
+          >
+            Get Started
+          </button>
+        </div>
+      </div>
+    )
+  }
+
+  return (
+    <div className="mx-auto flex h-screen max-w-2xl flex-col p-4">
+      <h1 className="mb-4 text-xl font-semibold">
+        {userName ? `Chatting as ${userName}` : 'Personal AI Assistant'}
+      </h1>
+      <div className="flex-1 space-y-3 overflow-y-auto">
+        {messages.map((message) => (
+          <div
+            key={message.id}
+            className={message.role === 'user' ? 'text-right' : 'text-left'}
+          >
+            <div className="inline-block px-4 py-2 border rounded-lg">
+              {message.content}
+            </div>
+          </div>
+        ))}
+      </div>
+      <form onSubmit={handleSubmit} className="mt-4 flex gap-2">
+        <input
+          value={input}
+          onChange={handleInputChange}
+          placeholder="Message..."
+          className="flex-1 px-4 py-2 border rounded-lg"
+        />
+        <button type="submit" className="px-4 py-2 border rounded-lg">
+          Send
+        </button>
+      </form>
+    </div>
+  )
+}
+```
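+Once the dev server is running (next step), you can also exercise the route without the UI. A quick sketch with curl, assuming the default port 3000; `-N` disables buffering so you can watch the stream arrive:
+
+```bash
+curl -N -X POST http://localhost:3000/api/chat \
+  -H "Content-Type: application/json" \
+  -d '{"email":"you@example.com","messages":[{"role":"user","content":"Remember that I am vegetarian"}]}'
+```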
+ +### Step 4: Run It + +```bash +npm run dev +``` + +Open `http://localhost:3000` + +--- + +## Testing Your Assistant -- **Scale to multiple users**: Add user authentication and proper isolation -- **Add voice interaction**: Integrate with speech-to-text/text-to-speech APIs -- **Mobile app**: Create React Native or Flutter mobile version -- **Integrations**: Connect to calendar, email, task management tools -- **Advanced AI features**: Add emotion detection, conversation summarization +Try these conversations to test memory: + +**Personal Preferences:** +``` +User: "I'm Sarah, a product manager. I prefer brief responses." +[Later] +User: "What's a good way to prioritize features?" +Assistant: [Should reference PM role and brevity preference] +``` + +**Dietary & Lifestyle:** +``` +User: "Remember I'm vegan and work out at 6 AM." +[Later] +User: "Suggest a quick breakfast." +Assistant: [Should suggest vegan options for pre/post workout] +``` + +**Work Context:** +``` +User: "I'm working on a React project with TypeScript." +[Later] +User: "Help me with state management." +Assistant: [Should suggest TypeScript-specific solutions] +``` + +## Verify Memory Storage + +### Python + +Create `check_memories.py`: + +```python +from supermemory import Supermemory +import os +from dotenv import load_dotenv + +load_dotenv() +client = Supermemory(api_key=os.getenv("SUPERMEMORY_API_KEY")) + +# Replace with your user_id from console logs +user_id = "your_user_id_here" +container_tag = f"user_{user_id}" + +memories = client.memories.list( + container_tags=[container_tag], + limit=20, + sort="updatedAt", + order="desc" +) + +print(f"Found {len(memories.memories)} memories:") +for i, memory in enumerate(memories.memories): + full = client.memories.get(id=memory.id) + print(f"\n{i + 1}. {full.content}") +``` + +### TypeScript + +Create `scripts/check-memories.ts`: + +```typescript +const userId = "your_user_id_here" +const containerTag = `user_${userId}` + +const response = await fetch('https://api.supermemory.ai/v3/memories', { + method: 'POST', + headers: { + 'Authorization': `Bearer ${process.env.SUPERMEMORY_API_KEY}`, + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + containerTags: [containerTag], + limit: 20, + sort: 'updatedAt', + order: 'desc' + }) +}) + +const data = await response.json() +console.log(`Found ${data.memories?.length || 0} memories`) +``` ## Troubleshooting **Memory not persisting?** -- Check that `x-sm-user-id` header is consistent -- Verify API key has write permissions -- Ensure container tags are properly set +- Verify container tags are consistent +- Check API key has write permissions +- Ensure email is properly normalized **Responses not personalized?** -- Increase search limit to find more relevant memories -- Lower threshold to cast a wider net -- Check that memories are being added with proper context +- Increase search limit to find more memories +- Check that memories are being added +- Verify system prompt guides tool usage **Performance issues?** -- Reduce search limits for faster responses -- Implement caching for frequent searches -- Use appropriate thresholds to balance speed vs accuracy +- Reduce search limits +- Implement caching for frequent queries +- Use appropriate thresholds --- -*This recipe provides the foundation for a personal AI assistant. Customize it based on your specific needs and use cases.* +*Built with Supermemory. 
Customize based on your needs.* diff --git a/apps/docs/memory-api/track-progress.mdx b/apps/docs/memory-api/track-progress.mdx index 97a7f5bc3..1d02cc0d3 100644 --- a/apps/docs/memory-api/track-progress.mdx +++ b/apps/docs/memory-api/track-progress.mdx @@ -154,6 +154,8 @@ curl -X GET "https://api.supermemory.ai/v3/documents/doc_abc123" \ } ``` +For more comprehensive information on the get documents by ID endpoint, refer to the [API reference.](/api-reference/manage-documents/get-document) + ## Status Values | Status | Description | Typical Duration | diff --git a/apps/docs/search/overview.mdx b/apps/docs/search/overview.mdx index d95b611d3..b6356202f 100644 --- a/apps/docs/search/overview.mdx +++ b/apps/docs/search/overview.mdx @@ -285,6 +285,48 @@ Companies like Composio [Rube.app](https://rube.app) use memories search for let The `/v4/search` endpoint searches through and returns memories. +## Direct Document Retrieval + +If you don't need semantic search and just want to retrieve a specific document you've uploaded by its ID, use the GET document endpoint: + +`GET /v3/documents/{id}` + +This is useful when: +- You know the exact document ID +- You want to retrieve the full document content and metadata +- You need to check processing status or document details + + + +```typescript TypeScript +// Get a specific document by ID +const document = await client.memories.get("doc_abc123"); + +console.log(document.content); // Full document content +console.log(document.status); // Processing status +console.log(document.metadata); // Document metadata +console.log(document.summary); // AI-generated summary +``` + +```python Python +# Get a specific document by ID +document = client.memories.get("doc_abc123") + +print(document.content) # Full document content +print(document.status) # Processing status +``` + +```bash cURL +curl -X GET "https://api.supermemory.ai/v3/documents/{YOUR-DOCUMENT-ID}" \ + -H "Authorization: Bearer $SUPERMEMORY_API_KEY" +``` + + + + +This endpoint returns the complete document with all fields including content, metadata, containerTags, summary, and processing status. For more details, see the [API reference](/api-reference/manage-documents/get-document). + + ## Search Flow Architecture ### Document Search (`/v3/search`) Flow diff --git a/apps/docs/update-delete-memories/overview.mdx b/apps/docs/update-delete-memories/overview.mdx index f708a561f..926e29717 100644 --- a/apps/docs/update-delete-memories/overview.mdx +++ b/apps/docs/update-delete-memories/overview.mdx @@ -151,6 +151,12 @@ curl -X POST "https://api.supermemory.ai/v3/documents" \ The `customId` enables idempotency across all endpoints. The `memoryId` doesn't support idempotency, only the `customId` does. + + +The `customId` can have a maximum length of 100 characters. + + + ## Single Delete Delete individual memories by their ID. This is a permanent hard delete with no recovery mechanism. diff --git a/apps/memory-graph-playground/next-env.d.ts b/apps/memory-graph-playground/next-env.d.ts new file mode 100644 index 000000000..c4b7818fb --- /dev/null +++ b/apps/memory-graph-playground/next-env.d.ts @@ -0,0 +1,6 @@ +/// +/// +import "./.next/dev/types/routes.d.ts"; + +// NOTE: This file should not be edited +// see https://nextjs.org/docs/app/api-reference/config/typescript for more information. 
diff --git a/apps/web/app/(navigation)/page.tsx b/apps/web/app/(navigation)/page.tsx index 212f33c07..7ad84caf9 100644 --- a/apps/web/app/(navigation)/page.tsx +++ b/apps/web/app/(navigation)/page.tsx @@ -42,7 +42,7 @@ export default function Page() { useEffect(() => { if (user && !onboardingLoading && shouldShowOnboarding()) { - router.push("/onboarding") + router.push("/onboarding?step=input&flow=welcome") } }, [user, shouldShowOnboarding, onboardingLoading, router]) diff --git a/apps/web/app/api/exa/fetch-content/route.ts b/apps/web/app/api/exa/fetch-content/route.ts new file mode 100644 index 000000000..6cdb40d5c --- /dev/null +++ b/apps/web/app/api/exa/fetch-content/route.ts @@ -0,0 +1,61 @@ +export interface ExaContentResult { + url: string + text: string + title: string + author?: string +} + +interface ExaApiResponse { + results: ExaContentResult[] +} + +export async function POST(request: Request) { + try { + const { urls } = await request.json() + + if (!Array.isArray(urls) || urls.length === 0) { + return Response.json( + { error: "Invalid input: urls must be a non-empty array" }, + { status: 400 }, + ) + } + + if (!urls.every((url) => typeof url === "string" && url.trim())) { + return Response.json( + { error: "Invalid input: all urls must be non-empty strings" }, + { status: 400 }, + ) + } + + const response = await fetch("https://api.exa.ai/contents", { + method: "POST", + headers: { + "x-api-key": process.env.EXA_API_KEY ?? "", + "Content-Type": "application/json", + }, + body: JSON.stringify({ + urls, + text: true, + livecrawl: "fallback", + }), + }) + + if (!response.ok) { + console.error( + "Exa API request failed:", + response.status, + response.statusText, + ) + return Response.json( + { error: "Failed to fetch content from Exa API" }, + { status: 500 }, + ) + } + + const data: ExaApiResponse = await response.json() + return Response.json({ results: data.results }) + } catch (error) { + console.error("Exa API request error:", error) + return Response.json({ error: "Internal server error" }, { status: 500 }) + } +} diff --git a/apps/web/app/new/page.tsx b/apps/web/app/new/page.tsx new file mode 100644 index 000000000..7e10a190e --- /dev/null +++ b/apps/web/app/new/page.tsx @@ -0,0 +1,29 @@ +"use client" + +import { Header } from "@/components/new/header" +import { ChatSidebar } from "@/components/chat" +import { AnimatePresence } from "motion/react" +import { MemoriesGrid } from "@/components/new/memories-grid" +import { AnimatedGradientBackground } from "@/app/onboarding/setup/page" + +export default function NewPage() { + return ( +
+
+ +
+
+
+
+ +
+ + + + +
+
+
+
+ ) +} diff --git a/apps/web/app/onboarding/animated-text.tsx b/apps/web/app/onboarding-old/animated-text.tsx similarity index 100% rename from apps/web/app/onboarding/animated-text.tsx rename to apps/web/app/onboarding-old/animated-text.tsx diff --git a/apps/web/app/onboarding/bio-form.tsx b/apps/web/app/onboarding-old/bio-form.tsx similarity index 100% rename from apps/web/app/onboarding/bio-form.tsx rename to apps/web/app/onboarding-old/bio-form.tsx diff --git a/apps/web/app/onboarding/extension-form.tsx b/apps/web/app/onboarding-old/extension-form.tsx similarity index 100% rename from apps/web/app/onboarding/extension-form.tsx rename to apps/web/app/onboarding-old/extension-form.tsx diff --git a/apps/web/app/onboarding/floating-orbs.tsx b/apps/web/app/onboarding-old/floating-orbs.tsx similarity index 100% rename from apps/web/app/onboarding/floating-orbs.tsx rename to apps/web/app/onboarding-old/floating-orbs.tsx diff --git a/apps/web/app/onboarding/intro.tsx b/apps/web/app/onboarding-old/intro.tsx similarity index 100% rename from apps/web/app/onboarding/intro.tsx rename to apps/web/app/onboarding-old/intro.tsx diff --git a/apps/web/app/onboarding/mcp-form.tsx b/apps/web/app/onboarding-old/mcp-form.tsx similarity index 100% rename from apps/web/app/onboarding/mcp-form.tsx rename to apps/web/app/onboarding-old/mcp-form.tsx diff --git a/apps/web/app/onboarding/name-form.tsx b/apps/web/app/onboarding-old/name-form.tsx similarity index 100% rename from apps/web/app/onboarding/name-form.tsx rename to apps/web/app/onboarding-old/name-form.tsx diff --git a/apps/web/app/onboarding/nav-menu.tsx b/apps/web/app/onboarding-old/nav-menu.tsx similarity index 100% rename from apps/web/app/onboarding/nav-menu.tsx rename to apps/web/app/onboarding-old/nav-menu.tsx diff --git a/apps/web/app/onboarding/onboarding-background.tsx b/apps/web/app/onboarding-old/onboarding-background.tsx similarity index 100% rename from apps/web/app/onboarding/onboarding-background.tsx rename to apps/web/app/onboarding-old/onboarding-background.tsx diff --git a/apps/web/app/onboarding/onboarding-context.tsx b/apps/web/app/onboarding-old/onboarding-context.tsx similarity index 100% rename from apps/web/app/onboarding/onboarding-context.tsx rename to apps/web/app/onboarding-old/onboarding-context.tsx diff --git a/apps/web/app/onboarding/onboarding-form.tsx b/apps/web/app/onboarding-old/onboarding-form.tsx similarity index 100% rename from apps/web/app/onboarding/onboarding-form.tsx rename to apps/web/app/onboarding-old/onboarding-form.tsx diff --git a/apps/web/app/onboarding-old/page.tsx b/apps/web/app/onboarding-old/page.tsx new file mode 100644 index 000000000..dcf64ad0c --- /dev/null +++ b/apps/web/app/onboarding-old/page.tsx @@ -0,0 +1,26 @@ +import { getSession } from "@lib/auth" +import { OnboardingForm } from "./onboarding-form" +import { OnboardingProvider } from "./onboarding-context" +import { OnboardingProgressBar } from "./progress-bar" +import { redirect } from "next/navigation" +import { OnboardingBackground } from "./onboarding-background" +import type { Metadata } from "next" +export const metadata: Metadata = { + title: "Welcome to Supermemory", + description: "We're excited to have you on board.", +} + +export default function OnboardingPage() { + const session = getSession() + + if (!session) redirect("/login") + + return ( + + + + + + + ) +} diff --git a/apps/web/app/onboarding/progress-bar.tsx b/apps/web/app/onboarding-old/progress-bar.tsx similarity index 100% rename from 
apps/web/app/onboarding/progress-bar.tsx rename to apps/web/app/onboarding-old/progress-bar.tsx diff --git a/apps/web/app/onboarding/welcome.tsx b/apps/web/app/onboarding-old/welcome.tsx similarity index 100% rename from apps/web/app/onboarding/welcome.tsx rename to apps/web/app/onboarding-old/welcome.tsx diff --git a/apps/web/app/onboarding/page.tsx b/apps/web/app/onboarding/page.tsx index dcf64ad0c..147bbfb65 100644 --- a/apps/web/app/onboarding/page.tsx +++ b/apps/web/app/onboarding/page.tsx @@ -1,26 +1,254 @@ -import { getSession } from "@lib/auth" -import { OnboardingForm } from "./onboarding-form" -import { OnboardingProvider } from "./onboarding-context" -import { OnboardingProgressBar } from "./progress-bar" -import { redirect } from "next/navigation" -import { OnboardingBackground } from "./onboarding-background" -import type { Metadata } from "next" -export const metadata: Metadata = { - title: "Welcome to Supermemory", - description: "We're excited to have you on board.", +"use client" + +import { useSearchParams } from "next/navigation" +import { motion, AnimatePresence } from "motion/react" +import { useState, useEffect } from "react" +import { useAuth } from "@lib/auth-context" +import { cn } from "@lib/utils" + +import { InputStep } from "./welcome/input-step" +import { GreetingStep } from "./welcome/greeting-step" +import { WelcomeStep } from "./welcome/welcome-step" +import { ContinueStep } from "./welcome/continue-step" +import { FeaturesStep } from "./welcome/features-step" +import { MemoriesStep } from "./welcome/memories-step" +import { RelatableQuestion } from "./setup/relatable-question" +import { IntegrationsStep } from "./setup/integrations-step" + +import { InitialHeader } from "@/components/initial-header" +import { SetupHeader } from "./setup/header" +import { ChatSidebar } from "./setup/chat-sidebar" +import { Logo } from "@ui/assets/Logo" +import NovaOrb from "@/components/nova/nova-orb" +import { AnimatedGradientBackground } from "./setup/page" + +function UserSupermemory({ name }: { name: string }) { + return ( + + +
+

{name}'s

+

+ supermemory +

+
+
+ ) } export default function OnboardingPage() { - const session = getSession() + const searchParams = useSearchParams() + const { user } = useAuth() + + const flow = searchParams.get("flow") as "welcome" | "setup" | null + const step = searchParams.get("step") as string | null + + const [name, setName] = useState(user?.name ?? "") + const [isSubmitting, setIsSubmitting] = useState(false) + const [memoryFormData, setMemoryFormData] = useState<{ + twitter: string + linkedin: string + description: string + otherLinks: string[] + } | null>(null) + + const currentFlow = flow || "welcome" + const currentStep = step || "input" + + useEffect(() => { + if (user?.name) { + setName(user.name) + localStorage.setItem("username", user.name) + } + }, [user?.name]) + + useEffect(() => { + if (currentFlow !== "welcome") return + + const timers: NodeJS.Timeout[] = [] - if (!session) redirect("/login") + switch (currentStep) { + case "greeting": + timers.push( + setTimeout(() => { + // Auto-advance to welcome step + window.history.replaceState( + null, + "", + "/onboarding?flow=welcome&step=welcome", + ) + }, 2000), + ) + break + case "welcome": + timers.push( + setTimeout(() => { + // Auto-advance to username step + window.history.replaceState( + null, + "", + "/onboarding?flow=welcome&step=username", + ) + }, 2000), + ) + break + } + + return () => { + timers.forEach(clearTimeout) + } + }, [currentStep, currentFlow]) + + const handleSubmit = () => { + localStorage.setItem("username", name) + if (name.trim()) { + setIsSubmitting(true) + window.history.replaceState( + null, + "", + "/onboarding?flow=welcome&step=greeting", + ) + setIsSubmitting(false) + } + } + + const renderWelcomeStep = () => { + switch (currentStep) { + case "input": + return ( + + ) + case "greeting": + return + case "welcome": + return + case "username": + return + case "features": + return + case "memories": + return + default: + return null + } + } + + const renderSetupStep = () => { + switch (currentStep) { + case "relatable": + return + case "integrations": + return + default: + return null + } + } + + const isWelcomeFlow = currentFlow === "welcome" + const isSetupFlow = currentFlow === "setup" + + const minimizeNovaOrb = + isWelcomeFlow && ["features", "memories"].includes(currentStep) + const novaSize = currentStep === "memories" ? 150 : 300 + + const showUserSupermemory = isWelcomeFlow && currentStep === "username" return ( - - - - - - +
+ {isWelcomeFlow && ( + + )} + {isSetupFlow && } + + {isSetupFlow && } + + {isWelcomeFlow && ( +
+ + + + + {showUserSupermemory && } + + + {renderWelcomeStep()} + +
+ )} + + {isSetupFlow && ( +
+
+
+
+ + {renderSetupStep()} + +
+ + + + +
+
+
+ )} +
) } diff --git a/apps/web/app/onboarding/setup/chat-sidebar.tsx b/apps/web/app/onboarding/setup/chat-sidebar.tsx new file mode 100644 index 000000000..973d31ae9 --- /dev/null +++ b/apps/web/app/onboarding/setup/chat-sidebar.tsx @@ -0,0 +1,424 @@ +"use client" + +import { useState, useEffect, useCallback, useRef } from "react" +import { motion, AnimatePresence } from "motion/react" +import NovaOrb from "@/components/nova/nova-orb" +import { Button } from "@ui/components/button" +import { PanelRightCloseIcon, SendIcon } from "lucide-react" +import { collectValidUrls } from "@/utils/url-helpers" +import { $fetch } from "@lib/api" +import { cn } from "@lib/utils" +import { dmSansClassName } from "@/utils/fonts" + +interface ChatSidebarProps { + formData: { + twitter: string + linkedin: string + description: string + otherLinks: string[] + } | null +} + +export function ChatSidebar({ formData }: ChatSidebarProps) { + const [message, setMessage] = useState("") + const [isChatOpen, setIsChatOpen] = useState(true) + const [messages, setMessages] = useState< + { + message: string + type?: "formData" | "exa" | "memory" | "waiting" + memories?: { + url: string + title: string + description: string + fullContent: string + }[] + url?: string + title?: string + description?: string + }[] + >([]) + const [isLoading, setIsLoading] = useState(false) + const displayedMemoriesRef = useRef>(new Set()) + + const handleSend = () => { + console.log("Message:", message) + setMessage("") + } + + const handleKeyDown = (e: React.KeyboardEvent) => { + if (e.key === "Enter" && !e.shiftKey) { + e.preventDefault() + handleSend() + } + } + + const toggleChat = () => { + setIsChatOpen(!isChatOpen) + } + + const pollForMemories = useCallback( + async (documentIds: string[]) => { + const maxAttempts = 30 // 30 attempts * 3 seconds = 90 seconds max + const pollInterval = 3000 // 3 seconds + + for (let attempt = 0; attempt < maxAttempts; attempt++) { + try { + const response = await $fetch("@get/documents/:id", { + params: { id: documentIds[0] ?? 
"" }, + disableValidation: true, + }) + + console.log("response", response) + + if (response.data) { + const document = response.data + + if (document.memories && document.memories.length > 0) { + const newMemories: { + url: string + title: string + description: string + fullContent: string + }[] = [] + + document.memories.forEach( + (memory: { memory: string; title?: string }) => { + if (!displayedMemoriesRef.current.has(memory.memory)) { + displayedMemoriesRef.current.add(memory.memory) + newMemories.push({ + url: document.url || "", + title: memory.title || document.title || "Memory", + description: memory.memory || "", + fullContent: memory.memory || "", + }) + } + }, + ) + + if (newMemories.length > 0 && messages.length < 10) { + setMessages((prev) => [ + ...prev, + { + message: newMemories + .map((memory) => memory.description) + .join("\n"), + type: "memory" as const, + memories: newMemories, + }, + ]) + } + } + + if (document.memories && document.memories.length > 0) { + break + } + } + + await new Promise((resolve) => setTimeout(resolve, pollInterval)) + } catch (error) { + console.warn("Error polling for memories:", error) + await new Promise((resolve) => setTimeout(resolve, pollInterval)) + } + } + }, + [messages.length], + ) + + useEffect(() => { + if (!formData) return + + const urls = collectValidUrls(formData.linkedin, formData.otherLinks) + + console.log("urls", urls) + + const processContent = async () => { + setIsLoading(true) + + try { + const documentIds: string[] = [] + + // Step 1: Fetch content from Exa if URLs exist + if (urls.length > 0) { + const response = await fetch("/api/exa/fetch-content", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ urls }), + }) + const { results } = await response.json() + console.log("results", results) + + // Create documents from Exa results + for (const result of results) { + try { + const docResponse = await $fetch("@post/documents", { + body: { + content: result.text || result.description || "", + containerTags: ["sm_project_default"], + metadata: { + sm_source: "consumer", + exa_url: result.url, + exa_title: result.title, + }, + }, + }) + + if (docResponse.data?.id) { + documentIds.push(docResponse.data.id) + } + } catch (error) { + console.warn("Error creating document:", error) + } + } + } + + // Step 2: Create document from description if it exists + if (formData.description?.trim()) { + try { + const descDocResponse = await $fetch("@post/documents", { + body: { + content: formData.description, + containerTags: ["sm_project_default"], + metadata: { + sm_source: "consumer", + description_source: "user_input", + }, + }, + }) + + if (descDocResponse.data?.id) { + documentIds.push(descDocResponse.data.id) + } + } catch (error) { + console.warn("Error creating description document:", error) + } + } + + // Step 3: Poll for memories or show form data + if (documentIds.length > 0) { + await pollForMemories(documentIds) + } else { + // No documents created, show form data or waiting + const formDataMessages = [] + + if (formData.twitter) { + formDataMessages.push({ + message: `Twitter: ${formData.twitter}`, + url: formData.twitter, + title: "Twitter Profile", + description: `Twitter: ${formData.twitter}`, + type: "formData" as const, + }) + } + + if (formData.linkedin) { + formDataMessages.push({ + message: `LinkedIn: ${formData.linkedin}`, + url: formData.linkedin, + title: "LinkedIn Profile", + description: `LinkedIn: ${formData.linkedin}`, + type: "formData" as const, + }) + } + + if 
(formData.otherLinks.length > 0) { + formData.otherLinks.forEach((link) => { + formDataMessages.push({ + message: `Link: ${link}`, + url: link, + title: "Other Link", + description: `Link: ${link}`, + type: "formData" as const, + }) + }) + } + + const waitingMessage = { + message: "Waiting for your input", + url: "", + title: "", + description: "Waiting for your input", + type: "waiting" as const, + } + + setMessages([...formDataMessages, waitingMessage]) + } + } catch (error) { + console.warn("Error processing content:", error) + + const waitingMessage = { + message: "Waiting for your input", + url: "", + title: "", + description: "Waiting for your input", + type: "waiting" as const, + } + + setMessages([waitingMessage]) + } + setIsLoading(false) + } + + processContent() + }, [formData, pollForMemories]) + + return ( + + {!isChatOpen ? ( + + + + Chat with Nova + + + ) : ( + + + + Close chat + +
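+            {/* Each entry is a polled memory, echoed form data, or a waiting placeholder */}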
+ {messages.map((msg, i) => ( +
+ {msg.type === "waiting" ? ( +
+ + {msg.message} +
+ ) : ( + <> +
+ {i === 0 && ( +
+ )} +
+
+ {msg.type === "memory" && ( +
+ {msg.memories?.map((memory) => ( +
+ {memory.title && ( +

+ {memory.title} +

+ )} + {memory.url && ( + + {memory.url} + + )} + {memory.description && ( +

+ {memory.description} +

+ )} +
+ ))} +
+ )} + + )} +
+ ))} + {messages.length === 0 && !isLoading && !formData && ( +
+ + Waiting for your input +
+ )} + {isLoading && ( +
+ + Fetching your memories... +
+ )} +
+ +
+
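+            {/* Enter sends; Shift+Enter inserts a newline (see handleKeyDown) */}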
{ + e.preventDefault() + if (message.trim()) { + handleSend() + } + }} + > + setMessage(e.target.value)} + onKeyDown={handleKeyDown} + placeholder="Chat with your Supermemory" + className="w-full text-white placeholder:text-white/20 rounded-sm outline-none resize-none text-base leading-relaxed bg-transparent px-2 h-10" + /> +
+ +
+
+
+ + )} + + ) +} diff --git a/apps/web/app/onboarding/setup/header.tsx b/apps/web/app/onboarding/setup/header.tsx new file mode 100644 index 000000000..e452a57bf --- /dev/null +++ b/apps/web/app/onboarding/setup/header.tsx @@ -0,0 +1,43 @@ +import { motion } from "motion/react" +import { Logo } from "@ui/assets/Logo" +import { useAuth } from "@lib/auth-context" +import { useEffect, useState } from "react" +import { Avatar, AvatarFallback, AvatarImage } from "@ui/components/avatar" + +export function SetupHeader() { + const { user } = useAuth() + const [name, setName] = useState("") + + useEffect(() => { + const storedName = + localStorage.getItem("username") || localStorage.getItem("userName") || "" + setName(storedName) + }, []) + + return ( + +
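+      {/* Header shows "{name}'s supermemory" once the stored username is read on mount */}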
+ + {name && ( +
+

{name}'s

+

+ supermemory +

+
+ )} +
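+      {/* Avatar renders only for signed-in users; initials are the fallback */}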
+ {user && ( + + + {user?.name?.charAt(0)} + + )} +
+ ) +} diff --git a/apps/web/app/onboarding/setup/integrations-step.tsx b/apps/web/app/onboarding/setup/integrations-step.tsx new file mode 100644 index 000000000..35faf5af0 --- /dev/null +++ b/apps/web/app/onboarding/setup/integrations-step.tsx @@ -0,0 +1,157 @@ +"use client" + +import { useState } from "react" +import { Button } from "@ui/components/button" +import { MCPDetailView } from "@/components/mcp-detail-view" +import { ChromeExtensionDetailView } from "@/components/chrome-extension-detail-view" +import { useRouter } from "next/navigation" +import { cn } from "@lib/utils" +import { dmSansClassName } from "@/utils/fonts" +import { useOnboardingStorage } from "@hooks/use-onboarding-storage" + +const integrationCards = [ + { + title: "Capture", + description: "Add the Chrome extension for one-click saves", + icon: ( +
+ Chrome +
+ ), + }, + { + title: "Connect to AI", + description: "Set up once and use your memory in Cursor, Claude, etc", + icon: ( +
+ MCP +
+ ), + }, + { + title: "Connect", + description: "Link Notion, Google Drive, or OneDrive to import your docs", + icon: ( +
+ Connectors +
+    ),
+  },
+  {
+    title: "Import",
+    description:
+      "Bring in X/Twitter bookmarks and turn them into useful memories",
+    icon: (
+      
+ X +
+    ),
+  },
+]
+
+export function IntegrationsStep() {
+  const router = useRouter()
+  const [selectedCard, setSelectedCard] = useState<string | null>(null)
+  const { markOnboardingCompleted } = useOnboardingStorage()
+
+  const handleContinue = () => {
+    markOnboardingCompleted()
+    router.push("/new")
+  }
+
+  if (selectedCard === "Connect to AI") {
+    return <MCPDetailView onBack={() => setSelectedCard(null)} />
+  }
+  if (selectedCard === "Capture") {
+    return <ChromeExtensionDetailView onBack={() => setSelectedCard(null)} />
+  }
+  return (
+    
+
+

+ Build your personal memory +

+

+ Your supermemory comes alive when you
capture and connect + what's important +

+
+ +
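+        {/* Only "Connect to AI" and "Capture" open in-place detail views */}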
+ {integrationCards.map((card) => { + const isClickable = + card.title === "Connect to AI" || card.title === "Capture" + + if (isClickable) { + return ( + + ) + } + + return ( +
+
+

{card.title}

+

+ {card.description} +

+
+
{card.icon}
+
+ ) + })} +
+ +
+ + +
+
+ ) +} diff --git a/apps/web/app/onboarding/setup/page.tsx b/apps/web/app/onboarding/setup/page.tsx new file mode 100644 index 000000000..7437e49f0 --- /dev/null +++ b/apps/web/app/onboarding/setup/page.tsx @@ -0,0 +1,50 @@ +import { motion } from "motion/react" + +export function AnimatedGradientBackground() { + return ( +
+ + + +
+  )
+}
diff --git a/apps/web/app/onboarding/setup/relatable-question.tsx b/apps/web/app/onboarding/setup/relatable-question.tsx
new file mode 100644
index 000000000..693822aaf
--- /dev/null
+++ b/apps/web/app/onboarding/setup/relatable-question.tsx
@@ -0,0 +1,122 @@
+"use client"
+
+import { useState } from "react"
+import { motion } from "motion/react"
+import { Button } from "@ui/components/button"
+import { useRouter } from "next/navigation"
+import { cn } from "@lib/utils"
+import { dmSansClassName } from "@/utils/fonts"
+
+const relatableOptions = [
+  {
+    emoji: "😔",
+    text: "I always forget what I save in my Twitter bookmarks",
+  },
+  {
+    emoji: "😭",
+    text: "Going through e-books manually is so tedious",
+  },
+  {
+    emoji: "🥲",
+    text: "I always have to feed every AI app with my data",
+  },
+  {
+    emoji: "😵‍💫",
+    text: "Referring to meeting notes makes my AI chat hallucinate",
+  },
+  {
+    emoji: "🫤",
+    text: "I save nothing in my browser, it's just useless",
+  },
+]
+
+export function RelatableQuestion() {
+  const router = useRouter()
+  const [selectedOptions, setSelectedOptions] = useState([])
+
+  const handleContinueOrSkip = () => {
+    router.push("/onboarding?flow=setup&step=integrations")
+  }
+
+  return (
+    
+      
+        Which of these sound most relatable?
+      
+
+      
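+      {/* Selections live in local state only; Continue and Skip both route to the integrations step */}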
+ {relatableOptions.map((option, index) => ( + + ))} +
+
+
+ +
+
+
+ ) +} diff --git a/apps/web/app/onboarding/welcome/continue-step.tsx b/apps/web/app/onboarding/welcome/continue-step.tsx new file mode 100644 index 000000000..b4a4ef6c8 --- /dev/null +++ b/apps/web/app/onboarding/welcome/continue-step.tsx @@ -0,0 +1,41 @@ +import { dmSansClassName } from "@/utils/fonts" +import { cn } from "@lib/utils" +import { Button } from "@ui/components/button" +import { motion } from "motion/react" +import { useRouter } from "next/navigation" + +export function ContinueStep() { + const router = useRouter() + + const handleContinue = () => { + router.push("/onboarding?flow=welcome&step=features") + } + + return ( + +

+ I'm built with Supermemory's super fast memory API, +
so you never have to worry about forgetting
what matters + across your AI apps. +

+ +
+ ) +} diff --git a/apps/web/app/onboarding/welcome/features-step.tsx b/apps/web/app/onboarding/welcome/features-step.tsx new file mode 100644 index 000000000..9a8546722 --- /dev/null +++ b/apps/web/app/onboarding/welcome/features-step.tsx @@ -0,0 +1,99 @@ +import { motion } from "motion/react" +import { Button } from "@ui/components/button" +import { useRouter } from "next/navigation" +import { cn } from "@lib/utils" +import { dmSansClassName } from "@/utils/fonts" + +export function FeaturesStep() { + const router = useRouter() + + const handleContinue = () => { + router.push("/onboarding?flow=welcome&step=memories") + } + return ( + +
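+      {/* Three capability cards (remember, find, grow), each with an icon, title, and blurb */}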

+ What I can do for you +

+ +
+
+
+ Brain icon +
+
+

+ Remember every context +

+

+ I keep track of what you've saved and shared with your supermemory. +

+
+
+ +
+
+ Search icon +
+
+

+ Find when you need it +

+

+ I surface the right memories inside
your supermemory, superfast. +

+
+
+ +
+
+ Growth icon +
+
+

+ Grow with your supermemory +

+

+ I learn and personalize over time, so every interaction feels + natural. +

+
+
+
+ + + + +
+ ) +} diff --git a/apps/web/app/onboarding/welcome/greeting-step.tsx b/apps/web/app/onboarding/welcome/greeting-step.tsx new file mode 100644 index 000000000..343d02184 --- /dev/null +++ b/apps/web/app/onboarding/welcome/greeting-step.tsx @@ -0,0 +1,22 @@ +import { motion } from "motion/react" + +interface GreetingStepProps { + name: string +} + +export function GreetingStep({ name }: GreetingStepProps) { + return ( + +

+ Hi {name}, I'm Nova +

+
+ ) +} diff --git a/apps/web/app/onboarding/welcome/input-step.tsx b/apps/web/app/onboarding/welcome/input-step.tsx new file mode 100644 index 000000000..cac747bfb --- /dev/null +++ b/apps/web/app/onboarding/welcome/input-step.tsx @@ -0,0 +1,93 @@ +import { motion } from "motion/react" +import { LabeledInput } from "@ui/input/labeled-input" +import { Button } from "@ui/components/button" + +interface InputStepProps { + name: string + setName: (name: string) => void + handleSubmit: () => void + isSubmitting: boolean +} + +export function InputStep({ + name, + setName, + handleSubmit, + isSubmitting, +}: InputStepProps) { + return ( + +
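+      {/* Name state is lifted to the onboarding page, which mirrors it into localStorage */}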

+ What should I call you? +

+
+ { + if (e.key === "Enter") { + handleSubmit() + } + }, + className: "!text-white placeholder:!text-[#525966] !h-[40px] pl-4", + }} + onChange={(e) => setName((e.target as HTMLInputElement).value)} + style={{ + background: + "linear-gradient(0deg, rgba(91, 126, 245, 0.04) 0%, rgba(91, 126, 245, 0.04) 100%)", + }} + /> + +
+
+  )
+}
diff --git a/apps/web/app/onboarding/welcome/memories-step.tsx b/apps/web/app/onboarding/welcome/memories-step.tsx
new file mode 100644
index 000000000..260b61d2f
--- /dev/null
+++ b/apps/web/app/onboarding/welcome/memories-step.tsx
@@ -0,0 +1,293 @@
+import { motion } from "motion/react"
+import { Button } from "@ui/components/button"
+import { useState } from "react"
+import { useRouter } from "next/navigation"
+import { cn } from "@lib/utils"
+import { dmSansClassName } from "@/utils/fonts"
+
+interface MemoriesStepProps {
+  onSubmit: (data: {
+    twitter: string
+    linkedin: string
+    description: string
+    otherLinks: string[]
+  }) => void
+}
+
+type ValidationError = {
+  twitter: string | null
+  linkedin: string | null
+}
+
+export function MemoriesStep({ onSubmit }: MemoriesStepProps) {
+  const router = useRouter()
+  const [otherLinks, setOtherLinks] = useState([""])
+  const [twitterHandle, setTwitterHandle] = useState("")
+  const [linkedinProfile, setLinkedinProfile] = useState("")
+  const [description, setDescription] = useState("")
+  const [isSubmitting] = useState(false)
+  const [errors, setErrors] = useState<ValidationError>({
+    twitter: null,
+    linkedin: null,
+  })
+
+  const addOtherLink = () => {
+    if (otherLinks.length < 3) {
+      setOtherLinks([...otherLinks, ""])
+    }
+  }
+
+  const updateOtherLink = (index: number, value: string) => {
+    const updated = [...otherLinks]
+    updated[index] = value
+    setOtherLinks(updated)
+  }
+
+  const validateTwitterLink = (value: string): string | null => {
+    if (!value.trim()) return null
+
+    const normalized = value.trim().toLowerCase()
+    const isXDomain =
+      normalized.includes("x.com") || normalized.includes("twitter.com")
+
+    if (!isXDomain) {
+      return "share your X profile link"
+    }
+
+    // Check if it's a profile link (not a status/tweet link)
+    const profilePattern =
+      /^(https?:\/\/)?(www\.)?(x\.com|twitter\.com)\/[^\/]+$/
+    const statusPattern = /\/status\//i
+
+    if (statusPattern.test(normalized) || !profilePattern.test(normalized)) {
+      return "share your X profile link"
+    }
+
+    // Note: 404 validation would require a backend API endpoint
+    // Format validation is handled above
+    return null
+  }
+
+  const validateLinkedInLink = (value: string): string | null => {
+    if (!value.trim()) return null
+
+    const normalized = value.trim().toLowerCase()
+    const isLinkedInDomain = normalized.includes("linkedin.com")
+
+    if (!isLinkedInDomain) {
+      return "share your LinkedIn profile link"
+    }
+
+    // Check if it's a profile link (should have /in/ or /pub/)
+    const profilePattern =
+      /^(https?:\/\/)?(www\.)?linkedin\.com\/(in|pub)\/[^\/]+/
+
+    if (!profilePattern.test(normalized)) {
+      return "share your LinkedIn profile link"
+    }
+
+    // Note: 404 validation would require a backend API endpoint
+    // Format validation is handled above
+    return null
+  }
+
+  const handleTwitterChange = (value: string) => {
+    setTwitterHandle(value)
+    const error = validateTwitterLink(value)
+    setErrors((prev) => ({ ...prev, twitter: error }))
+  }
+
+  const handleLinkedInChange = (value: string) => {
+    setLinkedinProfile(value)
+    const error = validateLinkedInLink(value)
+    setErrors((prev) => ({ ...prev, linkedin: error }))
+  }
+
+  return (
+    
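+      {/* Validation is format-only; checking that a profile actually exists would need a backend endpoint */}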

+ Let's add your memories +

+ +
+
+ +
+ handleTwitterChange(e.target.value)} + onBlur={() => { + if (twitterHandle.trim()) { + const error = validateTwitterLink(twitterHandle) + setErrors((prev) => ({ ...prev, twitter: error })) + } + }} + className={`w-full px-4 py-2 bg-[#070E1B] border rounded-xl text-white placeholder-onboarding focus:outline-none transition-colors h-[40px] ${ + errors.twitter + ? "border-[#52596633] bg-[#290F0A]" + : "border-[#525966]/20" + }`} + /> + {errors.twitter && ( +
+
+
+

+ {errors.twitter} +

+
+
+ )} +
+
+ +
+ +
+ handleLinkedInChange(e.target.value)} + onBlur={() => { + if (linkedinProfile.trim()) { + const error = validateLinkedInLink(linkedinProfile) + setErrors((prev) => ({ ...prev, linkedin: error })) + } + }} + className={`w-full px-4 py-2 bg-[#070E1B] border rounded-xl text-white placeholder-onboarding focus:outline-none transition-colors h-[40px] ${ + errors.linkedin + ? "border-[#52596633] bg-[#290F0A]" + : "border-[#525966]/20" + }`} + /> + {errors.linkedin && ( +
+
+
+

+ {errors.linkedin} +

+
+
+ )} +
+
+ + + +
+ +