diff --git a/README.md b/README.md index 6325fdb3..e36a789d 100644 --- a/README.md +++ b/README.md @@ -187,6 +187,13 @@ Or with Docker: docker compose up --build -d ``` +**Memory tuning:** The Node.js server uses a default heap of 128MB. For larger memory stores or heavy usage, increase the heap by setting `NODE_OPTIONS` in the Dockerfile's production stage or via environment: + +```bash +# In docker-compose.yml environment section: +- NODE_OPTIONS=--max-old-space-size=256 +``` + The backend exposes: - `/api/memory/*` – memory operations diff --git a/dashboard/.env.local.example b/dashboard/.env.local.example new file mode 100644 index 00000000..93f9dd06 --- /dev/null +++ b/dashboard/.env.local.example @@ -0,0 +1,4 @@ +# OpenMemory Dashboard Configuration +NEXT_PUBLIC_API_URL=http://localhost:8080 +# Set this if your backend has OM_API_KEY configured for authentication +NEXT_PUBLIC_API_KEY=your \ No newline at end of file diff --git a/dashboard/CHAT_SETUP.md b/dashboard/CHAT_SETUP.md new file mode 100644 index 00000000..5c67bf15 --- /dev/null +++ b/dashboard/CHAT_SETUP.md @@ -0,0 +1,159 @@ +# Chat Interface Setup + +The chat interface is now connected to the OpenMemory backend and can query memories in real-time. + +## Features + +✅ **Memory Querying**: Searches your memory database for relevant content +✅ **Salience-based Results**: Shows top memories ranked by relevance +✅ **Memory Reinforcement**: Click the + button to boost memory importance +✅ **Real-time Updates**: Live connection to backend API +✅ **Action Buttons**: Quick actions after assistant responses + +## Setup Instructions + +### 1. Start the Backend + +First, make sure the OpenMemory backend is running: + +```bash +cd backend +npm install +npm run dev +``` + +The backend will start on `http://localhost:8080` + +### 2. Configure Environment (Optional) + +The dashboard is pre-configured to connect to `localhost:8080`. 
If your backend runs on a different port, create a `.env.local` file: + +```bash +# dashboard/.env.local +NEXT_PUBLIC_API_URL=http://localhost:8080 +``` + +### 3. Start the Dashboard + +```bash +cd dashboard +npm install +npm run dev +``` + +The dashboard will start on `http://localhost:3000` + +### 4. Add Some Memories + +Before chatting, you need to add some memories to your database. You can do this via: + +**Option A: API (Recommended for Testing)** + +```bash +curl -X POST http://localhost:8080/memory/add \ + -H "Content-Type: application/json" \ + -d '{ + "content": "JavaScript async/await makes asynchronous code more readable", + "tags": ["javascript", "async"], + "metadata": {"source": "learning"} + }' +``` + +**Option B: Use the SDK** + +```javascript +// examples/js-sdk/basic-usage.js +import OpenMemory from '../../sdk-js/src/index.js'; + +const om = new OpenMemory('http://localhost:8080'); + +await om.addMemory({ + content: 'React hooks revolutionized state management', + tags: ['react', 'hooks'], +}); +``` + +**Option C: Ingest a Document** + +```bash +curl -X POST http://localhost:8080/memory/ingest \ + -H "Content-Type: application/json" \ + -d '{ + "content_type": "text", + "data": "Your document content here...", + "metadata": {"source": "document"} + }' +``` + +## How It Works + +### Memory Query Flow + +1. **User Input**: You ask a question in the chat +2. **Backend Query**: POST to `/memory/query` with your question +3. **Vector Search**: Backend searches HSG memory graph +4. **Results**: Top 5 memories returned with salience scores +5. 
**Response**: Chat generates answer based on retrieved memories + +### Memory Reinforcement + +Clicking the **+** button on a memory card: + +- Sends POST to `/memory/reinforce` +- Increases memory salience by 0.1 +- Makes it more likely to appear in future queries + +## Current Features + +✅ Real-time memory querying +✅ Salience-based ranking +✅ Memory reinforcement (boost) +✅ Sector classification display +✅ Error handling with backend status + +## Coming Soon + +- 🚧 LLM Integration (OpenAI, Ollama, Gemini) +- 🚧 Conversation memory persistence +- 🚧 Export chat to memories +- 🚧 WebSocket streaming responses +- 🚧 Quiz generation from memories +- 🚧 Podcast script generation + +## Troubleshooting + +### "Failed to query memories" + +- Ensure backend is running: `npm run dev` in `backend/` +- Check backend is on port 8080: `curl http://localhost:8080/health` +- Verify CORS is enabled (already configured) + +### "No memories found" + +- Add memories using the API or SDK (see setup above) +- Try broader search terms +- Check memory content exists: `GET http://localhost:8080/memory/all` + +### Connection refused + +- Backend not started +- Wrong port in `.env.local` +- Firewall blocking connection + +## API Endpoints Used + +```typescript +POST /memory/query // Search memories +POST /memory/add // Add new memory +POST /memory/reinforce // Boost memory salience +GET /memory/all // List all memories +GET /memory/:id // Get specific memory +``` + +## Next Steps + +1. Add LLM integration for intelligent responses +2. Implement conversation memory storage +3. Add streaming response support +4. Create memory export feature +5. Build quiz/podcast generators diff --git a/dashboard/README.md b/dashboard/README.md new file mode 100644 index 00000000..e215bc4c --- /dev/null +++ b/dashboard/README.md @@ -0,0 +1,36 @@ +This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://nextjs.org/docs/app/api-reference/cli/create-next-app). 
+ +## Getting Started + +First, run the development server: + +```bash +npm run dev +# or +yarn dev +# or +pnpm dev +# or +bun dev +``` + +Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. + +You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file. + +This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel. + +## Learn More + +To learn more about Next.js, take a look at the following resources: + +- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. +- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. + +You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome! + +## Deploy on Vercel + +The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. + +Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details. 
diff --git a/dashboard/app/api/settings/route.ts b/dashboard/app/api/settings/route.ts new file mode 100644 index 00000000..d35b1caf --- /dev/null +++ b/dashboard/app/api/settings/route.ts @@ -0,0 +1,109 @@ +import { NextResponse } from 'next/server' +import fs from 'fs' +import path from 'path' + +const ENV_PATH = path.resolve(process.cwd(), '../.env') + +function parseEnvFile(content: string): Record { + const result: Record = {} + const lines = content.split('\n') + + for (const line of lines) { + const trimmed = line.trim() + if (!trimmed || trimmed.startsWith('#')) continue + + const equalIndex = trimmed.indexOf('=') + if (equalIndex === -1) continue + + const key = trimmed.substring(0, equalIndex).trim() + const value = trimmed.substring(equalIndex + 1).trim() + result[key] = value + } + + return result +} + +function serializeEnvFile(updates: Record): string { + const lines: string[] = [] + + for (const [key, value] of Object.entries(updates)) { + lines.push(`${key}=${value}`) + } + + return lines.join('\n') +} + +export async function GET() { + try { + if (!fs.existsSync(ENV_PATH)) { + return NextResponse.json({ + exists: false, + settings: {} + }) + } + + const content = fs.readFileSync(ENV_PATH, 'utf-8') + const settings = parseEnvFile(content) + + const masked = { ...settings } + if (masked.OPENAI_API_KEY) masked.OPENAI_API_KEY = '***' + if (masked.GEMINI_API_KEY) masked.GEMINI_API_KEY = '***' + if (masked.AWS_SECRET_ACCESS_KEY) masked.AWS_SECRET_ACCESS_KEY = "***" + if (masked.OM_API_KEY) masked.OM_API_KEY = '***' + + return NextResponse.json({ + exists: true, + settings: masked + }) + } catch (e: any) { + console.error('[Settings API] read error:', e) + return NextResponse.json( + { error: 'internal', message: e.message }, + { status: 500 } + ) + } +} + +export async function POST(request: Request) { + try { + const updates = await request.json() + + if (!updates || typeof updates !== 'object') { + return NextResponse.json( + { error: 'invalid_body' }, 
+ { status: 400 } + ) + } + + let content = '' + let envExists = false + + if (fs.existsSync(ENV_PATH)) { + content = fs.readFileSync(ENV_PATH, 'utf-8') + envExists = true + } else { + const examplePath = path.resolve(process.cwd(), '../.env.example') + if (fs.existsSync(examplePath)) { + content = fs.readFileSync(examplePath, 'utf-8') + } + } + + const existing = content ? parseEnvFile(content) : {} + const merged = { ...existing, ...updates } + const newContent = serializeEnvFile(merged) + + fs.writeFileSync(ENV_PATH, newContent, 'utf-8') + + return NextResponse.json({ + ok: true, + created: !envExists, + message: 'Settings saved. Restart the backend to apply changes.' + }) + } catch (e: any) { + console.error('[Settings API] write error:', e) + return NextResponse.json( + { error: 'internal', message: e.message }, + { status: 500 } + ) + } +} diff --git a/dashboard/app/chat/page.tsx b/dashboard/app/chat/page.tsx new file mode 100644 index 00000000..c26d7668 --- /dev/null +++ b/dashboard/app/chat/page.tsx @@ -0,0 +1,277 @@ +"use client" + +import { useState, useEffect, useRef } from "react" +import { API_BASE_URL, getHeaders } from "@/lib/api" +import { MemoryAIEngine, type MemoryReference as AIMemoryRef } from "@/lib/memory-ai-engine" + +interface ChatMessage { + role: "user" | "assistant" + content: string + timestamp: number +} + +interface MemoryReference { + id: string + sector: "semantic" | "episodic" | "procedural" | "emotional" | "reflective" + content: string + salience: number + title: string + last_seen_at?: number +} + +export default function ChatPage() { + const [messages, setMessages] = useState([]) + const [input, setInput] = useState("") + const [busy, setBusy] = useState(false) + const [connecting, setConnecting] = useState(false) + const [awaitingAnswer, setAwaitingAnswer] = useState(false) + const [memories, setMemories] = useState([]) + const scrollRef = useRef(null) + + useEffect(() => { + setTimeout(() => scrollRef.current?.scrollIntoView({ 
behavior: "smooth", block: "end" }), 0) + }, [messages.length]) + + const queryMemories = async (query: string): Promise => { + try { + const response = await fetch(`${API_BASE_URL}/memory/query`, { + method: "POST", + headers: getHeaders(), + body: JSON.stringify({ + query, + k: 10, + filters: {} + }) + }) + + if (!response.ok) { + throw new Error("Failed to query memories") + } + + const data = await response.json() + const raw: MemoryReference[] = data.matches.map((match: any) => ({ + id: match.id, + sector: match.primary_sector || "semantic", + content: match.content, + salience: match.salience || match.score || 0, + title: match.content.substring(0, 50) + (match.content.length > 50 ? "..." : ""), + last_seen_at: match.last_seen_at + })) + return raw + } catch (error) { + console.error("Error querying memories:", error) + return [] + } + } + + const generateResponse = async (userQuery: string, relevantMemories: MemoryReference[]): Promise => { + const aiMemories: AIMemoryRef[] = relevantMemories.map(m => ({ + id: m.id, + sector: m.sector, + content: m.content, + salience: m.salience, + title: m.title, + last_seen_at: m.last_seen_at, + score: (m as any).score + })) + + return await MemoryAIEngine.generateResponse(userQuery, aiMemories) + } + + const sendMessage = async () => { + if (!input.trim() || busy) return + + const userMessage: ChatMessage = { + role: "user", + content: input, + timestamp: Date.now() + } + + setMessages(prev => [...prev, userMessage]) + const currentInput = input + setInput("") + setAwaitingAnswer(true) + setBusy(true) + + try { + const relevantMemories = await queryMemories(currentInput) + setMemories(relevantMemories) + const responseContent = await generateResponse(currentInput, relevantMemories) + + const assistantMessage: ChatMessage = { + role: "assistant", + content: responseContent, + timestamp: Date.now() + } + setMessages(prev => [...prev, assistantMessage]) + } catch (error) { + console.error("Error processing message:", error) 
+ const errorMessage: ChatMessage = { + role: "assistant", + content: "I encountered an error while processing your message. Please make sure the OpenMemory backend is running on port 8080.", + timestamp: Date.now() + } + setMessages(prev => [...prev, errorMessage]) + } finally { + setAwaitingAnswer(false) + setBusy(false) + } + } + + const addMemoryToBag = async (memory: MemoryReference) => { + try { + await fetch(`${API_BASE_URL}/memory/reinforce`, { + method: "POST", + headers: getHeaders(), + body: JSON.stringify({ + id: memory.id, + boost: 0.1 + }) + }) + console.log("Memory reinforced:", memory.id) + } catch (error) { + console.error("Error reinforcing memory:", error) + } + } + + return ( +
+
+ { } +
+
+
+ {messages.map((m, i) => { + if (m.role === "assistant") { + return ( +
+
+
+ {m.content} +
+
+
+ ) + } + return ( +
+
+
{m.content}
+
+
+ ) + })} + {(connecting || awaitingAnswer) && ( +
+
+
+
+
+
+
+
+ {connecting ? "Connecting…" : "Thinking…"} +
+
+
+ )} +
+
+
+ + { } +
+
+
+ setInput(e.target.value)} + onKeyDown={e => { + if (e.key === "Enter" && !e.shiftKey) { + e.preventDefault() + sendMessage() + } + }} + placeholder="Ask about your memories..." + className="flex-1 bg-stone-950 border border-stone-900 rounded-2xl px-4 py-3 text-sm text-stone-200 placeholder:text-stone-500 focus:outline-none focus:border-stone-800" + disabled={busy} + /> + +
+
+
+
+ + { } +
+
+
+
+

Memories Used

+ {memories.length} +
+
+ +
+ {memories.length === 0 ? ( +
+ No memories referenced yet +
+ ) : ( + memories.map((memory) => ( +
+
+
+
+
+ + {memory.sector} + +
+ + + + {(memory.salience * 100).toFixed(0)}% +
+
+

+ {memory.title} +

+

+ {memory.content} +

+
+ +
+
+
+ )) + )} +
+
+
+
+
+ ) +} diff --git a/dashboard/app/decay/page.tsx b/dashboard/app/decay/page.tsx new file mode 100644 index 00000000..47619c8c --- /dev/null +++ b/dashboard/app/decay/page.tsx @@ -0,0 +1,375 @@ +"use client" + +import { useState, useEffect, useRef } from "react" +import { Chart, registerables } from "chart.js" +import { API_BASE_URL, getHeaders } from "@/lib/api" + +Chart.register(...registerables) + +interface decaystats { + sector: string + avgdecay: number + atriskrisk: number + stable: number + avgSalience: number + memories: memory[] +} + +interface memory { + id: string + content: string + sector: string + salience: number +} + +export default function decay() { + const chartref = useRef(null) + const chartInstance = useRef(null) + const [stats, setstats] = useState([]) + const [riskmems, setriskmems] = useState([]) + const [loading, setloading] = useState(true) + const [error, seterror] = useState(null) + const [dashstats, setdashstats] = useState(null) + + useEffect(() => { + fetchdata() + const interval = setInterval(fetchdata, 60000) + return () => clearInterval(interval) + }, []) + + async function fetchdata() { + setloading(true) + seterror(null) + try { + const [statsres, memsres] = await Promise.all([ + fetch(`${API_BASE_URL}/dashboard/stats`, { headers: getHeaders() }), + fetch(`${API_BASE_URL}/memory/all?l=100&u=0`, { headers: getHeaders() }) + ]) + + if (!statsres.ok || !memsres.ok) throw new Error('failed to fetch data') + + const statsdata = await statsres.json() + const memsdata = await memsres.json() + + setdashstats(statsdata) + + const sectors = ['semantic', 'episodic', 'procedural', 'emotional', 'reflective'] + const sectorstats = sectors.map(sector => { + const sectormems = (memsdata.items || []).filter((m: any) => m.primary_sector === sector) + const atrisk = sectormems.filter((m: any) => m.salience < 0.3).length + const stable = sectormems.filter((m: any) => m.salience >= 0.3).length + const avgdecay = sectormems.length > 0 + ? 
sectormems.reduce((sum: number, m: any) => sum + (m.decay_lambda || 0.01), 0) / sectormems.length + : 0.01 + const avgSalience = sectormems.length > 0 + ? sectormems.reduce((sum: number, m: any) => sum + m.salience, 0) / sectormems.length + : 1.0 + + return { + sector, + avgdecay, + atriskrisk: atrisk, + stable, + avgSalience, + memories: sectormems.map((m: any) => ({ + id: m.id, + content: m.content, + sector: m.primary_sector, + salience: m.salience + })) + } + }) + + setstats(sectorstats) + + const atriskmems = (memsdata.items || []) + .filter((m: any) => m.salience < 0.3) + .slice(0, 10) + .map((m: any) => ({ + id: m.id, + content: m.content, + sector: m.primary_sector, + salience: m.salience + })) + + setriskmems(atriskmems) + + } catch (e: any) { + seterror(e.message) + } finally { + setloading(false) + } + } + + async function boostmemory(id: string) { + try { + const res = await fetch(`${API_BASE_URL}/memory/${id}`, { + method: 'PATCH', + headers: getHeaders(), + body: JSON.stringify({ salience: 0.8 }) + }) + if (!res.ok) throw new Error('failed to boost memory') + fetchdata() + } catch (e: any) { + alert(`Error: ${e.message}`) + } + } + + useEffect(() => { + if (!chartref.current || stats.length === 0) return + + const ctx = chartref.current.getContext("2d") + if (!ctx) return + + if (chartInstance.current) { + chartInstance.current.destroy() + } + + chartInstance.current = new Chart(ctx, { + type: "line", + data: { + labels: Array.from({ length: 30 }, (_, i) => `day ${i + 1}`), + datasets: [ + { + label: "semantic", + data: Array.from({ length: 30 }, (_, i) => { + const sectorData = stats.find(s => s.sector === 'semantic') + const avgSalience = sectorData?.avgSalience || 1.0 + const avgDecay = sectorData?.avgdecay || 0.012 + return Math.max(0, avgSalience - (i * avgDecay)) + }), + borderColor: "rgba(96, 165, 250, 1)", + backgroundColor: "rgba(96, 165, 250, 0.1)", + tension: 0.4 + }, + { + label: "episodic", + data: Array.from({ length: 30 }, (_, i) => { + 
const sectorData = stats.find(s => s.sector === 'episodic') + const avgSalience = sectorData?.avgSalience || 1.0 + const avgDecay = sectorData?.avgdecay || 0.018 + return Math.max(0, avgSalience - (i * avgDecay)) + }), + borderColor: "rgba(251, 191, 36, 1)", + backgroundColor: "rgba(251, 191, 36, 0.1)", + tension: 0.4 + }, + { + label: "procedural", + data: Array.from({ length: 30 }, (_, i) => { + const sectorData = stats.find(s => s.sector === 'procedural') + const avgSalience = sectorData?.avgSalience || 1.0 + const avgDecay = sectorData?.avgdecay || 0.008 + return Math.max(0, avgSalience - (i * avgDecay)) + }), + borderColor: "rgba(52, 211, 153, 1)", + backgroundColor: "rgba(52, 211, 153, 0.1)", + tension: 0.4 + }, + { + label: "emotional", + data: Array.from({ length: 30 }, (_, i) => { + const sectorData = stats.find(s => s.sector === 'emotional') + const avgSalience = sectorData?.avgSalience || 1.0 + const avgDecay = sectorData?.avgdecay || 0.022 + return Math.max(0, avgSalience - (i * avgDecay)) + }), + borderColor: "rgba(244, 114, 182, 1)", + backgroundColor: "rgba(244, 114, 182, 0.1)", + tension: 0.4 + }, + { + label: "reflective", + data: Array.from({ length: 30 }, (_, i) => { + const sectorData = stats.find(s => s.sector === 'reflective') + const avgSalience = sectorData?.avgSalience || 1.0 + const avgDecay = sectorData?.avgdecay || 0.015 + return Math.max(0, avgSalience - (i * avgDecay)) + }), + borderColor: "rgba(167, 139, 250, 1)", + backgroundColor: "rgba(167, 139, 250, 0.1)", + tension: 0.4 + } + ] + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { + display: false + }, + tooltip: { + mode: "index", + intersect: false, + backgroundColor: "rgba(0, 0, 0, 0.9)", + borderColor: "rgba(63, 63, 70, 1)", + borderWidth: 1 + } + }, + scales: { + y: { + beginAtZero: false, + min: 0, + max: 1, + grid: { + color: "rgba(39, 39, 42, 0.3)" + }, + ticks: { + color: "#8a8a8a" + } + }, + x: { + grid: { + display: false + }, + 
ticks: { + color: "#8a8a8a", + maxTicksLimit: 10 + } + } + } + } + }) + + return () => { + if (chartInstance.current) { + chartInstance.current.destroy() + chartInstance.current = null + } + } + }, [stats]) + + const getdecaycolor = (rate: number) => { + if (rate < 0.1) return "text-green-500" + if (rate < 0.2) return "text-yellow-500" + return "text-red-500" + } + + return ( +
+
+

Decay Monitor

+ +
+ + {loading &&
Loading decay data...
} + {error &&
Error: {error}
} + + {!loading && !error && ( +
+ + { } +
+ + Salience Decay Trends + (30-day projection) + +
+
+
+ Semantic +
+
+
+ Episodic +
+
+
+ Procedural +
+
+
+ Emotional +
+
+
+ Reflective +
+
+
+ +
+
+ + { } +
+ {stats.map(s => ( +
+

{s.sector}

+
+
+

Avg Decay Rate

+

+ {(s.avgdecay * 100).toFixed(1)}%/day +

+
+
+
+ At Risk + {s.atriskrisk} +
+
+ Stable + {s.stable} +
+
+
+
+ ))} +
+ + { } +
+ + + + + Memories At Risk + +
+ {riskmems.length === 0 ? ( +
+

No memories at risk! All memories are stable.

+
+ ) : ( + riskmems.map(mem => ( +
+
+
+
+ + {mem.sector} + + Salience: {mem.salience.toFixed(2)} +
+

{mem.content}

+
+ +
+
+ )) + )} +
+
+
+ )} +
+ ) +} + + diff --git a/dashboard/app/globals.css b/dashboard/app/globals.css new file mode 100644 index 00000000..7ce119b0 --- /dev/null +++ b/dashboard/app/globals.css @@ -0,0 +1,104 @@ +@import url('https://fonts.googleapis.com/css2?family=Funnel+Display:wght@300..800&display=swap'); +@import "tailwindcss"; + +body { + font-family: 'Funnel Display', sans-serif; +} + +@keyframes fadeIn { + from { + opacity: 0; + } + to { + opacity: 1; + } +} + +@keyframes slideInRight { + from { + opacity: 0; + transform: translateX(20px); + } + to { + opacity: 1; + transform: translateX(0); + } +} + +@keyframes slideInLeft { + from { + opacity: 0; + transform: translateX(-20px); + } + to { + opacity: 1; + transform: translateX(0); + } +} + +@keyframes slideInUp { + from { + opacity: 0; + transform: translateY(20px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +@keyframes scaleIn { + from { + opacity: 0; + transform: scale(0.95); + } + to { + opacity: 1; + transform: scale(1); + } +} + +.custom-scroll::-webkit-scrollbar { + width: 6px; +} + +.custom-scroll::-webkit-scrollbar-track { + background: transparent; +} + +.custom-scroll::-webkit-scrollbar-thumb { + background: #27272a; + border-radius: 3px; +} + +.custom-scroll::-webkit-scrollbar-thumb:hover { + background: #3f3f46; +} + +/* Global scrollbar styles */ +* { + scrollbar-width: thin; + scrollbar-color: #27272a transparent; +} + +*::-webkit-scrollbar { + width: 8px; + height: 8px; +} + +*::-webkit-scrollbar-track { + background: transparent; +} + +*::-webkit-scrollbar-thumb { + background: #27272a; + border-radius: 4px; +} + +*::-webkit-scrollbar-thumb:hover { + background: #3f3f46; +} + +*::-webkit-scrollbar-corner { + background: transparent; +} diff --git a/dashboard/app/layout.tsx b/dashboard/app/layout.tsx new file mode 100644 index 00000000..25158fa3 --- /dev/null +++ b/dashboard/app/layout.tsx @@ -0,0 +1,44 @@ +import type { Metadata } from "next"; +import { Geist, Geist_Mono } from 
"next/font/google"; +import "./globals.css"; +import Sidebar from "@/components/sidebar"; +import Navbar from "@/components/navbar"; + +const geistSans = Geist({ + variable: "--font-geist-sans", + subsets: ["latin"], +}); + +const geistMono = Geist_Mono({ + variable: "--font-geist-mono", + subsets: ["latin"], +}); + +export const metadata: Metadata = { + title: "OpenMemory Dashboard", + description: "Memory analytics and monitoring dashboard", + icons: { + icon: '/favicon.ico', + }, +}; + +export default function RootLayout({ + children, +}: Readonly<{ + children: React.ReactNode; +}>) { + return ( + + + + +
+ {children} +
+ + + ); +} diff --git a/dashboard/app/memories/page.tsx b/dashboard/app/memories/page.tsx new file mode 100644 index 00000000..09b96211 --- /dev/null +++ b/dashboard/app/memories/page.tsx @@ -0,0 +1,514 @@ +"use client" + +import { useState, useEffect } from "react" +import { API_BASE_URL, getHeaders } from "@/lib/api" + +interface mem { + id: string + content: string + primary_sector: string + tags: string[] + metadata?: any + created_at: number + updated_at?: number + last_seen_at?: number + salience: number + decay_lambda?: number + version?: number +} + +const sectorColors: Record = { + semantic: "sky", + episodic: "amber", + procedural: "emerald", + emotional: "rose", + reflective: "purple" +} + +export default function memories() { + const [mems, setmems] = useState([]) + const [srch, setsrch] = useState("") + const [filt, setfilt] = useState("all") + const [loading, setloading] = useState(false) + const [error, seterror] = useState(null) + const [page, setpage] = useState(1) + const [showAddModal, setShowAddModal] = useState(false) + const [showEditModal, setShowEditModal] = useState(false) + const [showDeleteModal, setShowDeleteModal] = useState(false) + const [editingMem, setEditingMem] = useState(null) + const [deletingMemId, setDeletingMemId] = useState(null) + const [apiKey, setApiKey] = useState('') + + const limit = 1000 + + useEffect(() => { + const key = process.env.NEXT_PUBLIC_API_KEY + if (key) { + setApiKey(key) + } + }, []) + + useEffect(() => { + if (apiKey) { + fetchMems() + } + }, [page, filt, apiKey]) + + async function fetchMems() { + setloading(true) + seterror(null) + try { + const offset = (page - 1) * limit + const url = filt !== "all" + ? 
`${API_BASE_URL}/memory/all?l=${limit}&u=${offset}&sector=${filt}` + : `${API_BASE_URL}/memory/all?l=${limit}&u=${offset}` + const res = await fetch(url, { headers: getHeaders() }) + if (!res.ok) throw new Error('failed to fetch memories') + const data = await res.json() + setmems(data.items || []) + } catch (e: any) { + seterror(e.message) + } finally { + setloading(false) + } + } + + async function handleSearch() { + if (!srch.trim()) { + fetchMems() + return + } + setloading(true) + seterror(null) + try { + const res = await fetch(`${API_BASE_URL}/memory/query`, { + method: 'POST', + headers: getHeaders(), + body: JSON.stringify({ + query: srch, + k: 1000, + filters: filt !== "all" ? { sector: filt } : undefined, + }), + }) + if (!res.ok) throw new Error('search failed') + const data = await res.json() + setmems( + (data.matches || []).map((m: any) => ({ + id: m.id, + content: m.content, + primary_sector: m.primary_sector, + tags: [], + created_at: m.last_seen_at || Date.now(), + salience: m.salience, + })) + ) + } catch (e: any) { + seterror(e.message) + } finally { + setloading(false) + } + } + + async function handleAddMemory(content: string, sector: string, tags: string) { + try { + const res = await fetch(`${API_BASE_URL}/memory/add`, { + method: 'POST', + headers: getHeaders(), + body: JSON.stringify({ + content, + tags: tags.split(',').map((t) => t.trim()).filter(Boolean), + metadata: { primary_sector: sector }, + }), + }) + if (!res.ok) throw new Error('failed to add memory') + setShowAddModal(false) + fetchMems() + } catch (e: any) { + alert(`Error: ${e.message}`) + } + } + + async function handleEditMemory(id: string, content: string, tags: string) { + try { + const res = await fetch(`${API_BASE_URL}/memory/${id}`, { + method: 'PATCH', + headers: getHeaders(), + body: JSON.stringify({ + content, + tags: tags.split(',').map((t) => t.trim()).filter(Boolean), + }), + }) + if (!res.ok) throw new Error('failed to update memory') + setShowEditModal(false) + 
setEditingMem(null) + fetchMems() + } catch (e: any) { + alert(`Error: ${e.message}`) + } + } + + async function handleDeleteMemory(id: string) { + try { + const res = await fetch(`${API_BASE_URL}/memory/${id}`, { + method: 'DELETE', + headers: getHeaders(), + }) + if (!res.ok) throw new Error('failed to delete memory') + setShowDeleteModal(false) + setDeletingMemId(null) + fetchMems() + } catch (e: any) { + alert(`Error: ${e.message}`) + } + } + + const filteredMems = mems.filter(m => { + const matchesSearch = !srch || m.content.toLowerCase().includes(srch.toLowerCase()) + return matchesSearch + }) + + const sectorCounts = mems.reduce((acc, m) => { + acc[m.primary_sector] = (acc[m.primary_sector] || 0) + 1 + return acc + }, {} as Record) + + return ( +
+
+

Memory Topology

+ +
+ +
+
+ Filters + + + + + + +
+ +
+ Search +
+ + + setsrch(e.target.value)} + onKeyDown={(e) => e.key === 'Enter' && handleSearch()} + className="w-full bg-stone-950 rounded-xl border border-stone-900 outline-none p-2 pl-10 text-stone-300" + placeholder="Search memories..." + /> +
+
+ +
+
+
+
{sectorCounts.semantic || 0}
+
Semantic
+
+
+
{sectorCounts.episodic || 0}
+
Episodic
+
+
+
{sectorCounts.procedural || 0}
+
Procedural
+
+
+
{sectorCounts.emotional || 0}
+
Emotional
+
+
+
{sectorCounts.reflective || 0}
+
Reflective
+
+
+
+ +
+

Memories ({filteredMems.length})

+ {loading &&
Loading...
} + {error &&
Error: {error}
} + {!loading && !error && ( +
+ {filteredMems.length === 0 ? ( +
+ No memories found.
Try adjusting your filters.
+
+ ) : ( + filteredMems.map((mem) => ( +
+
+
+ +
+
+

{mem.content}

+
+ + {mem.primary_sector} + + + Salience: {(mem.salience * 100).toFixed(0)}% + + + {new Date(mem.created_at).toLocaleDateString()} + + {mem.tags?.map(tag => ( + + {tag} + + ))} +
+
+
+
+ + +
+
+ )) + )} +
+ )} + + {!loading && !error && filteredMems.length >= limit && ( +
+ + Page {page} + +
+ )} +
+
+ + {showAddModal && setShowAddModal(false)} onAdd={handleAddMemory} />} + + {showEditModal && editingMem && ( + { setShowEditModal(false); setEditingMem(null) }} + onEdit={handleEditMemory} + /> + )} + + {showDeleteModal && deletingMemId && ( + { setShowDeleteModal(false); setDeletingMemId(null) }} + onConfirm={() => handleDeleteMemory(deletingMemId)} + /> + )} +
+ ) +} + +function AddMemoryModal({ onClose, onAdd }: { onClose: () => void; onAdd: (content: string, sector: string, tags: string) => void }) { + const [content, setContent] = useState('') + const [sector, setSector] = useState('semantic') + const [tags, setTags] = useState('') + + return ( +
+
+

Add New Memory

+
+
+ +