add nocmem: auto memory recall + ingest via NuoNuo hippocampal network
- nocmem Python service (mem/): FastAPI wrapper around NuoNuo's Hopfield-Hebbian memory, with /recall, /ingest, /store, /stats endpoints - NOC integration: auto recall after user message (injected as system msg), async ingest after LLM response (fire-and-forget) - Recall: cosine pre-filter (threshold 0.35) + Hopfield attention (β=32), top_k=3, KV-cache friendly (appended after user msg, not in system prompt) - Ingest: LLM extraction + paraphrase augmentation, heuristic fallback - Wired into main.rs, life.rs (agent done), http.rs (api chat) - Config: optional `nocmem.endpoint` in config.yaml - Includes benchmarks: LongMemEval (R@5=94.0%), efficiency, noise vs scale - Design doc: doc/nocmem.md
This commit is contained in:
@@ -86,6 +86,14 @@ pub async fn life_loop(
|
||||
// append the agent completion as a new user message
|
||||
messages.push(serde_json::json!({"role": "user", "content": notification}));
|
||||
|
||||
// auto recall from nocmem
|
||||
if let Some(ref nocmem) = config.nocmem {
|
||||
let recalled = crate::nocmem::recall(&nocmem.endpoint, &notification).await;
|
||||
if !recalled.is_empty() {
|
||||
messages.push(serde_json::json!({"role": "system", "content": recalled}));
|
||||
}
|
||||
}
|
||||
|
||||
if let BackendConfig::OpenAI { ref endpoint, ref model, ref api_key } = config.backend {
|
||||
let chat_id_tg = ChatId(cid);
|
||||
let sid = format!("agent-{id}");
|
||||
|
||||
Reference in New Issue
Block a user