add nocmem: auto memory recall + ingest via NuoNuo hippocampal network

- nocmem Python service (mem/): FastAPI wrapper around NuoNuo's
  Hopfield-Hebbian memory, with /recall, /ingest, /store, /stats endpoints
- NOC integration: auto recall after each user message (injected as a
  system-role message, not into the system prompt), async ingest after each
  LLM response (fire-and-forget)
- Recall: cosine pre-filter (threshold 0.35) + Hopfield attention (β=32),
  top_k=3, KV-cache friendly (appended after user msg, not in system prompt)
- Ingest: LLM extraction + paraphrase augmentation, with a heuristic fallback
- Wired into main.rs, life.rs (agent done), http.rs (api chat)
- Config: optional `nocmem.endpoint` in config.yaml
- Includes benchmarks: LongMemEval (R@5=94.0%), efficiency, noise vs scale
- Design doc: doc/nocmem.md
This commit is contained in:
Fam Zheng
2026-04-11 12:24:48 +01:00
parent 688387dac3
commit 7000ccda0f
17 changed files with 4164 additions and 3 deletions

View File

@@ -86,6 +86,14 @@ pub async fn life_loop(
// append the agent completion as a new user message
messages.push(serde_json::json!({"role": "user", "content": notification}));
// auto recall from nocmem
if let Some(ref nocmem) = config.nocmem {
let recalled = crate::nocmem::recall(&nocmem.endpoint, &notification).await;
if !recalled.is_empty() {
messages.push(serde_json::json!({"role": "system", "content": recalled}));
}
}
if let BackendConfig::OpenAI { ref endpoint, ref model, ref api_key } = config.backend {
let chat_id_tg = ChatId(cid);
let sid = format!("agent-{id}");