refactor: server no longer runs agent loop or LLM

- Remove agent_loop from server (was ~400 lines) — server dispatches to workers
- AgentManager simplified to pure dispatcher (send_event → worker)
- Remove LLM config requirement from server (workers bring their own via config.yaml)
- Remove process_feedback, build_feedback_tools from server
- Remove chat API endpoint (LLM on workers only)
- Remove service proxy (services run on workers)
- Worker reads LLM config from its own config.yaml
- ws_worker.rs handles WorkerToServer::Update messages (DB + broadcast)
- Verified locally: tori server + tori worker connect and register
This commit is contained in:
2026-04-06 13:18:21 +01:00
parent dfedb6dd45
commit decabc0e8a
9 changed files with 380 additions and 997 deletions

View File

@@ -1,53 +1,20 @@
use std::sync::Arc;
use axum::{
extract::State,
http::StatusCode,
response::{IntoResponse, Response},
routing::post,
Json, Router,
};
use serde::Deserialize;
use crate::llm::{ChatMessage, LlmClient};
use crate::AppState;
/// Request body for the `/chat` endpoint.
#[derive(Deserialize)]
struct ChatRequest {
    // Messages sent by the client; each entry is a role/content pair.
    messages: Vec<SimpleChatMessage>,
}
/// Minimal chat message as accepted over the wire: a role string
/// (e.g. "user"/"assistant" — not validated here) plus its text content.
#[derive(Deserialize)]
struct SimpleChatMessage {
    role: String,
    content: String,
}
/// Builds the chat sub-router: a single `POST /chat` route bound to the
/// shared application state.
pub fn router(state: Arc<AppState>) -> Router {
    let routes = Router::new().route("/chat", post(chat));
    routes.with_state(state)
}
async fn chat(
State(state): State<Arc<AppState>>,
Json(input): Json<ChatRequest>,
) -> Result<Json<serde_json::Value>, Response> {
let llm = LlmClient::new(&state.config.llm);
let messages: Vec<ChatMessage> = input
.messages
.into_iter()
.map(|m| ChatMessage {
role: m.role,
content: Some(m.content),
tool_calls: None,
tool_call_id: None,
})
.collect();
let reply = llm.chat(messages).await.map_err(|e| {
tracing::error!("Chat LLM error: {}", e);
(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response()
})?;
Ok(Json(serde_json::json!({ "reply": reply })))
async fn chat() -> Result<Json<serde_json::Value>, Response> {
// Chat endpoint removed — LLM runs on workers only
Err((StatusCode::GONE, "Chat endpoint removed. LLM runs on workers.").into_response())
}

View File

@@ -59,50 +59,12 @@ async fn proxy_to_service(
}
/// Former reverse proxy from the server into a project's local service.
///
/// Defect fixed: this span contained diff residue — the old parameter
/// list and the new underscored one were both present, followed by the
/// dead pre-refactor proxy body (port lookup, header copy, upstream
/// request) and the new single-statement body, which cannot parse as one
/// function. Resolved to the post-commit version: services run on
/// workers now, so the server cannot proxy to them and every call
/// answers `503 Service Unavailable` with an explanatory message.
///
/// Parameters are retained (underscore-prefixed) so existing call sites
/// keep compiling with the same signature.
async fn proxy_impl(
    _state: &AppState,
    _project_id: &str,
    _path: &str,
    _req: Request<Body>,
) -> Response {
    // Runtime string kept byte-for-byte from the commit (Chinese:
    // "service runs on a worker; cannot proxy from the server").
    (StatusCode::SERVICE_UNAVAILABLE, "服务在 worker 上运行,无法从 server 代理").into_response()
}