From 39cdaa4d7519e62cfb94cf9833aa8ec40e47373a Mon Sep 17 00:00:00 2001 From: aaron <> Date: Sat, 21 Feb 2026 19:36:58 +0800 Subject: [PATCH] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E7=8E=B0=E6=88=90=E9=98=BB?= =?UTF-8?q?=E5=A1=9E=E7=9A=84=E9=97=AE=E9=A2=98=E3=80=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../app/crypto_agent/llm_signal_analyzer.py | 8 ++--- backend/app/crypto_agent/signal_analyzer.py | 4 +-- backend/app/services/llm_service.py | 36 ++++++++++++++++++- 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/backend/app/crypto_agent/llm_signal_analyzer.py b/backend/app/crypto_agent/llm_signal_analyzer.py index b28db86..5925031 100644 --- a/backend/app/crypto_agent/llm_signal_analyzer.py +++ b/backend/app/crypto_agent/llm_signal_analyzer.py @@ -578,8 +578,8 @@ class LLMSignalAnalyzer: # 构建数据提示 data_prompt = self._build_data_prompt(symbol, data, news_text, position_info, futures_data) - # 调用 LLM - response = llm_service.chat([ + # 调用 LLM(使用异步方法避免阻塞事件循环) + response = await llm_service.achat([ {"role": "system", "content": system_prompt}, {"role": "user", "content": data_prompt} ], model_override=self.model_override) @@ -1719,8 +1719,8 @@ class LLMSignalAnalyzer: # 构建持仓分析提示 prompt = self._build_position_review_prompt(symbol, positions, data) - # 调用 LLM - response = llm_service.chat([ + # 调用 LLM(使用异步方法避免阻塞事件循环) + response = await llm_service.achat([ {"role": "system", "content": self.POSITION_REVIEW_PROMPT}, {"role": "user", "content": prompt} ], model_override=self.model_override) diff --git a/backend/app/crypto_agent/signal_analyzer.py b/backend/app/crypto_agent/signal_analyzer.py index a97ecfa..47ef1d3 100644 --- a/backend/app/crypto_agent/signal_analyzer.py +++ b/backend/app/crypto_agent/signal_analyzer.py @@ -1089,8 +1089,8 @@ class SignalAnalyzer: # 构建分析提示 prompt = self._build_analysis_prompt(data, signal, symbol) - # 调用 LLM - response = llm_service.chat([ + # 调用 LLM(使用异步方法避免阻塞事件循环) + response = 
await llm_service.achat([ {"role": "system", "content": self.CRYPTO_ANALYST_PROMPT}, {"role": "user", "content": prompt} ]) diff --git a/backend/app/services/llm_service.py b/backend/app/services/llm_service.py index c05bdba..45e6914 100644 --- a/backend/app/services/llm_service.py +++ b/backend/app/services/llm_service.py @@ -1,6 +1,7 @@ """ LLM服务 - 兼容层,使用多模型服务 """ +import asyncio from typing import Optional, List, Dict, Any from app.services.multi_llm_service import multi_llm_service from app.utils.logger import logger @@ -23,7 +24,7 @@ class LLMService: model_override: str = None ) -> Optional[str]: """ - 调用LLM进行对话 + 调用LLM进行对话(同步版本) Args: messages: 消息列表 @@ -42,6 +43,39 @@ class LLMService: model_override=model_override ) + async def achat( + self, + messages: List[Dict[str, str]], + model: str = None, + temperature: float = 0.7, + max_tokens: int = 2000, + model_override: str = None + ) -> Optional[str]: + """ + 调用LLM进行对话(异步版本 - 不阻塞事件循环) + + Args: + messages: 消息列表 + model: 模型名称(已废弃,使用 model_override) + temperature: 温度参数 + max_tokens: 最大token数 + model_override: 指定使用的模型 (zhipu/deepseek) + + Returns: + LLM响应文本 + """ + # 在线程池中执行同步的 LLM 调用,避免阻塞事件循环 + loop = asyncio.get_running_loop() + return await loop.run_in_executor( + None, + lambda: self.multi_service.chat( + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + model_override=model_override + ) + ) + def analyze_intent(self, user_message: str) -> Dict[str, Any]: """使用LLM分析用户意图""" return self.multi_service.analyze_intent(user_message)