修复现存的阻塞问题。

This commit is contained in:
aaron 2026-02-21 19:36:58 +08:00
parent d45dbd044c
commit 39cdaa4d75
3 changed files with 41 additions and 7 deletions

View File

@ -578,8 +578,8 @@ class LLMSignalAnalyzer:
# 构建数据提示
data_prompt = self._build_data_prompt(symbol, data, news_text, position_info, futures_data)
# 调用 LLM
response = llm_service.chat([
# 调用 LLM(使用异步方法避免阻塞事件循环)
response = await llm_service.achat([
{"role": "system", "content": system_prompt},
{"role": "user", "content": data_prompt}
], model_override=self.model_override)
@ -1719,8 +1719,8 @@ class LLMSignalAnalyzer:
# 构建持仓分析提示
prompt = self._build_position_review_prompt(symbol, positions, data)
# 调用 LLM
response = llm_service.chat([
# 调用 LLM(使用异步方法避免阻塞事件循环)
response = await llm_service.achat([
{"role": "system", "content": self.POSITION_REVIEW_PROMPT},
{"role": "user", "content": prompt}
], model_override=self.model_override)

View File

@ -1089,8 +1089,8 @@ class SignalAnalyzer:
# 构建分析提示
prompt = self._build_analysis_prompt(data, signal, symbol)
# 调用 LLM
response = llm_service.chat([
# 调用 LLM(使用异步方法避免阻塞事件循环)
response = await llm_service.achat([
{"role": "system", "content": self.CRYPTO_ANALYST_PROMPT},
{"role": "user", "content": prompt}
])

View File

@ -1,6 +1,7 @@
"""
LLM服务 - 兼容层使用多模型服务
"""
import asyncio
from typing import Optional, List, Dict, Any
from app.services.multi_llm_service import multi_llm_service
from app.utils.logger import logger
@ -23,7 +24,7 @@ class LLMService:
model_override: str = None
) -> Optional[str]:
"""
调用LLM进行对话
调用LLM进行对话同步版本
Args:
messages: 消息列表
@ -42,6 +43,39 @@ class LLMService:
model_override=model_override
)
async def achat(
    self,
    messages: List[Dict[str, str]],
    model: str = None,
    temperature: float = 0.7,
    max_tokens: int = 2000,
    model_override: str = None
) -> Optional[str]:
    """
    Call the LLM for a chat completion (async version — does not block the event loop).

    Args:
        messages: Chat messages, each a {"role": ..., "content": ...} dict.
        model: Model name (deprecated; use model_override).
        temperature: Sampling temperature passed through to the backing service.
        max_tokens: Maximum number of tokens to generate.
        model_override: Which backing model to use (zhipu/deepseek).

    Returns:
        The LLM response text, or None on failure.
    """
    # Run the synchronous LLM call in the default thread pool so the event
    # loop stays free to serve other coroutines while the request is in flight.
    # NOTE: get_running_loop() is the correct API inside a coroutine;
    # get_event_loop() is deprecated in that context and may create a brand-new
    # loop when none is running, masking bugs.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: self.multi_service.chat(
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            model_override=model_override
        )
    )
def analyze_intent(self, user_message: str) -> Dict[str, Any]:
    """Analyze the user's intent via the LLM.

    Thin delegation to the underlying multi-model service; the returned
    dict is whatever that service produces.
    """
    service = self.multi_service
    return service.analyze_intent(user_message)