Author: aaron
Date: 2026-02-10 00:04:22 +08:00
Parent: 935877b1d1
Commit: 271938b823

8 changed files with 57 additions and 58 deletions

View File

@@ -37,11 +37,14 @@ class SmartStockAgent:
         # Register skills
         self._register_skills()
 
+        # Read the model configured for this agent
+        self.model_override = getattr(self.settings, 'smart_agent_model', None)
+
         # Check whether an LLM is available
         self.use_llm = bool(self.settings.zhipuai_api_key) and llm_service.client is not None
         if self.use_llm:
-            logger.info("Smart Agent initialized: smart mode + deep LLM integration + Tushare Pro advanced data")
+            logger.info(f"Smart Agent initialized: smart mode + deep LLM integration + Tushare Pro advanced data, model: {self.model_override or 'default'}")
         else:
             logger.warning("Smart Agent initialized in rule-based mode; configuring an LLM is recommended")
@@ -50,7 +53,7 @@ class SmartStockAgent:
         loop = asyncio.get_event_loop()
         return await loop.run_in_executor(
             None,
-            lambda: llm_service.chat(messages, temperature, max_tokens)
+            lambda: llm_service.chat(messages, temperature, max_tokens, model_override=self.model_override)
         )
 
     def _register_skills(self):
@@ -2088,7 +2091,8 @@ RSI: {technical.get('rsi', 0):.2f if technical.get('rsi') else 'calculating'}
         stream = llm_service.chat_stream(
             messages=[{"role": "user", "content": prompt}],
             temperature=0.7,
-            max_tokens=2500  # Increased token budget to fit the news analysis
+            max_tokens=2500,  # Increased token budget to fit the news analysis
+            model_override=self.model_override
         )
 
         for chunk in stream:
@@ -2163,7 +2167,8 @@ RSI: {technical.get('rsi', 0):.2f if technical.get('rsi') else 'calculating'}
         stream = llm_service.chat_stream(
             messages=[{"role": "user", "content": prompt}],
             temperature=0.7,
-            max_tokens=2500
+            max_tokens=2500,
+            model_override=self.model_override
         )
 
         for chunk in stream:
@@ -2480,7 +2485,8 @@ RSI: {technical.get('rsi', 0):.2f if technical.get('rsi') else 'calculating'}
         stream = llm_service.chat_stream(
             messages=[{"role": "user", "content": prompt}],
             temperature=0.7,
-            max_tokens=2500
+            max_tokens=2500,
+            model_override=self.model_override
         )
 
         # Iterate the synchronous generator in a thread to avoid blocking the event loop
@@ -2581,7 +2587,8 @@ MACD: {f"{technical.get('macd'):.4f}" if technical.get('macd') else 'calculating'}
         stream = llm_service.chat_stream(
             messages=[{"role": "user", "content": prompt}],
             temperature=0.7,
-            max_tokens=2500  # Increased token budget to fit the news analysis
+            max_tokens=2500,  # Increased token budget to fit the news analysis
+            model_override=self.model_override
         )
 
         # Iterate the synchronous generator in a thread to avoid blocking the event loop
@@ -2984,7 +2991,8 @@ MACD: {f"{technical.get('macd'):.4f}" if technical.get('macd') else 'calculating'}
         stream = llm_service.chat_stream(
             messages=[{"role": "user", "content": prompt}],
             temperature=0.7,
-            max_tokens=self._calculate_max_tokens(intent)
+            max_tokens=self._calculate_max_tokens(intent),
+            model_override=self.model_override
         )
 
         for chunk in stream:
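Note: every call site above funnels through the same pattern — the synchronous llm_service.chat / chat_stream call is pushed onto a worker thread via run_in_executor, and the lambda is what carries the new model_override keyword across that boundary. A minimal runnable sketch of the pattern (blocking_chat is a stand-in for the project's llm_service.chat, not its real implementation):

    import asyncio

    def blocking_chat(messages, temperature, max_tokens, model_override=None):
        # Stand-in for llm_service.chat; the real call blocks on network I/O.
        return f"[{model_override or 'default'}] reply to: {messages[-1]['content']}"

    async def achat(messages, temperature=0.7, max_tokens=2000, model_override=None):
        loop = asyncio.get_event_loop()
        # The lambda captures model_override, mirroring the diff above.
        return await loop.run_in_executor(
            None,
            lambda: blocking_chat(messages, temperature, max_tokens,
                                  model_override=model_override)
        )

    print(asyncio.run(achat([{"role": "user", "content": "ping"}],
                            model_override="deepseek")))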

View File

@@ -116,6 +116,10 @@ class Settings(BaseSettings):
     paper_trading_position_b: float = 500  # Position size for B-grade signals (USDT)
     paper_trading_position_c: float = 200  # Position size for C-grade signals (USDT)
 
+    # Agent model configuration (allowed values: zhipu, deepseek)
+    smart_agent_model: str = "deepseek"   # Model used by SmartAgent
+    crypto_agent_model: str = "deepseek"  # Model used by CryptoAgent
+
     class Config:
         env_file = find_env_file()
         case_sensitive = False
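Since Settings extends pydantic's BaseSettings with case_sensitive = False and an env_file, the two new fields can presumably be overridden per deployment without touching code. A hedged sketch of that resolution order, assuming pydantic v1 (which the class Config style above suggests):

    import os
    from pydantic import BaseSettings  # pydantic v1 import path

    class Settings(BaseSettings):
        smart_agent_model: str = "deepseek"   # default from the diff
        crypto_agent_model: str = "deepseek"

        class Config:
            case_sensitive = False  # SMART_AGENT_MODEL matches smart_agent_model

    os.environ["SMART_AGENT_MODEL"] = "zhipu"
    print(Settings().smart_agent_model)   # -> zhipu (env var wins over the default)
    print(Settings().crypto_agent_model)  # -> deepseek (falls back to the default)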

View File

@@ -143,8 +143,11 @@ class LLMSignalAnalyzer:
     def __init__(self):
         """Initialize the analyzer"""
+        from app.config import get_settings
         self.news_service = get_news_service()
-        logger.info("LLM signal analyzer initialized (with news sentiment)")
+        settings = get_settings()
+        self.model_override = getattr(settings, 'crypto_agent_model', None)
+        logger.info(f"LLM signal analyzer initialized (with news sentiment; model: {self.model_override or 'default'})")
 
     async def analyze(self, symbol: str, data: Dict[str, pd.DataFrame],
                       symbols: List[str] = None) -> Dict[str, Any]:
@@ -170,7 +173,7 @@ class LLMSignalAnalyzer:
             response = llm_service.chat([
                 {"role": "system", "content": self.SYSTEM_PROMPT},
                 {"role": "user", "content": data_prompt}
-            ])
+            ], model_override=self.model_override)
 
             if not response:
                 logger.warning(f"{symbol}: LLM analysis returned no response")

View File

@@ -19,16 +19,18 @@ class LLMService:
         messages: List[Dict[str, str]],
         model: str = None,
         temperature: float = 0.7,
-        max_tokens: int = 2000
+        max_tokens: int = 2000,
+        model_override: str = None
     ) -> Optional[str]:
         """
         Chat with the LLM
 
         Args:
             messages: list of chat messages
-            model: model name (ignored; the currently selected model is used)
+            model: model name (deprecated; use model_override instead)
             temperature: sampling temperature
             max_tokens: maximum number of tokens
+            model_override: which model to use (zhipu/deepseek)
 
         Returns:
             the LLM response text
@@ -36,7 +38,8 @@ class LLMService:
         return self.multi_service.chat(
             messages=messages,
             temperature=temperature,
-            max_tokens=max_tokens
+            max_tokens=max_tokens,
+            model_override=model_override
         )
 
     def analyze_intent(self, user_message: str) -> Dict[str, Any]:
@@ -47,7 +50,8 @@ class LLMService:
         self,
         messages: List[Dict[str, str]],
         temperature: float = 0.7,
-        max_tokens: int = 2000
+        max_tokens: int = 2000,
+        model_override: str = None
     ):
         """
         Chat with the LLM in streaming mode
@@ -56,6 +60,7 @@ class LLMService:
             messages: list of chat messages
             temperature: sampling temperature
             max_tokens: maximum number of tokens
+            model_override: which model to use (zhipu/deepseek)
 
         Yields:
             text chunks of the LLM response
@@ -63,7 +68,8 @@ class LLMService:
         return self.multi_service.chat_stream(
             messages=messages,
             temperature=temperature,
-            max_tokens=max_tokens
+            max_tokens=max_tokens,
+            model_override=model_override
         )
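With the pass-through in place, a caller can pin a provider for a single request while the process-wide default stays untouched. A minimal sketch of the forwarding shape (StubMultiLLMService is a stand-in, not the project's MultiLLMService):

    from typing import Dict, List, Optional

    class StubMultiLLMService:
        def chat(self, messages, temperature=0.7, max_tokens=2000, model_override=None):
            return f"handled by {model_override or 'current default'}"

    class LLMService:
        def __init__(self):
            self.multi_service = StubMultiLLMService()

        def chat(
            self,
            messages: List[Dict[str, str]],
            model: str = None,          # deprecated, kept for backward compatibility
            temperature: float = 0.7,
            max_tokens: int = 2000,
            model_override: str = None  # per-call provider pin (zhipu/deepseek)
        ) -> Optional[str]:
            return self.multi_service.chat(
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                model_override=model_override
            )

    print(LLMService().chat([{"role": "user", "content": "hi"}], model_override="zhipu"))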

View File

@@ -66,8 +66,12 @@ class MultiLLMService:
         except Exception as e:
             logger.error(f"DeepSeek initialization failed: {e}")
 
-        # Pick the default model (prefer DeepSeek, since it is cheaper)
-        if 'deepseek' in self.clients:
+        # Pick the default model (prefer the value from the config file)
+        preferred_model = getattr(settings, 'smart_agent_model', None)
+        if preferred_model and preferred_model in self.clients:
+            self.current_model = preferred_model
+            logger.info(f"Using configured model: {preferred_model}")
+        elif 'deepseek' in self.clients:
             self.current_model = 'deepseek'
         elif 'zhipu' in self.clients:
             self.current_model = 'zhipu'
@@ -138,10 +142,12 @@ class MultiLLMService:
             if provider == 'zhipu':
                 # Zhipu AI call
+                # Zhipu is stricter about parameters: temperature must be within 0.0-1.0
+                safe_temperature = max(0.0, min(1.0, temperature))
                 response = client.chat.completions.create(
                     model=model_id,
                     messages=messages,
-                    temperature=temperature,
+                    temperature=safe_temperature,
                     max_tokens=max_tokens
                 )
             elif provider == 'deepseek':
@@ -205,10 +211,12 @@ class MultiLLMService:
             if provider == 'zhipu':
                 # Zhipu AI streaming call
+                # Zhipu is stricter about parameters: temperature must be within 0.0-1.0
+                safe_temperature = max(0.0, min(1.0, temperature))
                 response = client.chat.completions.create(
                     model=model_id,
                     messages=messages,
-                    temperature=temperature,
+                    temperature=safe_temperature,
                     max_tokens=max_tokens,
                     stream=True
                 )
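The clamp is a silent min/max pin rather than a validation error, so out-of-range temperatures degrade gracefully for Zhipu while other providers still receive the caller's value unchanged. A quick check of the expression used in both hunks:

    def clamp_temperature(temperature: float) -> float:
        # Same expression as the diff: pin into Zhipu's accepted 0.0-1.0 range.
        return max(0.0, min(1.0, temperature))

    assert clamp_temperature(1.5) == 1.0    # above range -> clamped down
    assert clamp_temperature(-0.2) == 0.0   # below range -> clamped up
    assert clamp_temperature(0.7) == 0.7    # in range -> unchanged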

View File

@@ -135,6 +135,12 @@ html, body {
     color: var(--text-primary);
 }
 
+.model-name {
+    color: var(--text-primary);
+    font-size: 13px;
+    font-family: inherit;
+}
+
 .status {
     display: flex;
     align-items: center;

View File

@@ -29,17 +29,13 @@
                 <span>TradusAI Financial Agent</span>
             </div>
             <div class="header-right">
-                <!-- Model Selector -->
+                <!-- Model display (read-only; switching removed) -->
                 <div class="model-selector" v-if="currentModel">
                     <svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
                         <circle cx="12" cy="12" r="3"/>
                         <path d="M12 1v6m0 6v6M5.64 5.64l4.24 4.24m4.24 4.24l4.24 4.24M1 12h6m6 0h6M5.64 18.36l4.24-4.24m4.24-4.24l4.24-4.24"/>
                     </svg>
-                    <select v-model="selectedModel" @change="switchModel" class="model-select">
-                        <option v-for="model in availableModels" :key="model.provider" :value="model.provider">
-                            {{ model.name }}
-                        </option>
-                    </select>
+                    <span class="model-name">{{ currentModel.name }}</span>
                 </div>
                 <!-- Logout Button -->
                 <button class="logout-btn" @click="logout" title="Logout">

View File

@@ -12,9 +12,7 @@ createApp({
             showImageModal: false,
             modalImageUrl: '',
             showContactModal: false,
-            availableModels: [],
-            currentModel: null,
-            selectedModel: null
+            currentModel: null
         };
     },
     mounted() {
@@ -469,40 +467,10 @@ createApp({
                 const data = await response.json();
 
                 if (data.success) {
-                    this.availableModels = data.models;
                     this.currentModel = data.current;
-                    this.selectedModel = data.current ? data.current.provider : null;
                 }
             } catch (error) {
                 // Ignore errors
             }
-        },
-        async switchModel() {
-            if (!this.selectedModel) return;
-
-            try {
-                const response = await fetch('/api/llm/switch', {
-                    method: 'POST',
-                    headers: {
-                        'Content-Type': 'application/json'
-                    },
-                    body: JSON.stringify({
-                        provider: this.selectedModel
-                    })
-                });
-
-                const data = await response.json();
-                if (data.success) {
-                    this.currentModel = data.current;
-                    this.showNotification(`Switched to ${data.current.name}`);
-                } else {
-                    this.showNotification('Switch failed');
-                }
-            } catch (error) {
-                this.showNotification('Switch failed');
-            }
         }
     },