commit 271938b823 (parent 935877b1d1)

    update
@@ -37,11 +37,14 @@ class SmartStockAgent:
         # Register skills
         self._register_skills()

+        # Read the configured model
+        self.model_override = getattr(self.settings, 'smart_agent_model', None)
+
         # Check whether the LLM is available
         self.use_llm = bool(self.settings.zhipuai_api_key) and llm_service.client is not None

         if self.use_llm:
-            logger.info("Smart Agent initialized (smart mode + deep LLM integration + Tushare Pro advanced data)")
+            logger.info(f"Smart Agent initialized (smart mode + deep LLM integration + Tushare Pro advanced data, model: {self.model_override or 'default'})")
         else:
             logger.warning("Smart Agent initialized (rule-based mode; configuring an LLM is recommended)")

@@ -50,7 +53,7 @@ class SmartStockAgent:
         loop = asyncio.get_event_loop()
         return await loop.run_in_executor(
             None,
-            lambda: llm_service.chat(messages, temperature, max_tokens)
+            lambda: llm_service.chat(messages, temperature, max_tokens, model_override=self.model_override)
         )

     def _register_skills(self):
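
Side note on the hunk above: `llm_service.chat` is a blocking call, so the agent hands it to the default thread-pool executor to keep the event loop responsive. A minimal, self-contained sketch of the same pattern — `slow_chat` and its delay are illustrative stand-ins, not project code:

    import asyncio
    import time

    def slow_chat(prompt: str) -> str:
        # Stand-in for a blocking LLM client call.
        time.sleep(1.0)
        return f"echo: {prompt}"

    async def main():
        loop = asyncio.get_running_loop()
        # The blocking call runs in the default thread pool, so the
        # event loop keeps serving other coroutines in the meantime.
        reply = await loop.run_in_executor(None, lambda: slow_chat("hello"))
        print(reply)

    asyncio.run(main())
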
@@ -2088,7 +2091,8 @@ RSI:{technical.get('rsi', 0):.2f if technical.get('rsi') else 'calculating'}
             stream = llm_service.chat_stream(
                 messages=[{"role": "user", "content": prompt}],
                 temperature=0.7,
-                max_tokens=2500  # raise the token budget to fit the news analysis
+                max_tokens=2500,  # raise the token budget to fit the news analysis
+                model_override=self.model_override
             )

             for chunk in stream:
@@ -2163,7 +2167,8 @@ RSI:{technical.get('rsi', 0):.2f if technical.get('rsi') else 'calculating'}
             stream = llm_service.chat_stream(
                 messages=[{"role": "user", "content": prompt}],
                 temperature=0.7,
-                max_tokens=2500
+                max_tokens=2500,
+                model_override=self.model_override
             )

             for chunk in stream:
@@ -2480,7 +2485,8 @@ RSI:{technical.get('rsi', 0):.2f if technical.get('rsi') else 'calculating'}
             stream = llm_service.chat_stream(
                 messages=[{"role": "user", "content": prompt}],
                 temperature=0.7,
-                max_tokens=2500
+                max_tokens=2500,
+                model_override=self.model_override
             )

             # Iterate the synchronous generator in a thread to avoid blocking the event loop
@@ -2581,7 +2587,8 @@ MACD:{f"{technical.get('macd'):.4f}" if technical.get('macd') else 'calculating'}
             stream = llm_service.chat_stream(
                 messages=[{"role": "user", "content": prompt}],
                 temperature=0.7,
-                max_tokens=2500  # raise the token budget to fit the news analysis
+                max_tokens=2500,  # raise the token budget to fit the news analysis
+                model_override=self.model_override
             )

             # Iterate the synchronous generator in a thread to avoid blocking the event loop
@@ -2984,7 +2991,8 @@ MACD:{f"{technical.get('macd'):.4f}" if technical.get('macd') else 'calculating'}
             stream = llm_service.chat_stream(
                 messages=[{"role": "user", "content": prompt}],
                 temperature=0.7,
-                max_tokens=self._calculate_max_tokens(intent)
+                max_tokens=self._calculate_max_tokens(intent),
+                model_override=self.model_override
             )

             for chunk in stream:
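
The recurring "iterate the synchronous generator in a thread" comments in these hunks refer to one pattern: `chat_stream` returns a blocking generator, so each chunk is pulled on a worker thread rather than on the event loop. A hedged sketch of that idea (`sync_stream` is an illustrative stand-in, not the project's code):

    import asyncio

    def sync_stream():
        # Stand-in for a blocking chunk generator such as chat_stream().
        for chunk in ("analy", "sis ", "done"):
            yield chunk

    async def consume():
        loop = asyncio.get_running_loop()
        stream = sync_stream()
        while True:
            # next() blocks, so each pull runs in the default executor;
            # None serves as the end-of-stream sentinel.
            chunk = await loop.run_in_executor(None, next, stream, None)
            if chunk is None:
                break
            print(chunk, end="", flush=True)

    asyncio.run(consume())
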
@@ -116,6 +116,10 @@ class Settings(BaseSettings):
     paper_trading_position_b: float = 500  # position size for B-grade signals (USDT)
     paper_trading_position_c: float = 200  # position size for C-grade signals (USDT)

+    # Agent model configuration (valid values: zhipu, deepseek)
+    smart_agent_model: str = "deepseek"   # model used by SmartAgent
+    crypto_agent_model: str = "deepseek"  # model used by CryptoAgent
+
     class Config:
         env_file = find_env_file()
         case_sensitive = False
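
Since `Settings` is a pydantic `BaseSettings` subclass with `case_sensitive = False` and an `env_file`, the two new fields should also be settable from the environment. A plausible `.env` override, assuming pydantic's default field-name-to-variable mapping (an assumption, not confirmed by the diff):

    SMART_AGENT_MODEL=zhipu
    CRYPTO_AGENT_MODEL=deepseek
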
@@ -143,8 +143,11 @@ class LLMSignalAnalyzer:

     def __init__(self):
         """Initialize the analyzer."""
+        from app.config import get_settings
         self.news_service = get_news_service()
-        logger.info("LLM signal analyzer initialized (with news sentiment)")
+        settings = get_settings()
+        self.model_override = getattr(settings, 'crypto_agent_model', None)
+        logger.info(f"LLM signal analyzer initialized (with news sentiment, model: {self.model_override or 'default'})")

     async def analyze(self, symbol: str, data: Dict[str, pd.DataFrame],
                       symbols: List[str] = None) -> Dict[str, Any]:
@@ -170,7 +173,7 @@ class LLMSignalAnalyzer:
             response = llm_service.chat([
                 {"role": "system", "content": self.SYSTEM_PROMPT},
                 {"role": "user", "content": data_prompt}
-            ])
+            ], model_override=self.model_override)

             if not response:
                 logger.warning(f"{symbol} LLM analysis returned no response")
@@ -19,16 +19,18 @@ class LLMService:
         messages: List[Dict[str, str]],
         model: str = None,
         temperature: float = 0.7,
-        max_tokens: int = 2000
+        max_tokens: int = 2000,
+        model_override: str = None
     ) -> Optional[str]:
         """
         Chat with the LLM.

         Args:
             messages: list of messages
-            model: model name (ignored; the currently selected model is used)
+            model: model name (deprecated; use model_override)
             temperature: sampling temperature
             max_tokens: maximum number of tokens
+            model_override: which model to use (zhipu/deepseek)

         Returns:
             The LLM response text
@@ -36,7 +38,8 @@ class LLMService:
         return self.multi_service.chat(
             messages=messages,
             temperature=temperature,
-            max_tokens=max_tokens
+            max_tokens=max_tokens,
+            model_override=model_override
         )

     def analyze_intent(self, user_message: str) -> Dict[str, Any]:
@@ -47,7 +50,8 @@ class LLMService:
         self,
         messages: List[Dict[str, str]],
         temperature: float = 0.7,
-        max_tokens: int = 2000
+        max_tokens: int = 2000,
+        model_override: str = None
     ):
         """
         Stream a chat conversation with the LLM.
@@ -56,6 +60,7 @@ class LLMService:
             messages: list of messages
             temperature: sampling temperature
             max_tokens: maximum number of tokens
+            model_override: which model to use (zhipu/deepseek)

         Yields:
             Text fragments of the LLM response
@@ -63,7 +68,8 @@ class LLMService:
         return self.multi_service.chat_stream(
             messages=messages,
             temperature=temperature,
-            max_tokens=max_tokens
+            max_tokens=max_tokens,
+            model_override=model_override
         )


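
With these four hunks, `model_override` now threads all the way from the agents through `LLMService` into `MultiLLMService`. A hedged usage sketch, assuming `llm_service` is the module-level singleton used elsewhere in the diff (the prompts are illustrative):

    # Non-streaming call pinned to DeepSeek.
    reply = llm_service.chat(
        [{"role": "user", "content": "Summarize today's market."}],
        temperature=0.7,
        max_tokens=500,
        model_override="deepseek",
    )

    # Streaming call pinned to Zhipu; chunks arrive incrementally.
    for chunk in llm_service.chat_stream(
        [{"role": "user", "content": "Explain the MACD indicator."}],
        model_override="zhipu",
    ):
        print(chunk, end="")
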
@@ -66,8 +66,12 @@ class MultiLLMService:
         except Exception as e:
             logger.error(f"DeepSeek initialization failed: {e}")

-        # Set the default model (prefer DeepSeek, since it is cheaper)
-        if 'deepseek' in self.clients:
+        # Set the default model (prefer the value from the config file)
+        preferred_model = getattr(settings, 'smart_agent_model', None)
+        if preferred_model and preferred_model in self.clients:
+            self.current_model = preferred_model
+            logger.info(f"Using configured model: {preferred_model}")
+        elif 'deepseek' in self.clients:
             self.current_model = 'deepseek'
         elif 'zhipu' in self.clients:
             self.current_model = 'zhipu'
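
The selection order above is: configured model if its client initialized, then DeepSeek, then Zhipu. A tiny sketch of the same fallback in isolation (the `clients` contents are illustrative):

    clients = {"deepseek": object()}  # suppose only DeepSeek initialized
    preferred = "zhipu"               # smart_agent_model from config

    if preferred and preferred in clients:
        current = preferred
    elif "deepseek" in clients:
        current = "deepseek"          # taken: falls back rather than failing
    elif "zhipu" in clients:
        current = "zhipu"

    print(current)  # -> deepseek
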
@@ -138,10 +142,12 @@ class MultiLLMService:

         if provider == 'zhipu':
             # Zhipu AI call
+            # Zhipu is stricter about parameters; temperature must be within 0.0-1.0
+            safe_temperature = max(0.0, min(1.0, temperature))
             response = client.chat.completions.create(
                 model=model_id,
                 messages=messages,
-                temperature=temperature,
+                temperature=safe_temperature,
                 max_tokens=max_tokens
             )
         elif provider == 'deepseek':
@@ -205,10 +211,12 @@ class MultiLLMService:

         if provider == 'zhipu':
             # Zhipu AI streaming call
+            # Zhipu is stricter about parameters; temperature must be within 0.0-1.0
+            safe_temperature = max(0.0, min(1.0, temperature))
             response = client.chat.completions.create(
                 model=model_id,
                 messages=messages,
-                temperature=temperature,
+                temperature=safe_temperature,
                 max_tokens=max_tokens,
                 stream=True
             )
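
The clamp added in both hunks is a plain `max(0.0, min(1.0, t))`. A tiny sketch of its behavior:

    def clamp_temperature(t: float) -> float:
        # Mirrors the safe_temperature clamp in the hunks above.
        return max(0.0, min(1.0, t))

    assert clamp_temperature(1.3) == 1.0   # above Zhipu's range -> clamped down
    assert clamp_temperature(-0.2) == 0.0  # negative -> clamped up
    assert clamp_temperature(0.7) == 0.7   # in range -> unchanged
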
@@ -135,6 +135,12 @@ html, body {
   color: var(--text-primary);
 }

+.model-name {
+  color: var(--text-primary);
+  font-size: 13px;
+  font-family: inherit;
+}
+
 .status {
   display: flex;
   align-items: center;
@@ -29,17 +29,13 @@
       <span>Tradus | AI Financial Agent</span>
     </div>
     <div class="header-right">
-      <!-- Model Selector -->
+      <!-- Model Display (read-only, not switchable) -->
       <div class="model-selector" v-if="currentModel">
         <svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
           <circle cx="12" cy="12" r="3"/>
           <path d="M12 1v6m0 6v6M5.64 5.64l4.24 4.24m4.24 4.24l4.24 4.24M1 12h6m6 0h6M5.64 18.36l4.24-4.24m4.24-4.24l4.24-4.24"/>
         </svg>
-        <select v-model="selectedModel" @change="switchModel" class="model-select">
-          <option v-for="model in availableModels" :key="model.provider" :value="model.provider">
-            {{ model.name }}
-          </option>
-        </select>
+        <span class="model-name">{{ currentModel.name }}</span>
       </div>
       <!-- Logout Button -->
       <button class="logout-btn" @click="logout" title="Log out">
@@ -12,9 +12,7 @@ createApp({
       showImageModal: false,
       modalImageUrl: '',
       showContactModal: false,
-      availableModels: [],
-      currentModel: null,
-      selectedModel: null
+      currentModel: null
     };
   },
   mounted() {
@@ -469,40 +467,10 @@ createApp({
         const data = await response.json();

         if (data.success) {
-          this.availableModels = data.models;
           this.currentModel = data.current;
-          this.selectedModel = data.current ? data.current.provider : null;
         }
       } catch (error) {
-
-      }
-    },
-
-    async switchModel() {
-      if (!this.selectedModel) return;
-
-      try {
-        const response = await fetch('/api/llm/switch', {
-          method: 'POST',
-          headers: {
-            'Content-Type': 'application/json'
-          },
-          body: JSON.stringify({
-            provider: this.selectedModel
-          })
-        });
-
-        const data = await response.json();
-
-        if (data.success) {
-          this.currentModel = data.current;
-          this.showNotification(`Switched to ${data.current.name}`);
-        } else {
-          this.showNotification('Switch failed');
-        }
-      } catch (error) {
-
-        this.showNotification('Switch failed');
+        // ignore errors
       }
     },