176 lines
4.8 KiB
Python
176 lines
4.8 KiB
Python
"""
|
||
LLM服务 - 智谱AI GLM-4集成
|
||
"""
|
||
from typing import Optional, List, Dict, Any
|
||
from app.config import get_settings
|
||
from app.utils.logger import logger
|
||
|
||
# Optional dependency: the zhipuai SDK may be absent in some deployments.
# Gate all LLM features on this flag instead of failing at import time;
# LLMService checks it and degrades to a no-op client when False.
try:
    from zhipuai import ZhipuAI
    ZHIPUAI_AVAILABLE = True
except ImportError:
    ZHIPUAI_AVAILABLE = False
    logger.warning("zhipuai包未安装,LLM功能将不可用")
|
||
|
||
|
||
class LLMService:
    """Thin wrapper around the ZhipuAI GLM-4 chat API.

    Degrades gracefully: if the SDK is missing, the API key is not
    configured, or client construction fails, ``self.client`` is ``None``
    and every method returns a safe fallback instead of raising.
    """

    def __init__(self):
        """Initialize the ZhipuAI client; leave it as None on any failure."""
        settings = get_settings()

        # SDK missing entirely — flag was set at import time.
        if not ZHIPUAI_AVAILABLE:
            logger.warning("智谱AI SDK未安装")
            self.client = None
            return

        # SDK present but no credentials configured.
        if not settings.zhipuai_api_key:
            logger.warning("智谱AI API Key未配置")
            self.client = None
            return

        try:
            self.client = ZhipuAI(api_key=settings.zhipuai_api_key)
            logger.info("智谱AI LLM服务初始化成功")
        except Exception as e:
            # Never let construction errors escape __init__; callers probe
            # self.client instead.
            logger.error(f"智谱AI初始化失败: {e}")
            self.client = None

    def chat(
        self,
        messages: List[Dict[str, str]],
        model: str = "glm-4",
        temperature: float = 0.7,
        max_tokens: int = 2000
    ) -> Optional[str]:
        """Send a chat-completion request to the LLM.

        Args:
            messages: Conversation messages, e.g. [{"role": "user", "content": "..."}].
            model: Model name.
            temperature: Sampling temperature.
            max_tokens: Maximum number of tokens to generate.

        Returns:
            The assistant reply text, or None on any failure.
        """
        if not self.client:
            logger.error("LLM客户端未初始化")
            return None

        try:
            logger.info(f"调用LLM: model={model}, messages={len(messages)}条")
            response = self.client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens
            )

            if response.choices:
                content = response.choices[0].message.content
                logger.info(f"LLM响应成功,长度: {len(content) if content else 0}")
                return content
            else:
                logger.warning("LLM响应中没有choices")
                return None

        except Exception as e:
            # Broad catch is deliberate: network/SDK errors must not crash
            # the caller; they get None and decide how to degrade.
            logger.error(f"LLM调用失败: {type(e).__name__}: {e}")
            import traceback
            logger.error(f"详细错误: {traceback.format_exc()}")
            return None

    @staticmethod
    def _parse_json_response(text: str) -> Optional[Dict[str, Any]]:
        """Recover a JSON object from an LLM reply.

        Tolerates the model wrapping its answer in ```json fences or adding
        surrounding prose by falling back to the outermost ``{...}`` span.

        Args:
            text: Raw reply text from the model.

        Returns:
            The parsed dict, or None if no JSON object can be recovered or
            the top-level JSON value is not an object.
        """
        import json
        import re

        candidate = text.strip()
        # Strip a ```json ... ``` (or bare ```) fence if the whole reply is fenced.
        fenced = re.match(r"^```(?:json)?\s*(.*?)\s*```$", candidate, re.DOTALL)
        if fenced:
            candidate = fenced.group(1)

        try:
            result = json.loads(candidate)
        except ValueError:
            # Fall back: try the outermost {...} span inside surrounding prose.
            start, end = candidate.find("{"), candidate.rfind("}")
            if start == -1 or end <= start:
                return None
            try:
                result = json.loads(candidate[start:end + 1])
            except ValueError:
                return None

        # Callers expect a mapping; reject lists/scalars.
        return result if isinstance(result, dict) else None

    def analyze_intent(self, user_message: str) -> Dict[str, Any]:
        """Classify the intent of a user message via the LLM.

        Args:
            user_message: Raw user message text.

        Returns:
            A dict with at least "type" and "confidence"; falls back to
            {"type": "unknown", "confidence": 0} on any failure.
        """
        if not self.client:
            return {"type": "unknown", "confidence": 0}

        prompt = f"""你是一个股票分析助手的意图识别模块。请分析用户的查询意图。

用户消息:{user_message}

请识别以下意图类型之一:
1. market_data - 查询实时行情、价格
2. technical_analysis - 技术分析、技术指标
3. fundamental - 基本面信息、公司信息
4. visualization - K线图、图表
5. unknown - 无法识别

请以JSON格式返回:
{{
    "type": "意图类型",
    "confidence": 0.0-1.0,
    "stock_name": "提取的股票名称(如果有)"
}}
"""

        try:
            # Low temperature: classification should be near-deterministic.
            response = self.chat([{"role": "user", "content": prompt}], temperature=0.3)
            if response:
                # BUGFIX: the model frequently wraps its JSON in ```json fences
                # or adds prose around it; plain json.loads on the raw reply
                # raised and every such query degraded to "unknown".
                result = self._parse_json_response(response)
                if result is not None:
                    return result
        except Exception as e:
            logger.error(f"意图分析失败: {e}")

        return {"type": "unknown", "confidence": 0}

    def generate_analysis_summary(
        self,
        stock_code: str,
        stock_name: str,
        data: Dict[str, Any]
    ) -> str:
        """Generate a natural-language analysis summary for a stock.

        Args:
            stock_code: Stock ticker code.
            stock_name: Stock display name.
            data: Analysis data to summarize (interpolated into the prompt).

        Returns:
            Summary text, or a fixed fallback message on failure.
        """
        if not self.client:
            return "LLM服务不可用,无法生成智能分析"

        prompt = f"""你是一个专业的股票分析师。请根据以下数据对{stock_name}({stock_code})进行分析总结。

数据:
{data}

请提供:
1. 当前状态评估
2. 技术指标解读
3. 投资建议(仅供参考)

注意:
- 使用专业但易懂的语言
- 控制在200字以内
- 必须声明"仅供参考,不构成投资建议"
"""

        try:
            response = self.chat([{"role": "user", "content": prompt}], temperature=0.7)
            return response or "分析生成失败"
        except Exception as e:
            logger.error(f"分析总结生成失败: {e}")
            return "分析生成失败"
|
||
|
||
|
||
# Module-level singleton shared by the app; import and reuse this instance
# rather than constructing LLMService repeatedly (construction reads settings
# and builds the ZhipuAI client).
llm_service = LLMService()
|