first commit

aaron 2025-12-02 22:54:03 +08:00
commit ef36140353
56 changed files with 9970 additions and 0 deletions

42
.env.example Normal file

@@ -0,0 +1,42 @@
# Binance Configuration
BINANCE_WS_BASE_URL=wss://fstream.binance.com
SYMBOL=btcusdt
KLINE_INTERVALS=5m,15m,1h,4h # Multiple intervals (comma-separated)
# Redis Configuration
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_DB=0
REDIS_PASSWORD=
# Performance Tuning
MAX_BUFFER_SIZE=1000
RATE_LIMIT_MESSAGES_PER_SEC=1000
DEDUP_CACHE_SIZE=10000
# Logging
LOG_LEVEL=INFO
# LLM Gate Configuration (minimal gate - frequency-first, quantitative pre-screening)
LLM_GATE_ENABLED=true  # Enable the LLM gate
# Data requirements
LLM_MIN_CANDLES=100  # Minimum number of candles
# Signal quality (minimal - composite score only)
LLM_MIN_COMPOSITE_SCORE=15.0  # Minimum composite score (filters fully neutral signals)
# Frequency limits (the core control!)
LLM_MAX_CALLS_PER_DAY=12  # Maximum LLM calls per day
LLM_MIN_INTERVAL_MINUTES=0  # Minimum interval between calls (minutes)
# LLM API Configuration (optional - for AI-powered trading signals)
# Option 1: Use Deepseek (recommended for Chinese market analysis, low cost)
# OPENAI_API_KEY=sk-your-deepseek-key-here
# OPENAI_BASE_URL=https://api.deepseek.com
# Option 2: Use OpenAI GPT
# OPENAI_API_KEY=sk-your-openai-key-here
# Option 3: Use Anthropic Claude
# ANTHROPIC_API_KEY=sk-ant-your-key-here

56
.gitignore vendored Normal file

@@ -0,0 +1,56 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
venv/
ENV/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# Environment
.env
.env.local
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# Logs
*.log
logs/
# Docker
docker-compose.override.yml
# Redis
dump.rdb
appendonly.aof
# Testing
.pytest_cache/
.coverage
htmlcov/
.tox/
# OS
.DS_Store
Thumbs.db

309
ARCHITECTURE.md Normal file

@@ -0,0 +1,309 @@
# System Architecture
## 📐 Overall Architecture
```
┌─────────────────────────────────────────────────────────────────┐
│                         Binance Futures                         │
│                       (WebSocket Streams)                       │
└────────────────┬────────────────────────────────────────────────┘
                 │ WebSocket connection
┌─────────────────────────────────────────────────────────────────┐
│               Data Ingestion Service (ingestion)                │
│  ┌──────────────────────────────────────────────────────────┐   │
│  │ • Kline streams (5m, 15m, 1h, 4h)                        │   │
│  │ • Order book snapshots (Top 20)                          │   │
│  │ • Live trade stream                                      │   │
│  └──────────────────────────────────────────────────────────┘   │
│                │                                                 │
│                │ dedupe → buffer → batch write                   │
│                ▼                                                 │
└─────────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│                       Redis (data store)                        │
│  ┌──────────────────────────────────────────────────────────┐   │
│  │ Stream: binance:raw:kline:5m   (5-minute klines)         │   │
│  │ Stream: binance:raw:kline:15m  (15-minute klines)        │   │
│  │ Stream: binance:raw:kline:1h   (1-hour klines)           │   │
│  │ Stream: binance:raw:kline:4h   (4-hour klines)           │   │
│  │ Stream: binance:raw:depth:20   (order book top 20)       │   │
│  │ Stream: binance:raw:trade      (live trades)             │   │
│  └──────────────────────────────────────────────────────────┘   │
└────────────────┬────────────────────────────────────────────────┘
                 │ read historical data
┌─────────────────────────────────────────────────────────────────┐
│              Signal Generation Service (scheduler)              │
│                                                                 │
│  ┌─────────────────────────────────────────────────────────┐   │
│  │ 1⃣ Market Analysis Engine                                │   │
│  │    • Fetch data for 6 timeframes (5m/15m/1h/4h/1d/1w)   │   │
│  │    • Compute indicators (RSI/MACD/ATR/EMA/ADX...)       │   │
│  │    • Identify support/resistance levels                 │   │
│  │    • Analyze order flow                                 │   │
│  └─────────────────────────────────────────────────────────┘   │
│                │                                                │
│                ▼                                                │
│  ┌─────────────────────────────────────────────────────────┐   │
│  │ 2⃣ Quantitative Signal Generator                         │   │
│  │    • Trend Score                                        │   │
│  │    • Momentum Score                                     │   │
│  │    • OrderFlow Score                                    │   │
│  │    • Breakout Score                                     │   │
│  │    • Composite score = weighted average                 │   │
│  │    → Output: BUY/SELL/HOLD + confidence                 │   │
│  └─────────────────────────────────────────────────────────┘   │
│                │                                                │
│                ▼                                                │
│  ┌─────────────────────────────────────────────────────────┐   │
│  │ 3⃣ LLM Gate (minimal gating)                             │   │
│  │    ✓ Frequency limits (12 calls/day, interval ≥ 15 min) │   │
│  │    ✓ Data check (≥ 100 candles)                         │   │
│  │    ✓ Quality check (composite score ≥ 15)               │   │
│  │    → PASS / BLOCK                                       │   │
│  └─────────────────────────────────────────────────────────┘   │
│                │                                                │
│                ▼ (if PASS)                                      │
│  ┌─────────────────────────────────────────────────────────┐   │
│  │ 4⃣ LLM Decision Maker (optional)                         │   │
│  │    • Backends: Deepseek / GPT / Claude                  │   │
│  │    • Input: full multi-timeframe technical analysis     │   │
│  │    • Output: identified trading opportunities           │   │
│  │      - Intraday                                         │   │
│  │      - Swing                                            │   │
│  │      - Ambush (pre-positioned entry levels)             │   │
│  │    • Includes: entry / stop-loss / take-profit / risk   │   │
│  └─────────────────────────────────────────────────────────┘   │
│                │                                                │
│                ▼                                                │
│  ┌─────────────────────────────────────────────────────────┐   │
│  │ 5⃣ Signal Aggregator                                     │   │
│  │    • Fuse quantitative + LLM signals                    │   │
│  │    • Check consensus                                    │   │
│  │    • Compute the final confidence                       │   │
│  │    • Produce an overall recommendation                  │   │
│  └─────────────────────────────────────────────────────────┘   │
│                │                                                │
│                ▼                                                │
│           save to file                                          │
└─────────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│                         Output (files)                          │
│                                                                 │
│  ./output/latest_signal.json                                    │
│  {                                                              │
│    "timestamp": "2025-11-30T13:23:24",                          │
│    "aggregated_signal": {                                       │
│      "final_signal": "BUY",                                     │
│      "final_confidence": 0.75,                                  │
│      "opportunities": {...}                                     │
│    },                                                           │
│    "quantitative_signal": {...},                                │
│    "llm_signal": {...}                                          │
│  }                                                              │
└─────────────────────────────────────────────────────────────────┘
```
## 🔄 Data Flow in Detail
### 1. Real-time data collection (runs 24/7)
```
Binance WebSocket → Ingestion Service → Redis Streams
      ↓                    ↓                  ↓
• Kline updates       • Dedup filter     • Durable storage
• Depth snapshots     • Buffering        • Time series
• Live trades         • Batch writes     • Replayable reads
```
**Highlights**:
- ✅ Low latency (< 100 ms)
- ✅ High reliability (auto-reconnect)
- ✅ Duplicate protection (dedup cache)
- ✅ Flow control (buffering + rate limiting)
### 2. Signal generation (scheduled or manual)
#### Mode A: Scheduled (scheduler service)
```
Triggered every N minutes
Read historical data from Redis
Run the full analysis pipeline
Save the signal to a file
```
**Config**: `SIGNAL_INTERVAL_MINUTES=5` (adjustable)
#### Mode B: Manual
```
Triggered by the user
docker compose exec ingestion python /app/scripts/generate_trading_signal.py
Generates a signal once
```
### 3. LLM call control flow
```
Quantitative signal generated
┌───────────────────┐
│  LLM Gate checks  │
│                   │
│ ✓ Frequency OK?   │────NO───→ skip the LLM, use the quantitative signal
│ ✓ Data OK?        │
│ ✓ Quality OK?     │
└───────┬───────────┘
        │ YES
Call the LLM API
Get the in-depth analysis
Fuse quantitative + LLM
Emit the final signal
```
**Gate safeguards**:
- 🚦 Prevent excessive calls (cost control)
- 🚦 Ensure data quality (analysis accuracy)
- 🚦 Spread calls sensibly over the day (avoid burning the quota at once)
## ⚙️ Service Dependencies
```
┌──────────┐
│  Redis   │ ← base service, must start first
└────┬─────┘
     ├─────→ ┌──────────────┐
     │       │  Ingestion   │ ← data collection, runs continuously
     │       └──────────────┘
     └─────→ ┌──────────────┐
             │  Scheduler   │ ← signal generation, optional
             └──────────────┘
```
**Startup order**:
1. Redis (with automatic health check)
2. Ingestion (depends on Redis being healthy)
3. Scheduler (depends on Redis being healthy; optional)
## 🗂️ Data Storage Layout
### Redis Streams structure
```
binance:raw:kline:5m
├─ message ID: 1701234567890-0
│   ├─ event_time: 1701234567890
│   ├─ symbol: BTCUSDT
│   ├─ kline_start_time: 1701234500000
│   ├─ kline_close_time: 1701234599999
│   ├─ open: "91650.10"
│   ├─ high: "91680.50"
│   ├─ low: "91620.00"
│   ├─ close: "91670.30"
│   ├─ volume: "123.45"
│   └─ ...
├─ message ID: 1701234567891-0
│   └─ ...
└─ (at most 10,000 entries retained)
```
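Note that the ingestion code actually stores each raw event as JSON under a single `data` field (see `analysis/data_reader.py`); the tree above is the logical view. A minimal Python sketch of reading the newest entry, assuming a locally reachable Redis and the `redis`/`orjson` packages this repo already uses:
```python
import orjson
import redis

r = redis.Redis(host="localhost", port=6379, decode_responses=False)

# XREVRANGE returns newest-first; fetch just the latest entry
entries = r.xrevrange("binance:raw:kline:5m", count=1)
if entries:
    msg_id, fields = entries[0]
    event = orjson.loads(fields[b"data"])  # raw Binance kline event
    k = event["k"]
    print(msg_id, k["o"], k["h"], k["l"], k["c"], k["v"])
```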
### Output file structure
```json
{
  "timestamp": "ISO-format timestamp",
  "aggregated_signal": {
    "final_signal": "BUY|SELL|HOLD",
    "final_confidence": 0.0-1.0,
    "consensus": "string description",
    "opportunities": {
      "intraday": "intraday opportunity details",
      "swing": "swing opportunity details",
      "ambush": "ambush-level details"
    },
    "levels": "price level details",
    "recommendation": "overall recommendation"
  },
  "market_analysis": "market analysis details",
  "quantitative_signal": "quantitative signal details",
  "llm_signal": "LLM signal details"
}
```
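Downstream consumers can treat this file as the system's public interface. A minimal sketch of reading it (the 0.7 confidence cut-off is just an example, not a system constant):
```python
import json
from pathlib import Path

signal = json.loads(Path("output/latest_signal.json").read_text())
agg = signal["aggregated_signal"]
# Act only on directional signals with reasonable confidence
if agg["final_signal"] in ("BUY", "SELL") and agg["final_confidence"] >= 0.7:
    print(agg["final_signal"], agg["final_confidence"], agg.get("recommendation"))
```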
## 🔐 Security and Configuration
### Environment variable precedence
```
1. docker-compose.yml (service defaults)
   ↓ overridden by
2. .env file (local configuration)
   ↓ overridden by
3. environment variables (runtime configuration)
```
### Protecting secrets
```
✅ Recommended: keep secrets in a .env.local file (added to .gitignore)
❌ Avoid: hard-coding API keys in docker-compose.yml
```
## 📊 Performance Characteristics
### Data ingestion service (ingestion)
- **Throughput**: ~1000 messages/sec
- **Latency**: < 100 ms (WebSocket → Redis)
- **Memory**: ~50-100 MB
- **CPU**: < 5% (idle)
### Signal generation service (scheduler)
- **Run time**: 2-5 s per run (no LLM), 10-30 s per run (with LLM)
- **Memory**: ~200-300 MB
- **CPU**: 10-20% (while computing), < 1% (while waiting)
### LLM API calls
- **Latency**: 5-20 s (Deepseek), 2-10 s (GPT-4)
- **Cost**: ~$0.001 per call (Deepseek), ~$0.01 per call (GPT-4)
- **Rate limiting**: enforced by the LLM Gate
## 🎯 Design Principles
1. **Separation of concerns**
   - Data collection ≠ signal generation
   - Each can be deployed and scaled independently
2. **Reliability first**
   - WebSocket auto-reconnect
   - Redis persistence
   - Graceful shutdown
3. **Cost under control**
   - Strict rate limiting via the LLM Gate
   - Call the LLM only in high-quality scenarios
4. **Flexible configuration**
   - Every parameter controlled by environment variables
   - Multiple run modes supported
5. **Observability**
   - Detailed logging
   - Health checks
   - Runtime statistics

131
CLEANUP_SUMMARY.md Normal file

@@ -0,0 +1,131 @@
# Code Cleanup Summary
## Cleanup date
2025-11-30
## 1. Deleted test log files (10)
✅ All test log files removed:
- signal_simplified_gate.log
- signal_optimized_gate.log
- signal_fixed_indicators.log
- signal_clean_prompt.log
- signal_optimized_prompt.log
- signal_mtf.log
- signal_with_daily_weekly.log
- signal_final_test.log
- signal_with_prompt.log
- signal_opportunities.log
## 2. Removed dead code
### signals/llm_gate.py
**Deleted methods (3):**
- `_check_delivery_time()` - settlement-window check (no longer used)
- `_check_market_health()` - market health check (no longer used)
- `_identify_high_value_scenario()` - high-value scenario detection (no longer used)
**Slimmed-down parameters (from 10 to 4):**
- ❌ Removed: `volatility_min`, `volatility_max`, `min_liquidity_depth`
- ❌ Removed: `min_consensus_score`, `min_volume_ratio`
- ✅ Kept: `min_candles`, `min_composite_score`, `max_calls_per_day`, `min_call_interval_minutes`
**Simplified check logic:**
- Reduced from 6 layers of complex checks to 3 minimal layers
- Only keeps: frequency limit + basic data availability + composite score
### config/settings.py
**Deleted settings (5):**
- ❌ `LLM_VOLATILITY_MIN`
- ❌ `LLM_VOLATILITY_MAX`
- ❌ `LLM_MIN_LIQUIDITY`
- ❌ `LLM_MIN_CONSENSUS`
- ❌ `LLM_MIN_VOLUME_RATIO`
**Kept settings (4):**
- ✅ `LLM_MIN_CANDLES`
- ✅ `LLM_MIN_COMPOSITE_SCORE`
- ✅ `LLM_MAX_CALLS_PER_DAY`
- ✅ `LLM_MIN_INTERVAL_MINUTES`
### .env
**Deleted environment variables (5):**
Same as settings.py
### scripts/generate_trading_signal.py
**Simplified LLMGate initialization:**
- Parameter passing reduced from 13 lines to 4
- All unused parameters removed
## 3. Updated docstrings
### signals/llm_gate.py
**Before:**
```
LLM Gate - advanced gating system; strictly controls LLM call frequency and quality.
Call the LLM only when:
1. Market state is healthy (moderate volatility, ample liquidity, normal funding rate)
2. Signal quality is high (multi-timeframe agreement, volume confirmation, order book support, consensus ≥ 0.75)
3. The scenario is high-value (trend pullback, real-vs-fake breakout, multi-factor conflict, major event)
4. Frequency allows (at most 3-5 calls/day, interval ≥ 30 min, avoid settlement windows and extreme conditions)
```
**After:**
```
LLM Gate - minimal gating system, frequency control first.
Core principles:
1. Frequency limits - at most 12 calls/day, interval ≥ 15 min (the core control)
2. Basic data availability - at least 100 candles, core indicators present
3. Basic signal quality - composite score ≥ 15 (filters only fully neutral signals)
```
## 4. Test results
✅ All cleaned-up code passes its tests:
- LLM Gate initializes in minimal mode
- Frequency limit works (6.3 minutes since last call < 15 minutes → blocked)
- Signal generation works (quantitative signal BUY, composite score 51.5)
- No errors or warnings
## 5. Lines of code removed
| File | Before | After | Reduction |
|------|--------|-------|-----------|
| signals/llm_gate.py | ~437 | ~280 | ~157 (-36%) |
| config/settings.py | ~87 | ~76 | ~11 (-13%) |
| scripts/generate_trading_signal.py | ~331 | ~322 | ~9 (-3%) |
| .env | ~53 | ~45 | ~8 (-15%) |
**Total removed: ~185 lines of code**
## 6. Outcome
- ✅ Simpler, more maintainable code
- ✅ Fewer parameters, simpler configuration
- ✅ Clearer logic, accurate comments
- ✅ No dead code, no test logs
- ✅ Full functionality, tests passing
## 7. Core improvement
**Before (complex mode):**
- 6 check layers: frequency + settlement window + data sufficiency + market health + signal quality + high-value scenario
- 10 configurable parameters
- Strict thresholds (consensus ≥ 0.75, volatility 0.8%-2.5%, volume ≥ 1.2x)
**After (minimal mode):**
- 3 check layers: frequency + data sufficiency + composite score
- 4 configurable parameters
- Loose thresholds (composite score ≥ 15, 12 calls/day)
**Goals met:**
- ✅ Frequency control first (prevents excessive calls)
- ✅ Quantitative pre-screening second (filters fully neutral signals)
- ✅ Maximizes opportunities for in-depth LLM analysis

363
DINGTALK_SETUP.md Normal file
View File

@@ -0,0 +1,363 @@
# DingTalk Notification Setup Guide
## 📱 Overview
The system can push trading-signal notifications through a DingTalk group bot, including:
- ✅ Real-time signal pushes (BUY/SELL)
- ✅ Quantitative analysis + AI deep-analysis results
- ✅ Intraday / swing / ambush opportunity details
- ✅ Entry / stop-loss / take-profit suggestions
- ✅ Risk assessment and an overall recommendation
- ✅ Markdown formatting for readability
## 🔧 Setup Steps
### Step 1: Create a DingTalk group bot
1. **Open the DingTalk group**
   - On desktop or mobile, open the group that should receive notifications
2. **Add a custom bot**
   - Group settings → Group Assistant → Add Robot
   - Choose the "Custom" robot
   - Click "Add"
3. **Configure the bot**
   - **Name**: e.g. "Trading Signal Assistant"
   - **Security settings**:
     - ✅ Check "Sign" (recommended, more secure)
     - or check "Custom keywords" (e.g.: signal, trade)
     - or check "IP address range" (if your IP is fixed)
4. **Collect the credentials**
   - **Webhook URL**: copy the URL of the form `https://oapi.dingtalk.com/robot/send?access_token=xxxxx`
   - **Signing secret**: if you chose "Sign", copy the secret starting with `SEC`
5. **Finish**
   - Click "Done"
   - The bot is now in the group
---
### Step 2: Configure the environment variables
There are two ways to configure it:
#### Option A: Edit docker-compose.yml (recommended)
Edit `docker-compose.yml`, find the `scheduler` service, uncomment and fill in:
```yaml
scheduler:
  environment:
    # DingTalk Notification
    - DINGTALK_WEBHOOK=https://oapi.dingtalk.com/robot/send?access_token=your_access_token
    - DINGTALK_SECRET=SECyour_signing_secret  # if signing is enabled
```
**Example**:
```yaml
scheduler:
  environment:
    # DingTalk Notification
    - DINGTALK_WEBHOOK=https://oapi.dingtalk.com/robot/send?access_token=1a2b3c4d5e6f7g8h9i0j
    - DINGTALK_SECRET=SEC1a2b3c4d5e6f7g8h9i0j1k2l3m4n5o6p7q8r9s0
```
#### Option B: Use a .env.local file
Create a `.env.local` file:
```bash
# DingTalk Configuration
DINGTALK_WEBHOOK=https://oapi.dingtalk.com/robot/send?access_token=your_access_token
DINGTALK_SECRET=SECyour_signing_secret
```
Then add to `docker-compose.yml`:
```yaml
scheduler:
  env_file:
    - .env.local
```
⚠️ **Important**: add `.env.local` to `.gitignore` so secrets never leak
---
### Step 3: Restart the services
```bash
# Stop the services
docker compose down
# Rebuild the image (if requirements.txt changed)
docker compose build scheduler
# Start the services
docker compose --profile scheduler up -d
# Check the logs to confirm DingTalk is enabled
docker compose logs scheduler
```
**Expected log line**:
```
📱 DingTalk notifications enabled - Webhook: https://oapi.dingtalk.com/robot/send?access...
```
---
## 📊 Message Examples
### BUY signal example
```markdown
# 🟢 Trading Signal: BUY
**Time**: 2025-11-30 21:45:23
**Confidence**: 75.0%
**Consensus**: QUANT_LLM_AGREE
---
## 📊 Quantitative Analysis
- **Signal**: BUY
- **Composite score**: 51.5
- **Confidence**: 75.0%
- **Trend score**: 99.0
- **Momentum score**: 75.0
- **Order flow score**: -7.8
## 🤖 AI Deep Analysis
- **Signal**: BUY
- **Confidence**: 60.0%
- **Risk level**: MEDIUM
### 📊 Intraday opportunity
- **Direction**: LONG
- **Entry**: $91,128.00
- **Stop-loss**: $91,000.00
- **Take-profit**: $91,342.00
- **Notes**: the 5m and 15m timeframes are trending up, the MACD golden cross is widening...
### 📌 Ambush level
- **Ambush price**: $90,612.00
- **Notes**: wait for a pullback to the 4h support around 90612...
### 💡 AI analysis
> The market shows divergent trends across timeframes: the daily is falling while short timeframes rise, forming a range...
**Key factors**:
- Divergent multi-timeframe trends
- Shrinking volume
- RSI in the neutral zone
## 💰 Price Levels
- **Current price**: $91,650.10
- **Entry**: $91,650.10
- **Stop-loss**: $90,985.52
- **Target 1**: $92,646.96
- **Target 2**: $93,311.54
- **Target 3**: $94,308.40
- **Risk/reward**: 1:1.50
## 📝 Recommendation
> Quantitative and AI analyses agree on the long side; consider buying dips on pullbacks
---
*This signal is generated automatically by an AI quant system, for reference only; not investment advice*
```
### SELL signal example
```markdown
# 🔴 Trading Signal: SELL
**Time**: 2025-11-30 22:30:15
**Confidence**: 68.0%
**Consensus**: QUANT_ONLY
---
[same layout, with the signal set to SELL]
```
---
## 🎯 Notification Rules
### When notifications are sent
- ✅ **BUY signal**: sent immediately
- ✅ **SELL signal**: sent immediately
- ❌ **HOLD signal**: not sent (avoids noise)
### Message dedup
- Only the most recent signal is sent each time
- The same signal is never sent twice
---
## 🔐 Security Recommendations
### 1. Use signing (strongly recommended)
Signing authenticates the request origin and prevents webhook abuse:
```yaml
- DINGTALK_SECRET=SECyour_signing_secret
```
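For reference, the signature follows DingTalk's documented scheme: HMAC-SHA256 over `"{timestamp}\n{secret}"`, Base64-encoded, then URL-encoded and appended to the webhook. A minimal sketch (standard library only):
```python
import base64
import hashlib
import hmac
import time
import urllib.parse

def signed_webhook(webhook: str, secret: str) -> str:
    """Append timestamp and sign parameters to a DingTalk webhook URL."""
    timestamp = str(round(time.time() * 1000))
    string_to_sign = f"{timestamp}\n{secret}"
    digest = hmac.new(secret.encode("utf-8"), string_to_sign.encode("utf-8"),
                      digestmod=hashlib.sha256).digest()
    sign = urllib.parse.quote_plus(base64.b64encode(digest))
    return f"{webhook}&timestamp={timestamp}&sign={sign}"
```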
### 2. 限制IP地址可选
如果服务器IP固定可以在钉钉机器人设置中配置IP白名单。
### 3. 保护webhook地址
- ❌ 不要将webhook地址提交到Git仓库
- ❌ 不要在公开场合分享webhook地址
- ✅ 使用 `.env.local` 文件存储
- ✅ 添加 `.env.local``.gitignore`
---
## 🐛 Troubleshooting
### Problem 1: No notifications arrive
**Checklist**:
1. **Confirm the DingTalk configuration is active**
```bash
docker compose logs scheduler | grep "DingTalk"
```
Expected output: `📱 DingTalk notifications enabled`
2. **Confirm a BUY/SELL signal was actually generated**
```bash
docker compose logs scheduler | grep "notification"
```
3. **Check the bot configuration**
- Is the webhook URL correct?
- Does the signing secret match?
- Are the keywords configured correctly?
4. **Test the webhook manually**
```bash
docker compose exec scheduler python -c "
from notifiers.dingtalk import DingTalkNotifier
import os
notifier = DingTalkNotifier(
    webhook_url=os.getenv('DINGTALK_WEBHOOK'),
    secret=os.getenv('DINGTALK_SECRET')
)
notifier.send_markdown('Test', '# Test message\n\nThis is a test message')
"
```
### Problem 2: Sending fails
**Possible causes**:
1. **Wrong webhook URL**
   - Check that the full webhook URL was copied
   - Make sure the access_token is complete
2. **Wrong signing secret**
   - Check that the secret starts with `SEC`
   - Make sure the secret is complete
3. **Network problems**
   - Check that the container can reach the internet
   - Try `docker compose exec scheduler ping oapi.dingtalk.com`
4. **Keyword mismatch**
   - If using "custom keywords", make sure the message contains one
   - The "Sign" mode is recommended instead
### Problem 3: Broken message formatting
**Symptom**: messages arrive but the layout is garbled
**Fix**:
- Make sure you are running the latest code
- Check the logs for errors
- Report the issue to the maintainers
---
## 📞 Supported Notification Scenarios
### Supported today
- ✅ Trading signal notifications (BUY/SELL)
- ✅ Quantitative + AI analysis included
- ✅ Price levels and risk guidance included
### Planned
- ⏳ System error notifications
- ⏳ Daily summary reports
- ⏳ Major market event alerts
- ⏳ Position management reminders
---
## 🔄 Disabling Notifications
To temporarily turn DingTalk notifications off:
### Option 1: Comment out the environment variables
Edit `docker-compose.yml`:
```yaml
scheduler:
  environment:
    # - DINGTALK_WEBHOOK=...  # commented out
    # - DINGTALK_SECRET=...   # commented out
```
### Option 2: Remove the bot
Simply remove the bot from the DingTalk group.
### Restart the service
```bash
docker compose --profile scheduler restart scheduler
```
---
## 📝 Best Practices
1. **Dedicated group**: create a dedicated group for trading signals so they don't drown out other discussion
2. **Environment isolation**: use different webhooks for test and production
3. **Regular checks**: verify notifications periodically so you don't miss important signals
4. **Back up the configuration**: keep a backup of the webhook and secret
5. **Security first**: always use signing; never share the webhook URL
---
## 🎉 Done
Once configured, the system automatically sends a DingTalk notification whenever a BUY/SELL signal is generated!
**Next steps**:
- Wait for the next scheduled signal (per SIGNAL_INTERVAL_MINUTES)
- Or trigger one manually: `docker compose exec scheduler python /app/scripts/generate_trading_signal.py`
- Watch the notification arrive in the group
Happy trading! 🚀

48
Dockerfile Normal file

@@ -0,0 +1,48 @@
# Multi-stage build for optimized image size
FROM python:3.11-slim as builder
WORKDIR /app
# Install build dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
gcc \
git \
&& rm -rf /var/lib/apt/lists/*
# Copy requirements and install Python packages
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Final stage
FROM python:3.11-slim
WORKDIR /app
# Copy Python packages from builder
COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
# Copy application code
COPY config ./config
COPY core ./core
COPY analysis ./analysis
COPY signals ./signals
COPY scripts ./scripts
COPY notifiers ./notifiers
COPY main.py .
COPY scheduler.py .
COPY .env.example .env
# Create non-root user for security
RUN useradd -m -u 1000 appuser && \
chown -R appuser:appuser /app
USER appuser
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD python -c "import sys; sys.exit(0)"
# Run application
CMD ["python", "-u", "main.py"]

97
Makefile Normal file

@@ -0,0 +1,97 @@
.PHONY: help build up down logs restart clean test monitor redis-cli
# Detect Docker Compose command (v1 or v2)
DOCKER_COMPOSE := $(shell command -v docker-compose 2>/dev/null)
ifndef DOCKER_COMPOSE
DOCKER_COMPOSE := docker compose
endif
help:
@echo "Binance Real-time Data Ingestion - Quick Commands"
@echo ""
@echo "Setup:"
@echo " make setup - Initial setup (copy .env, build images)"
@echo " make build - Build Docker images"
@echo ""
@echo "Operations:"
@echo " make up - Start all services"
@echo " make down - Stop all services"
@echo " make restart - Restart all services"
@echo " make logs - View application logs (follow)"
@echo ""
@echo "Monitoring:"
@echo " make monitor - Show system status and statistics"
@echo " make redis-cli - Open Redis CLI"
@echo " make test-read - Test reading data from Redis"
@echo ""
@echo "Maintenance:"
@echo " make clean - Stop and remove all containers, volumes"
@echo " make clean-data - Remove Redis data volume"
@echo ""
setup:
@echo "Setting up environment..."
@cp -n .env.example .env || true
@chmod +x scripts/*.sh scripts/*.py 2>/dev/null || true
@echo "✓ Environment file created (.env)"
@echo "✓ Please edit .env if needed"
@$(MAKE) build
build:
@echo "Building Docker images..."
@$(DOCKER_COMPOSE) build
@echo "✓ Build completed"
up:
@echo "Starting services..."
@$(DOCKER_COMPOSE) up -d
@echo "✓ Services started"
@echo ""
@$(MAKE) logs
down:
@echo "Stopping services..."
@$(DOCKER_COMPOSE) down
@echo "✓ Services stopped"
restart:
@$(MAKE) down
@$(MAKE) up
logs:
@$(DOCKER_COMPOSE) logs -f ingestion
monitor:
@bash scripts/monitor.sh
redis-cli:
@docker exec -it tradus-redis redis-cli
test-read:
@python scripts/test_redis_read.py
clean:
@echo "Cleaning up..."
@$(DOCKER_COMPOSE) down -v
@docker system prune -f
@echo "✓ Cleanup completed"
clean-data:
@echo "Removing Redis data volume..."
@docker volume rm realtime-ingestion_redis_data || true
@echo "✓ Data volume removed"
# Development
dev:
@echo "Starting in development mode with Redis UI..."
@$(DOCKER_COMPOSE) --profile debug up -d
@echo "✓ Services started with Redis Commander"
@echo " Redis UI: http://localhost:8081"
# Health check
health:
@echo "Checking service health..."
@$(DOCKER_COMPOSE) ps
@echo ""
@docker exec tradus-redis redis-cli PING
@echo "✓ Health check completed"

339
NOTIFICATION_SUMMARY.md Normal file

@@ -0,0 +1,339 @@
# DingTalk Notification Feature Summary
## ✅ Completed Features
### 1. Core module
#### notifiers/dingtalk.py
- ✅ DingTalk Markdown message delivery
- ✅ Signed requests (security hardening)
- ✅ Trading signal formatting
- ✅ Error notification support
- ✅ Delivery statistics and retry logic
#### scheduler.py integration
- ✅ DingTalk notifier initialized automatically
- ✅ Sends automatically after BUY/SELL signals are generated
- ✅ HOLD signals are not sent (less noise)
- ✅ Exception handling and logging
### 2. Configuration
#### docker-compose.yml
```yaml
scheduler:
  environment:
    - DINGTALK_WEBHOOK=https://oapi.dingtalk.com/robot/send?access_token=...
    - DINGTALK_SECRET=SEC...
```
#### requirements.txt
- ✅ Added the requests==2.31.0 dependency
### 3. Documentation
- ✅ DINGTALK_SETUP.md - detailed setup guide
- ✅ Covers bot creation steps
- ✅ Covers troubleshooting
- ✅ Includes message examples
---
## 📊 Message Format Features
### Supported message types
1. **Trading signal notifications** (BUY/SELL)
   - Signal type and confidence
   - Quantitative analysis details
   - AI deep analysis (when enabled)
   - Opportunity breakdown:
     - 📊 Intraday
     - 📈 Swing
     - 📌 Ambush levels
   - Price level suggestions (entry / stop-loss / take-profit)
   - Risk/reward ratio
   - Recommendation and warnings
2. **System error notifications** (planned)
   - Error message
   - Context information
   - @everyone
### Markdown formatting
- ✅ Clear heading hierarchy
- ✅ Signal-type emoji markers (🟢 BUY / 🔴 SELL / 🟡 HOLD)
- ✅ Key figures in bold
- ✅ AI analysis as block quotes
- ✅ Price levels and factors as lists
- ✅ Horizontal rules between sections
- ✅ Disclaimer footer
---
## 🔐 Security Features
### 1. Signed requests
```python
# Generate the timestamp and signature automatically
# (HMAC-SHA256 → Base64 → URL-encode; given: webhook (str), secret (str))
import base64, hashlib, hmac, time, urllib.parse

timestamp = str(round(time.time() * 1000))
digest = hmac.new(secret.encode(), f"{timestamp}\n{secret}".encode(),
                  digestmod=hashlib.sha256).digest()
sign = urllib.parse.quote_plus(base64.b64encode(digest))
url = f"{webhook}&timestamp={timestamp}&sign={sign}"
```
### 2. Secret protection
- ✅ Webhook and secret are provided via environment variables
- ✅ Never hard-coded in the source
- ✅ A .env.local file is recommended
### 3. Send throttling
- ✅ Sends only on BUY/SELL signals
- ✅ HOLD signals are skipped
- ✅ Avoids notification spam
---
## 📱 Current Configuration
### Configured values
```
Webhook: https://oapi.dingtalk.com/robot/send?access_token=9438788...
Secret:  SEC88678d8970f0882a1cca36476b92947409fea5f562a09db4cd03524...
Status:  ✅ enabled
```
### Test results
```
✅ Test message sent successfully
✅ Simulated trading signal sent successfully
✅ Statistics look correct
   - successes: 2
   - failures: 0
   - success rate: 100%
```
---
## 🚀 Usage
### Automatic (recommended)
With the scheduler service running, the system automatically:
1. Generates a trading signal every 5 minutes
2. Sends a DingTalk notification when the signal is BUY or SELL
3. Skips the notification when the signal is HOLD
```bash
# Start automatic signal generation
docker compose --profile scheduler up -d
# Watch the logs
docker compose logs -f scheduler
```
### Manual testing
```bash
# Test the DingTalk notifier
docker compose exec ingestion python -c "
from notifiers.dingtalk import DingTalkNotifier
import os
notifier = DingTalkNotifier(
    webhook_url=os.getenv('DINGTALK_WEBHOOK'),
    secret=os.getenv('DINGTALK_SECRET')
)
notifier.send_markdown('Test', '# Test message\n\nThis is a test message')
"
# Generate a signal manually (sends the notification automatically)
docker compose exec ingestion python /app/scripts/generate_trading_signal.py
```
---
## 📊 Expected Output
### The DingTalk group receives
#### Test message
```markdown
# 🚀 DingTalk Notification Test
This is a test message verifying that the webhook is configured correctly.
**Test time**: 2025-11-30 14:55:00
---
*If you can read this, the configuration works!*
```
#### Real trading signal (example)
```markdown
# 🟢 Trading Signal: BUY
**Time**: 2025-11-30 14:55:28
**Confidence**: 75.0%
**Consensus**: QUANT_LLM_AGREE
---
## 📊 Quantitative Analysis
- **Signal**: BUY
- **Composite score**: 51.5
- **Confidence**: 75.0%
- **Trend score**: 99.0
- **Momentum score**: 75.0
- **Order flow score**: -7.8
## 🤖 AI Deep Analysis
- **Signal**: BUY
- **Confidence**: 60.0%
- **Risk level**: MEDIUM
### 📊 Intraday opportunity
- **Direction**: LONG
- **Entry**: $91,128.00
- **Stop-loss**: $91,000.00
- **Take-profit**: $91,342.00
- **Notes**: the 5m and 15m timeframes are trending up, the MACD golden cross is widening...
### 📌 Ambush level
- **Ambush price**: $90,612.00
- **Notes**: wait for a pullback to the 4h support around 90612...
### 💡 AI analysis
> The market shows divergent trends across timeframes: the daily is falling while short timeframes rise...
**Key factors**:
- Divergent multi-timeframe trends
- Shrinking volume
- RSI in the neutral zone
## 💰 Price Levels
- **Current price**: $91,650.10
- **Entry**: $91,650.10
- **Stop-loss**: $90,985.52
- **Target 1**: $92,646.96
- **Target 2**: $93,311.54
- **Target 3**: $94,308.40
- **Risk/reward**: 1:1.50
## 📝 Recommendation
> Quantitative and AI analyses agree on the long side; consider buying dips on pullbacks
## ⚠️ Risk Warning
- The market is volatile; manage risk carefully
---
*This signal is generated automatically by an AI quant system, for reference only; not investment advice*
```
---
## 🔍 Log Examples
### Successful send
```
📱 Sending DingTalk notification...
✅ DingTalk message sent: 🚨 Trading signal: BUY (confidence: 75%)
✅ DingTalk notification delivered
```
### Skipped
```
HOLD signal - DingTalk notification skipped
```
### Not configured
```
⚠️ DingTalk notification failed or not configured
```
---
## 🛠️ Implementation Details
### Dependencies
- requests==2.31.0 - HTTP requests
- hmac, hashlib, base64 - request signing (standard library)
### Key classes and methods
#### DingTalkNotifier
```python
class DingTalkNotifier:
    def __init__(self, webhook_url, secret, enabled): ...
    def send_markdown(self, title, text, at_mobiles, at_all) -> bool: ...
    def send_signal(self, aggregated_signal) -> bool: ...
    def send_error(self, error_msg, context) -> bool: ...
    def get_stats(self) -> dict: ...
```
#### SignalScheduler integration
```python
# Initialization
self.dingtalk = DingTalkNotifier(
    webhook_url=os.getenv('DINGTALK_WEBHOOK'),
    secret=os.getenv('DINGTALK_SECRET')
)
# Sending
if final_signal in ['BUY', 'SELL']:
    self.dingtalk.send_signal(aggregated)
```
---
## 🎯 Future Improvements
### Short term
- ✅ Core functionality complete
- ⏳ Message dedup (avoid re-sending the same signal within a short window)
- ⏳ @-mention specific people
- ⏳ Custom message templates
### Medium term
- ⏳ Daily trading summary reports
- ⏳ Real-time system error alerts
- ⏳ Major market event pushes (e.g. sharp moves)
- ⏳ Signal accuracy statistics and backtest result pushes
### Long term
- ⏳ WeCom (WeChat Work) support
- ⏳ Telegram support
- ⏳ Email notifications
- ⏳ Webhook callbacks for third-party integration
---
## ✅ Acceptance Checklist
- [x] notifiers module created
- [x] DingTalkNotifier class implemented
- [x] Request signing
- [x] Markdown formatting
- [x] scheduler integration
- [x] docker-compose configuration
- [x] requirements.txt updated
- [x] Setup documentation written
- [x] Test message sent successfully
- [x] Simulated signal sent successfully
- [x] User configuration applied
---
## 📞 Support
If you run into problems:
1. DINGTALK_SETUP.md - setup guide
2. Check the logs: `docker compose logs scheduler`
3. Check the stats: call `notifier.get_stats()`
---
**Status**: ✅ fully implemented and tested
**Version**: 1.0.0
**Date**: 2025-11-30

250
QUICK_START.md Normal file

@@ -0,0 +1,250 @@
# 🚀 Tradus AI Quick Start Guide
## 📋 Contents
- [Starting the system](#starting-the-system)
- [Running an analysis manually](#running-an-analysis-manually)
- [Viewing results](#viewing-results)
- [Log monitoring](#log-monitoring)
- [System management](#system-management)
---
## 🎯 Starting the System
### Option 1: Helper scripts (recommended)
```bash
# Start the full system (data ingestion + automatic signal generation)
./start_system.sh
# Stop the system
./stop_system.sh
```
### Option 2: Docker Compose
```bash
# Start the full system
docker compose --profile scheduler up -d
# Start data ingestion only (no signal generation)
docker compose up -d
# Stop the system
docker compose --profile scheduler down
```
---
## 🔍 Running an Analysis Manually
### Run one full quantitative + LLM analysis
```bash
# Using the helper script (recommended)
./run_signal.sh
# Or run directly
docker compose exec scheduler python /app/scripts/generate_trading_signal.py
```
**The system automatically**:
- ✅ Runs the quantitative analysis
- ✅ Calls the DeepSeek LLM analysis (if the Gate passes)
- ✅ Aggregates both results
- ✅ Saves to `output/latest_signal.json`
- ✅ Sends a DingTalk notification for BUY/SELL signals
---
## 📊 Viewing Results
### View the latest signal
```bash
# Using the helper script (pretty-printed)
./view_signal.sh
# Or dump the full JSON
cat output/latest_signal.json | python -m json.tool
```
### What a signal contains
- **Final signal**: BUY / SELL / HOLD
- **Confidence**: 0-100%
- **Consensus status**: QUANT_LLM_AGREE / LLM_LEADING / QUANT_ONLY
- **Quantitative analysis**: trend, momentum, order flow scores
- **LLM analysis**: reasoning, trade plan, risk assessment
- **Price suggestions**: entry / stop-loss / take-profit
---
## 📋 Log Monitoring
### Tail the live logs
```bash
# Using the helper script
./view_logs.sh scheduler
# Or directly
docker compose logs -f scheduler --tail 50
```
### Service-specific logs
```bash
# Data ingestion logs
docker compose logs -f ingestion
# Redis logs
docker compose logs -f redis
# All services
docker compose logs -f
```
---
## 🎛️ System Management
### Check service status
```bash
docker compose ps
```
### Restart services
```bash
# Restart the scheduler
docker compose restart scheduler
# Restart everything
docker compose restart
```
### Rebuild
```bash
# Rebuild the scheduler (apply code changes)
docker compose build scheduler
docker compose --profile scheduler up -d
```
### Full cleanup and restart
```bash
# Stop and remove all containers and volumes
docker compose --profile scheduler down -v
# Start fresh
./start_system.sh
```
---
## ⚙️ Configuration Tweaks
### LLM Gate threshold
Edit `config/settings.py`:
```python
LLM_MIN_COMPOSITE_SCORE: float = 15.0  # lower to 5.0 to see more LLM analyses
```
### Signal generation frequency
Edit `docker-compose.yml`:
```yaml
environment:
  - SIGNAL_INTERVAL_MINUTES=5  # change to the interval you want (minutes)
```
### DingTalk settings
Edit `docker-compose.yml`:
```yaml
environment:
  - DINGTALK_WEBHOOK=your_webhook_url
  - DINGTALK_SECRET=your_secret
```
---
## 🔔 DingTalk Notification Rules
- ✅ **BUY signal**: concise notification sent automatically
- ✅ **SELL signal**: concise notification sent automatically
- ❌ **HOLD signal**: not sent (less noise)
**Notification contents**:
- Signal type and confidence
- Current price
- Trade plan (entry / stop-loss / take-profit)
- Risk/reward ratio
- A short rationale
---
## 📞 FAQ
### Q: How do I force the LLM analysis to run every time?
A: Lower the Gate threshold to 5.0 or below:
```bash
# Temporarily lower the threshold, then restart
docker compose restart scheduler
```
### Q: How do I inspect DeepSeek call activity?
A: Grep the logs for LLM-related lines:
```bash
docker compose logs scheduler | grep -E "LLM|Deepseek"
```
### Q: Where is the signal file?
A: `output/latest_signal.json`
---
## 🎯 Typical Workflow
```bash
# 1. Start the system
./start_system.sh
# 2. Watch the logs (in a new terminal)
./view_logs.sh scheduler
# 3. Run an analysis manually (optional)
./run_signal.sh
# 4. View the result
./view_signal.sh
# 5. Stop the system (when done)
./stop_system.sh
```
---
## 📚 Further Reading
- `DINGTALK_SETUP.md` - detailed DingTalk setup guide
- `NOTIFICATION_SUMMARY.md` - notification feature summary
- `README.md` - full system documentation
---
**System version**: 1.0.0
**Last updated**: 2025-12-01

421
README.md Normal file

@@ -0,0 +1,421 @@
# Binance Real-time Data Ingestion System
A production-grade Binance WebSocket real-time data ingestion system for intraday crypto trading support.
## Features
### Core features
- **Multi-stream subscription**: subscribes to klines, order book depth, and live trades simultaneously
- **Auto-reconnect**: exponential backoff; recovers automatically from network outages
- **Message dedup**: LRU-cache dedup keyed on the event timestamp (E field); see the sketch after this list
- **Memory protection**: rate limiting + bounded buffers to prevent unbounded growth
- **Streaming storage**: data lands in Redis Streams and supports multiple consumers
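The dedup idea reduces to a bounded first-seen cache keyed on the event timestamp; a rough sketch of the technique (not the exact implementation in `core`):
```python
from collections import OrderedDict

class DedupCache:
    """Bounded first-seen cache: flags messages whose key was seen recently."""

    def __init__(self, max_size: int = 10_000):
        self.max_size = max_size
        self._seen: OrderedDict = OrderedDict()

    def is_duplicate(self, stream: str, event_time: int) -> bool:
        key = (stream, event_time)        # event_time is the E field of the payload
        if key in self._seen:
            self._seen.move_to_end(key)   # refresh its LRU position
            return True
        self._seen[key] = None
        if len(self._seen) > self.max_size:
            self._seen.popitem(last=False)  # evict the oldest entry
        return False
```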
### Production-grade features
- Async I/O (asyncio) for high-throughput processing
- Batched Redis writes to cut network overhead
- Health checks and performance monitoring
- Graceful shutdown and signal handling
- Docker-based deployment
- Comprehensive logging and statistics
---
## 系统架构
```
┌─────────────────────────────────────────────────────────────────┐
│ Binance WebSocket API │
│ wss://fstream.binance.com/stream │
└────────────────────────┬────────────────────────────────────────┘
│ Multi-stream subscription
│ (kline_5m, depth20, aggTrade)
┌─────────────────────────────────────────────────────────────────┐
│ WebSocket Client (Auto-reconnect) │
│ - Exponential backoff │
│ - Heartbeat monitoring │
│ - Connection pooling │
└────────────────────────┬────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│ Message Deduplicator │
│ - LRU cache (10,000 entries) │
│ - Event time (E field) based │
│ - TTL: 5 minutes │
└────────────────────────┬────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│ Buffered Message Processor │
│ - Max buffer: 1,000 messages │
│ - Rate limit: 1,000 msg/sec │
│ - Batch processing │
└────────────────────────┬────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│ Redis Stream Writer │
│ Stream keys: │
│ - binance:raw:kline:5m (klines) │
│ - binance:raw:depth:20 (order book depth) │
│ - binance:raw:trade (live trades) │
│ │
│ MAXLEN: ~10,000 (auto-trim) │
└─────────────────────────────────────────────────────────────────┘
```
---
## Quick Start
### Prerequisites
- Docker & Docker Compose
- Network access to the Binance API
### 1. Start the system
```bash
# Clone the repository
cd realtime-ingestion
# Copy the environment template
cp .env.example .env
# Start all services (Redis + ingestion)
docker-compose up -d
# Tail the logs
docker-compose logs -f ingestion
```
### 2. Verify data ingestion
```bash
# Enter the Redis container
docker exec -it tradus-redis redis-cli
# List all stream keys
KEYS binance:raw:*
# Count kline entries
XLEN binance:raw:kline:5m
# Read the 10 most recent klines
XREVRANGE binance:raw:kline:5m + - COUNT 10
# Watch for new data in real time (blocking read)
XREAD BLOCK 0 STREAMS binance:raw:trade $
```
### 3. Optional: use the Web UI
```bash
# Start Redis Commander (web UI)
docker-compose --profile debug up -d redis-commander
# Open: http://localhost:8081
```
---
## Configuration
### Environment variables (.env)
```bash
# Binance settings
SYMBOL=btcusdt                      # trading pair
KLINE_INTERVAL=5m                   # kline interval
BINANCE_WS_BASE_URL=wss://fstream.binance.com
# Redis settings
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_STREAM_MAXLEN=10000           # max stream length
# Performance tuning
MAX_BUFFER_SIZE=1000                # max buffer size
RATE_LIMIT_MESSAGES_PER_SEC=1000    # max messages processed per second
DEDUP_CACHE_SIZE=10000              # dedup cache size
# Reconnect policy
RECONNECT_INITIAL_DELAY=1.0         # initial reconnect delay (seconds)
RECONNECT_MAX_DELAY=60.0            # max reconnect delay (seconds)
MAX_RECONNECT_ATTEMPTS=100          # max reconnect attempts (-1 = unlimited)
# Monitoring
HEALTH_CHECK_INTERVAL=30            # health check interval (seconds)
LOG_LEVEL=INFO                      # log level
```
---
## Data Format
See [REDIS_DATA_EXAMPLES.md](./REDIS_DATA_EXAMPLES.md) for details.
### Redis Stream keys
| Stream key | Data type | Update frequency | Notes |
|------------|-----------|------------------|-------|
| `binance:raw:kline:5m` | Kline | every 5 minutes | OHLCV data |
| `binance:raw:depth:20` | Order book | 100 ms | top 20 bid/ask levels |
| `binance:raw:trade` | Trade | real time | aggregated trade records |
### Sample data
**Kline payload:**
```json
{
  "e": "kline",
  "E": 1701234567890,
  "s": "BTCUSDT",
  "k": {
    "o": "42350.50",
    "h": "42400.00",
    "l": "42340.10",
    "c": "42385.20",
    "v": "125.4563"
  }
}
```
---
## Monitoring and Operations
### System status
```bash
# Container status
docker-compose ps
# Live logs
docker-compose logs -f ingestion
# Redis memory usage
docker exec tradus-redis redis-cli INFO memory
```
### Health checks
The system logs a health line every 30 seconds:
```
Health Check | WebSocket: ✓ | Redis: ✓ | Buffer: 15.2% | Dedup: 2.34% | Written: 12345
```
### Performance metrics
Periodically reported in the logs:
- **WebSocket status**: whether the connection is healthy
- **Redis status**: whether writes are succeeding
- **Buffer utilization**: memory pressure indicator
- **Dedup rate**: fraction of duplicate messages
- **Messages written**: cumulative volume processed
---
## Troubleshooting
### 1. WebSocket connection failures
**Symptom**: logs show "WebSocket connection closed"
**Fix**:
```bash
# Check network connectivity
ping fstream.binance.com
# Check firewall rules
# Allow outbound HTTPS (443) and WebSocket connections
# Restart the service
docker-compose restart ingestion
```
### 2. Redis connection failures
**Symptom**: "Failed to connect to Redis"
**Fix**:
```bash
# Check whether Redis is running
docker-compose ps redis
# Test the Redis connection
docker exec tradus-redis redis-cli ping
# Restart Redis
docker-compose restart redis
```
### 3. Buffer overflow
**Symptom**: "Buffer overflow! Dropped message"
**Fix**:
```bash
# Increase the buffer size
# Edit .env:
MAX_BUFFER_SIZE=2000
# Or reduce inbound volume
# Subscribe only to the streams you need (edit websocket_client.py)
# Restart the service
docker-compose restart ingestion
```
### 4. High memory usage
**Symptom**: Redis or the application uses too much memory
**Fix**:
```bash
# Reduce the stream MAXLEN
REDIS_STREAM_MAXLEN=5000
# Reduce the dedup cache size
DEDUP_CACHE_SIZE=5000
# Restart and clear the data
docker-compose down
docker volume rm realtime-ingestion_redis_data
docker-compose up -d
```
---
## Development Mode
### Local development (without Docker)
```bash
# Install dependencies
pip install -r requirements.txt
# Start Redis (via Docker)
docker run -d -p 6379:6379 redis:7.2-alpine
# Adjust .env
cp .env.example .env
# Set: REDIS_HOST=localhost
# Run the application
python main.py
```
### Running tests
```bash
# Unit tests
pytest tests/
# Integration tests
pytest tests/integration/
# Coverage report
pytest --cov=core --cov-report=html
```
---
## Production Deployment Notes
### 1. High availability
- Use **Redis Sentinel** or **Redis Cluster** for high availability
- Deploy multiple ingestion instances (dedup handles the overlap automatically)
- Configure health checks and automatic restarts
### 2. Monitoring and alerting
Integrate Prometheus + Grafana:
```yaml
# add to docker-compose.yml
prometheus:
  image: prom/prometheus
  volumes:
    - ./prometheus.yml:/etc/prometheus/prometheus.yml
```
### 3. Log aggregation
Use the ELK stack or Loki:
```yaml
logging:
  driver: "loki"
  options:
    loki-url: "http://loki:3100/loki/api/v1/push"
```
### 4. Security hardening
- Set a Redis password (`REDIS_PASSWORD` in `.env`)
- Isolate services on a dedicated network
- Cap container resource usage (`deploy.resources`)
---
## API Documentation
### Python consumer example
```python
import redis
import orjson

# Create the Redis client
r = redis.Redis(host='localhost', port=6379, decode_responses=False)

# Use a consumer group (recommended); creating it twice raises BUSYGROUP
try:
    r.xgroup_create('binance:raw:kline:5m', 'my-processor', id='0', mkstream=True)
except redis.exceptions.ResponseError as e:
    if 'BUSYGROUP' not in str(e):
        raise

while True:
    # Read new entries for this consumer
    messages = r.xreadgroup(
        groupname='my-processor',
        consumername='worker-1',
        streams={'binance:raw:kline:5m': '>'},
        count=10,
        block=1000
    )
    for stream, stream_msgs in messages:
        for msg_id, fields in stream_msgs:
            # Parse the JSON payload
            data = orjson.loads(fields[b'data'])
            # Extract the kline fields
            kline = data['k']
            print(f"Price: {kline['c']}, Volume: {kline['v']}")
            # Acknowledge the message
            r.xack('binance:raw:kline:5m', 'my-processor', msg_id)
```
---
## License
MIT License
---
## Contact
For questions or suggestions, please open an Issue or Pull Request.
---
## Changelog
### v1.0.0 (2023-11-29)
- Initial release
- Binance perpetual futures WebSocket data ingestion
- Auto-reconnect, message dedup, memory protection
- Docker-based deployment

423
SIGNAL_GENERATION_GUIDE.md Normal file

@@ -0,0 +1,423 @@
# Trading Signal Generation Guide
## 📊 System Architecture
The system consists of two independent services:
### 1. Data ingestion service (ingestion) - always on
- **Role**: real-time data collection over WebSocket
- **Sources**: Binance futures WebSocket (klines, order book, trade stream)
- **Storage**: Redis Streams
- **Status**: runs 24/7
### 2. Signal generation service (scheduler) - optional
- **Role**: scheduled quantitative analysis + LLM decision making
- **Sources**: historical data read from Redis
- **Output**: trading signal JSON file
- **Run modes**: scheduled or manual
## 🚀 运行方式
### 方式一:只运行数据采集(默认)
**适用场景**: 只需要采集数据,手动生成信号
```bash
# 启动数据采集服务
docker compose up -d
# 查看状态
docker compose ps
# 查看日志
docker compose logs -f ingestion
```
**运行的服务**:
- ✅ `redis` - 数据存储
- ✅ `ingestion` - WebSocket数据采集
- ❌ `scheduler` - 未启动
**手动生成信号**:
```bash
# 随时可以手动执行
docker compose exec ingestion python /app/scripts/generate_trading_signal.py
```
---
### 方式二:数据采集 + 自动信号生成(推荐)
**适用场景**: 完全自动化,定时生成交易信号
```bash
# 启动数据采集 + 信号调度器
docker compose --profile scheduler up -d
# 查看状态
docker compose --profile scheduler ps
# 查看调度器日志
docker compose logs -f scheduler
# 查看采集服务日志
docker compose logs -f ingestion
```
**运行的服务**:
- ✅ `redis` - 数据存储
- ✅ `ingestion` - WebSocket数据采集
- ✅ `scheduler` - 定时信号生成 (每5分钟)
**配置信号生成间隔**:
编辑 `docker-compose.yml` 修改环境变量:
```yaml
scheduler:
environment:
- SIGNAL_INTERVAL_MINUTES=5 # 改为你想要的间隔(分钟)
```
支持的间隔:
- `1` - 每1分钟高频适合测试
- `5` - 每5分钟推荐平衡频率和成本
- `15` - 每15分钟低频节省LLM调用
- `60` - 每1小时很低频
---
### 方式三:手动执行单次信号生成
**适用场景**: 测试、调试、手动判断
```bash
# 确保数据采集服务在运行
docker compose up -d
# 手动执行一次信号生成
docker compose exec ingestion python /app/scripts/generate_trading_signal.py
```
---
## 📁 信号输出
### 输出文件位置
```
./output/latest_signal.json
```
### 输出文件格式
```json
{
"timestamp": "2025-11-30T13:23:24.816000",
"aggregated_signal": {
"final_signal": "BUY",
"final_confidence": 0.75,
"consensus": "QUANT_ONLY",
"opportunities": {
"intraday": {
"exists": true,
"direction": "LONG",
"entry_price": 91128.0,
"stop_loss": 91000.0,
"take_profit": 91342.0
},
"swing": {...},
"ambush": {...}
}
},
"market_analysis": {...},
"quantitative_signal": {...},
"llm_signal": {...}
}
```
### 查看最新信号
```bash
# 直接查看
cat output/latest_signal.json
# 格式化查看
cat output/latest_signal.json | jq .
```
---
## 🔧 Configuring the LLM API (optional)
To enable in-depth LLM analysis, configure an API key:
### Option 1: Edit docker-compose.yml
Edit the `scheduler` service in `docker-compose.yml`:
```yaml
scheduler:
  environment:
    # Use Deepseek (recommended, low cost)
    - OPENAI_API_KEY=sk-your-deepseek-key
    - OPENAI_BASE_URL=https://api.deepseek.com
    # Or OpenAI GPT
    # - OPENAI_API_KEY=sk-your-openai-key
    # Or Claude
    # - ANTHROPIC_API_KEY=sk-ant-your-key
```
### Option 2: Use a .env file
Create a `.env.local` file:
```bash
OPENAI_API_KEY=sk-your-key
OPENAI_BASE_URL=https://api.deepseek.com
```
And reference it from docker-compose.yml:
```yaml
scheduler:
  env_file:
    - .env.local
```
---
## 📊 LLM Gate Control
### Current configuration (minimal mode)
The LLM Gate keeps LLM API usage sane:
```yaml
LLM_GATE_ENABLED: true         # enable the gate
LLM_MIN_CANDLES: 100           # minimum number of candles
LLM_MIN_COMPOSITE_SCORE: 15.0  # composite score threshold
LLM_MAX_CALLS_PER_DAY: 12      # at most 12 calls per day
LLM_MIN_INTERVAL_MINUTES: 15   # interval between calls ≥ 15 minutes
```
### Gate logic
At signal-generation time, an LLM call must pass all three checks (see the sketch below):
1. ✅ **Frequency check** - calls today < 12, and ≥ 15 minutes since the last call
2. ✅ **Data check** - candle count ≥ 100
3. ✅ **Quality check** - composite score ≥ 15 (filters fully neutral signals)
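A minimal reading of those three layers (the real class lives in `signals/llm_gate.py`; taking the absolute composite score is an assumption here, since SELL setups score negative):
```python
from datetime import datetime, timedelta
from typing import Optional

def gate_allows(now: datetime, today_calls: int, last_call: Optional[datetime],
                candle_count: int, composite_score: float,
                max_calls_per_day: int = 12, min_interval_min: int = 15,
                min_candles: int = 100, min_score: float = 15.0) -> bool:
    # 1. Frequency: daily quota and minimum spacing between calls
    if today_calls >= max_calls_per_day:
        return False
    if last_call is not None and now - last_call < timedelta(minutes=min_interval_min):
        return False
    # 2. Data: enough candles for the indicators to be meaningful
    if candle_count < min_candles:
        return False
    # 3. Quality: filter fully neutral signals (assumption: magnitude matters)
    return abs(composite_score) >= min_score
```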
### Check Gate statistics
```bash
docker compose exec scheduler python -c "
from signals.llm_gate import LLMGate
gate = LLMGate()
stats = gate.get_stats()
print(f'calls today: {stats[\"today_calls\"]}/{stats[\"max_calls_per_day\"]}')
print(f'remaining quota: {stats[\"remaining_calls_today\"]}')
print(f'{stats[\"minutes_since_last_call\"]:.1f} minutes since last call' if stats['minutes_since_last_call'] else 'never called')
"
```
---
## 🔍 Monitoring and Logs
### Live logs
```bash
# Ingestion logs
docker compose logs -f ingestion
# Signal generation logs
docker compose logs -f scheduler
# All services
docker compose logs -f
```
### Key log lines
**Data ingestion (ingestion)**:
```
Health Check | WebSocket: ✓ | Redis: ✓ | Buffer: 0.5% | Written: 12345
```
**Signal generation (scheduler)**:
```
📊 Generating trading signal - 2025-11-30 13:23:24
✅ Market analysis complete - price: $91,650.10, trend: up
📈 Quantitative signal: BUY (score: 51.5)
✅ LLM Gate: PASSED - signal: BUY, composite score: 51.5
🤖 LLM signal: BUY (confidence: 60.00%)
🎯 Final signal: BUY (confidence: 75.00%)
💾 Signal saved to: /app/output/latest_signal.json
```
---
## ⚙️ Common Commands
### Start/stop services
```bash
# Ingestion only
docker compose up -d
# Ingestion + signal scheduler
docker compose --profile scheduler up -d
# Stop everything
docker compose down
# Restart a single service
docker compose restart scheduler
docker compose restart ingestion
```
### Inspect status
```bash
# Running state
docker compose ps
# Resource usage
docker stats tradus-ingestion tradus-scheduler
# Network
docker network inspect realtime-ingestion_tradus-network
```
### Enter containers
```bash
# Ingestion container
docker compose exec ingestion bash
# Scheduler container
docker compose exec scheduler bash
# Redis
docker compose exec redis redis-cli
```
---
## 🐛 Troubleshooting
### Problem 1: scheduler is not running
**Symptom**: `docker compose ps` does not show `tradus-scheduler`
**Cause**: the scheduler sits behind a compose profile and does not start by default
**Fix**:
```bash
docker compose --profile scheduler up -d
```
### Problem 2: the Gate always blocks the LLM
**Symptom**: logs show `🚫 LLM Gate: BLOCKED`
**Possible causes**:
1. Composite score too low (< 15)
2. Calls too close together (< 15 minutes)
3. Daily quota exhausted (≥ 12 calls)
**Fix**:
```bash
# Inspect the Gate state
docker compose exec scheduler python -c "from signals.llm_gate import LLMGate; print(LLMGate().get_stats())"
# Lower the composite score threshold (edit .env)
LLM_MIN_COMPOSITE_SCORE=10.0
# Raise the daily quota (edit .env)
LLM_MAX_CALLS_PER_DAY=20
# Restart the service
docker compose --profile scheduler restart scheduler
```
### Problem 3: Signal generation fails
**Symptom**: logs show a signal-generation failure
**Possible causes**:
1. Not enough data in Redis
2. Fewer than 100 candles
**Fix**:
```bash
# Check the Redis data
docker compose exec redis redis-cli
> XLEN binance:raw:kline:5m
# Let data accumulate for at least 20 minutes
# Or backfill history from the Binance API
```
---
## 📈 Performance Tuning
### Signal generation interval
Pick an interval to match the strategy:
- **High frequency**: 1-5 minutes
- **Medium frequency**: 5-15 minutes (recommended)
- **Low frequency**: 15-60 minutes
### LLM call frequency
Configure in `.env`:
```bash
# Conservative mode (saves cost)
LLM_MAX_CALLS_PER_DAY=5
LLM_MIN_INTERVAL_MINUTES=30
LLM_MIN_COMPOSITE_SCORE=20.0
# Aggressive mode (more LLM analyses)
LLM_MAX_CALLS_PER_DAY=20
LLM_MIN_INTERVAL_MINUTES=10
LLM_MIN_COMPOSITE_SCORE=10.0
# Balanced mode (recommended)
LLM_MAX_CALLS_PER_DAY=12
LLM_MIN_INTERVAL_MINUTES=15
LLM_MIN_COMPOSITE_SCORE=15.0
```
---
## 🎯 Recommended Workflows
### Development / testing
```bash
# Ingestion only; trigger signal generation by hand
docker compose up -d
docker compose exec ingestion python /app/scripts/generate_trading_signal.py
```
### Production / automation
```bash
# Ingestion + automatic signal generation
docker compose --profile scheduler up -d
# Configure the LLM API key (in docker-compose.yml)
# Watch the logs
docker compose logs -f scheduler
```
### Backtesting / research
```bash
# Ingestion only, to accumulate history
docker compose up -d
# Export data periodically for backtesting
# (a dedicated backtesting tool can come later)
```

38
START_HERE.md Executable file

@@ -0,0 +1,38 @@
# 🚀 Quick Start
## One-liners
```bash
cd /Users/aaron/source_code/tradus-ai/realtime-ingestion
# 1⃣ Generate a trading signal (with AI analysis)
./run_signal.sh
# 2⃣ View the latest signal
./view_signal.sh
# 3⃣ View live data
./view_data.sh
```
---
## 📋 All Available Commands
| Command | Description |
|---------|-------------|
| `./run_signal.sh` | Generate a trading signal (calls the AI every time) |
| `./run_signal_smart.sh` | Generate a trading signal (smart gating, saves cost) |
| `./view_signal.sh` | View the latest signal |
| `./view_data.sh` | View live data |
| `make logs` | View system logs |
| `make monitor` | System monitoring |
---
## 💡 Details
See the full usage guide:
```bash
cat USAGE.md
```

260
USAGE.md Normal file

@@ -0,0 +1,260 @@
# 🚀 Quick Usage Guide
## 📋 Prerequisites
Make sure the Docker services are running:
```bash
docker compose ps
```
If not, start them first:
```bash
docker compose up -d
```
---
## 🎯 Running a Trading Signal Analysis
### Mode 1: Full AI analysis (good for testing)
Calls the Deepseek AI for in-depth analysis on every run:
```bash
./run_signal.sh
```
**Characteristics**:
- ✅ Calls Deepseek AI on every run
- ✅ Quantitative + AI analysis together
- ⚠️ Costs roughly ¥0.005 per call
---
### Mode 2: Smart gating (good for production)
Calls the AI only on high-quality signals, saving ~95% of the cost:
```bash
./run_signal_smart.sh
```
**Characteristics**:
- ✅ Decides automatically whether AI analysis is warranted
- ✅ Saves API cost
- ✅ At most 5 calls per day
- ✅ Uses the AI only at key moments
**When the AI is called**:
- Composite score ≥ 20.0
- Consensus ≥ 0.75
- Sufficient data (≥ 50 candles)
- A high-value scenario (trend pullback, real-vs-fake breakout, etc.)
---
## 📊 Viewing Results
### View the latest signal
```bash
./view_signal.sh
```
**Sample output**:
```
════════════════════════════════════════════════════════════════
                    📊 Latest Trading Signal
════════════════════════════════════════════════════════════════
🎯 Final signal: HOLD
📈 Confidence: 33%
🤝 Consensus: CONSENSUS_HOLD
📊 Current price: $90,662.10
────────────────────────────────────────────────────────────────
🔢 Quantitative signal: HOLD (confidence: 25%)
   Composite score: -2.6
   Trend: 23.1 | Momentum: 5.0
   Order flow: -47.6 | Breakout: 0.0
────────────────────────────────────────────────────────────────
🤖 AI signal: HOLD (confidence: 40%)
   Reasoning: the price is in a pullback after the rally; RSI is neutral-to-strong...
   Key factors: RSI neutral zone, MACD death cross narrowing, shrinking volume
────────────────────────────────────────────────────────────────
💡 Recommendation: both the quantitative and AI analyses suggest waiting for a better setup
════════════════════════════════════════════════════════════════
```
### View live data
```bash
./view_data.sh
```
**Sample output**:
```
════════════════════════════════════════════════════════════════
                      📡 Live Data Monitor
════════════════════════════════════════════════════════════════
📊 Current BTC price:
   $ 90,662.10 (latest)
   $ 90,680.00 (5-minute high)
   $ 90,640.00 (5-minute low)
   Volume: 45.23 BTC
─────────────────────────────────────────────────────────────────
📈 Stream status:
   kline:5m  : 10,007 messages
   kline:15m : 3,500 messages
   kline:1h  : 1,200 messages
   trade     : 50,000 messages
   depth:20  : 80,000 messages
─────────────────────────────────────────────────────────────────
🚀 Service status:
   tradus-redis: Up
   tradus-ingestion: Up
════════════════════════════════════════════════════════════════
```
---
## 📁 Full JSON Output
```bash
docker compose exec ingestion cat /app/output/latest_signal.json | python3 -m json.tool
```
---
## ⚙️ Advanced Usage
### Makefile shortcuts
```bash
# List all available commands
make help
# View logs
make logs
# System monitoring
make monitor
# Open the Redis CLI
make redis-cli
# Restart services
make restart
```
### Run the analysis by hand
```bash
# Enter the container
docker compose exec ingestion bash
# Set the API key (use your own key; never commit a real key)
export OPENAI_API_KEY='sk-your-deepseek-key'
export OPENAI_BASE_URL='https://api.deepseek.com'
# Run the market analysis
python scripts/run_analysis.py
# Run signal generation
python scripts/generate_trading_signal.py
```
---
## 🔧 Scheduled Runs (optional)
### With cron
Every 30 minutes (smart gating mode):
```bash
# Edit the crontab
crontab -e
# Add:
*/30 * * * * cd /Users/aaron/source_code/tradus-ai/realtime-ingestion && ./run_signal_smart.sh >> logs/signal.log 2>&1
```
Hourly (full AI analysis):
```bash
0 * * * * cd /Users/aaron/source_code/tradus-ai/realtime-ingestion && ./run_signal.sh >> logs/signal.log 2>&1
```
### With watch, for continuous monitoring
```bash
# Run every 5 minutes
watch -n 300 ./run_signal_smart.sh
```
---
## 📌 FAQ
### Q: How do I switch LLM providers?
**Claude**:
```bash
export ANTHROPIC_API_KEY='your-claude-key'
# then in scripts/generate_trading_signal.py use
# LLMDecisionMaker(provider='claude')
```
**OpenAI GPT**:
```bash
export OPENAI_API_KEY='your-openai-key'
unset OPENAI_BASE_URL  # drop the Deepseek base_url
```
### Q: How do I tune the gate parameters?
Edit the `.env` file:
```bash
LLM_GATE_ENABLED=true
LLM_MIN_CONSENSUS=0.75       # consensus threshold
LLM_MIN_COMPOSITE_SCORE=40   # composite score threshold
LLM_MAX_CALLS_PER_DAY=5      # max calls per day
```
### Q: What if there is not enough data?
The system backfills history from the Binance API automatically. If it still reports insufficient data, wait:
- 5-minute chart: ~17 hours to accumulate 200 candles
- Or keep the system running; history fills in automatically
---
## 🎓 Next Steps
1. **Understand the signals**: read the generated JSON and learn what each field means
2. **Tune the parameters**: adjust the gate thresholds to fit your trading style
3. **Wire up notifications**: push high-quality signals to Telegram/DingTalk
4. **Validate with backtests**: check signal accuracy against historical data
---
## 📞 Getting Help
View the logs:
```bash
make logs
```
Check system status:
```bash
docker compose ps
```
Inspect the Redis data:
```bash
make redis-cli
```

21
analysis/__init__.py Normal file

@@ -0,0 +1,21 @@
"""
Market Analysis & Feature Engineering Module
"""
from .config import config
from .data_reader import MarketDataReader
from .indicators import TechnicalIndicators
from .market_structure import MarketStructureAnalyzer
from .orderflow import OrderFlowAnalyzer
from .llm_context import LLMContextBuilder
from .engine import MarketAnalysisEngine
__all__ = [
'config',
'MarketDataReader',
'TechnicalIndicators',
'MarketStructureAnalyzer',
'OrderFlowAnalyzer',
'LLMContextBuilder',
'MarketAnalysisEngine',
]
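
# A minimal usage sketch of the public surface exported above
# (assumes Redis is reachable and the streams contain data):
#
#     from analysis import MarketAnalysisEngine
#
#     engine = MarketAnalysisEngine()
#     result = engine.analyze_current_market(timeframe="5m", symbol="BTCUSDT")
#     if "error" not in result:
#         print(result["current_price"], result["trend_analysis"]["direction"])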

61
analysis/config.py Normal file

@@ -0,0 +1,61 @@
"""
Analysis configuration
"""
from pydantic_settings import BaseSettings
class AnalysisConfig(BaseSettings):
"""Analysis configuration"""
# Redis connection
REDIS_HOST: str = "localhost"
REDIS_PORT: int = 6379
REDIS_DB: int = 0
# Stream keys
KLINE_5M_KEY: str = "binance:raw:kline:5m"
KLINE_15M_KEY: str = "binance:raw:kline:15m"
KLINE_1H_KEY: str = "binance:raw:kline:1h"
KLINE_4H_KEY: str = "binance:raw:kline:4h"
KLINE_1D_KEY: str = "binance:raw:kline:1d"
KLINE_1W_KEY: str = "binance:raw:kline:1w"
DEPTH_KEY: str = "binance:raw:depth:20"
TRADE_KEY: str = "binance:raw:trade"
# Analysis parameters
LOOKBACK_PERIODS: int = 200 # Number of candles to analyze
# Technical indicator periods
EMA_FAST: int = 20
EMA_SLOW: int = 50
RSI_PERIOD: int = 14
ATR_PERIOD: int = 14
ADX_PERIOD: int = 14
MACD_FAST: int = 12
MACD_SLOW: int = 26
MACD_SIGNAL: int = 9
BB_PERIOD: int = 20
BB_STD: float = 2.0
VOLUME_MA_PERIOD: int = 20
# Support/Resistance detection
SR_LOOKBACK: int = 50 # Periods to look back for S/R
SR_TOLERANCE: float = 0.002 # 0.2% price tolerance
# Order flow analysis
ORDERBOOK_IMBALANCE_THRESHOLD: float = 0.2 # 20% imbalance
LARGE_ORDER_THRESHOLD_USD: float = 100000 # $100k considered large
# Risk parameters (example values)
ACCOUNT_SIZE_USD: float = 100000
MAX_RISK_PCT: float = 0.01 # 1% per trade
DEFAULT_LEVERAGE: int = 3
ATR_STOP_MULTIPLIER: float = 1.8
class Config:
env_file = ".env"
case_sensitive = True
extra = 'ignore' # Ignore extra fields from .env
config = AnalysisConfig()

294
analysis/data_reader.py Normal file

@@ -0,0 +1,294 @@
"""
Data reader for fetching market data from Redis Streams
"""
import logging
from typing import Optional, List, Dict, Any
from datetime import datetime, timedelta
import pandas as pd
import redis
import orjson
import requests
import time
from .config import config
logger = logging.getLogger(__name__)
class MarketDataReader:
"""Read and aggregate market data from Redis Streams"""
def __init__(self):
self.redis_client = redis.Redis(
host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_DB,
decode_responses=False,
)
def fetch_historical_klines_from_api(
self, symbol: str = 'BTCUSDT', interval: str = '5m', limit: int = 200
) -> pd.DataFrame:
"""
Fetch historical kline data from Binance API
Args:
symbol: Trading pair (e.g., 'BTCUSDT')
interval: Kline interval (e.g., '5m', '15m', '1h', '4h')
limit: Number of candles to fetch (max 1500)
Returns:
DataFrame with historical OHLCV data
"""
try:
# Binance API endpoint
url = 'https://fapi.binance.com/fapi/v1/klines'
params = {
'symbol': symbol,
'interval': interval,
'limit': min(limit, 1500) # API limit
}
logger.info(f"Fetching {limit} historical candles from Binance API ({symbol} {interval})...")
response = requests.get(url, params=params, timeout=10)
response.raise_for_status()
data = response.json()
# Parse API response
klines = []
for item in data:
klines.append({
'timestamp': datetime.fromtimestamp(item[0] / 1000),
'open': float(item[1]),
'high': float(item[2]),
'low': float(item[3]),
'close': float(item[4]),
'volume': float(item[5]),
'quote_volume': float(item[7]),
'trades': int(item[8]),
'is_closed': True, # Historical data is always closed
})
df = pd.DataFrame(klines)
if not df.empty:
df.set_index('timestamp', inplace=True)
df.sort_index(inplace=True)
logger.info(f"✅ Fetched {len(df)} candles from Binance API")
return df
except Exception as e:
logger.error(f"Error fetching from Binance API: {e}")
return pd.DataFrame()
def read_kline_stream(
    self, stream_key: str, count: Optional[int] = None, use_api_fallback: bool = True
) -> pd.DataFrame:
"""
Read kline data from Redis Stream and convert to DataFrame
Only includes completed candles (x: true). If insufficient data,
fetches historical data from Binance API.
Args:
stream_key: Redis stream key (e.g., 'binance:raw:kline:5m')
count: Number of recent candles to fetch (default: LOOKBACK_PERIODS)
use_api_fallback: Whether to fetch from API if Redis data insufficient
Returns:
DataFrame with OHLCV data and indicators
"""
if count is None:
count = config.LOOKBACK_PERIODS
try:
# Read MORE messages from stream to account for duplicates
# Multiply by 10 to ensure we get enough unique candles after filtering
messages = self.redis_client.xrevrange(stream_key, count=count * 10)
if not messages:
logger.warning(f"No data found in stream: {stream_key}")
# Fallback to API
if use_api_fallback:
return self._fetch_from_api_with_interval(stream_key, count)
return pd.DataFrame()
# Parse messages - ONLY keep completed candles (x: true)
klines = []
seen_timestamps = set()
for msg_id, fields in reversed(messages): # Reverse to get chronological order
data = orjson.loads(fields[b'data'])
k = data.get('k', {})
# IMPORTANT: Only keep completed candles
if not k.get('x', False):
continue
# Deduplicate by timestamp
timestamp = k['t']
if timestamp in seen_timestamps:
continue
seen_timestamps.add(timestamp)
klines.append({
'timestamp': datetime.fromtimestamp(k['t'] / 1000),
'open': float(k['o']),
'high': float(k['h']),
'low': float(k['l']),
'close': float(k['c']),
'volume': float(k['v']),
'quote_volume': float(k['q']),
'trades': int(k['n']),
'is_closed': k['x'],
})
# Stop if we have enough candles
if len(klines) >= count:
break
# Create DataFrame
df = pd.DataFrame(klines)
if df.empty:
logger.warning(f"No completed candles found in stream: {stream_key}")
# Fallback to API
if use_api_fallback:
return self._fetch_from_api_with_interval(stream_key, count)
return df
df.set_index('timestamp', inplace=True)
df.sort_index(inplace=True)
logger.info(f"Loaded {len(df)} completed candles from {stream_key}")
# If still insufficient, supplement with API data
if len(df) < count and use_api_fallback:
logger.warning(f"Insufficient data: {len(df)}/{count} candles. Fetching from API...")
api_df = self._fetch_from_api_with_interval(stream_key, count)
if not api_df.empty:
# Merge Redis and API data, preferring Redis for overlapping periods
combined = pd.concat([api_df, df])
combined = combined[~combined.index.duplicated(keep='last')]
combined.sort_index(inplace=True)
logger.info(f"Combined data: {len(combined)} candles (Redis: {len(df)}, API: {len(api_df)})")
return combined
return df
except Exception as e:
logger.error(f"Error reading kline stream {stream_key}: {e}")
return pd.DataFrame()
def _fetch_from_api_with_interval(self, stream_key: str, count: int) -> pd.DataFrame:
"""Extract interval from stream key and fetch from API"""
# Extract interval from stream key (e.g., 'binance:raw:kline:5m' -> '5m')
try:
interval = stream_key.split(':')[-1]
return self.fetch_historical_klines_from_api(
symbol='BTCUSDT',
interval=interval,
limit=count
)
except Exception as e:
logger.error(f"Error extracting interval from {stream_key}: {e}")
return pd.DataFrame()
def read_latest_depth(self) -> Optional[Dict[str, Any]]:
"""
Read latest order book depth data
Returns:
Dict with bids and asks, or None if no data
"""
try:
messages = self.redis_client.xrevrange(config.DEPTH_KEY, count=1)
if not messages:
return None
msg_id, fields = messages[0]
data = orjson.loads(fields[b'data'])
return {
'timestamp': datetime.fromtimestamp(data['E'] / 1000),
'bids': [[float(p), float(q)] for p, q in data['b']],
'asks': [[float(p), float(q)] for p, q in data['a']],
}
except Exception as e:
logger.error(f"Error reading depth data: {e}")
return None
def read_recent_trades(self, count: int = 100) -> List[Dict[str, Any]]:
"""
Read recent trade data
Args:
count: Number of recent trades to fetch
Returns:
List of trade dictionaries
"""
try:
messages = self.redis_client.xrevrange(config.TRADE_KEY, count=count)
if not messages:
return []
trades = []
for msg_id, fields in messages:
data = orjson.loads(fields[b'data'])
trades.append({
'timestamp': datetime.fromtimestamp(data['T'] / 1000),
'price': float(data['p']),
'quantity': float(data['q']),
'is_buyer_maker': data['m'], # True = sell, False = buy
})
return trades
except Exception as e:
logger.error(f"Error reading trade data: {e}")
return []
def get_multi_timeframe_data(self) -> Dict[str, pd.DataFrame]:
"""
Fetch data from multiple timeframes
Returns:
Dict mapping timeframe to DataFrame
"""
# Different timeframes need different amount of data
# Shorter timeframes: 200 candles (for detailed analysis)
# Longer timeframes: fewer candles (100 for 1d, 60+ for 1w)
timeframes = {
'5m': (config.KLINE_5M_KEY, 200),
'15m': (config.KLINE_15M_KEY, 200),
'1h': (config.KLINE_1H_KEY, 200),
'4h': (config.KLINE_4H_KEY, 200),
'1d': (config.KLINE_1D_KEY, 100), # 100 days ≈ 3+ months
'1w': (config.KLINE_1W_KEY, 65), # 65 weeks ≈ 15 months
}
data = {}
for tf, (key, count) in timeframes.items():
df = self.read_kline_stream(key, count=count)
if not df.empty:
data[tf] = df
return data
def get_latest_price(self) -> Optional[float]:
"""Get latest close price from 5m kline"""
try:
df = self.read_kline_stream(config.KLINE_5M_KEY, count=1)
if not df.empty:
return float(df.iloc[-1]['close'])
except Exception as e:
logger.error(f"Error getting latest price: {e}")
return None

266
analysis/engine.py Normal file

@@ -0,0 +1,266 @@
"""
Main Market Analysis Engine - Orchestrates all analysis components
"""
import logging
from typing import Dict, Any, Optional
import pandas as pd
from .data_reader import MarketDataReader
from .indicators import TechnicalIndicators
from .market_structure import MarketStructureAnalyzer
from .orderflow import OrderFlowAnalyzer
from .llm_context import LLMContextBuilder
logger = logging.getLogger(__name__)
class MarketAnalysisEngine:
"""
Main analysis engine that orchestrates all market analysis components
"""
def __init__(self):
self.data_reader = MarketDataReader()
self.llm_builder = LLMContextBuilder()
def analyze_current_market(
self, timeframe: str = '5m', symbol: str = 'BTCUSDT'
) -> Dict[str, Any]:
"""
Perform complete market analysis for current state
Args:
timeframe: Primary timeframe for analysis (5m, 15m, 1h, 4h)
symbol: Trading symbol
Returns:
Complete analysis dictionary
"""
try:
logger.info(f"Starting market analysis for {symbol} on {timeframe}")
# Fetch data
df = self._fetch_and_prepare_data(timeframe)
if df.empty:
logger.error(f"No data available for {timeframe}")
return {'error': 'No data available'}
# Get current price
current_price = float(df.iloc[-1]['close'])
# Fetch order book
depth_data = self.data_reader.read_latest_depth()
# Perform analysis components
analysis = {
'symbol': symbol,
'timeframe': timeframe,
'current_price': round(current_price, 2),
'timestamp': df.index[-1].isoformat(),
'trend_analysis': MarketStructureAnalyzer.identify_trend(df),
'support_resistance': MarketStructureAnalyzer.find_support_resistance(
df, current_price
),
'momentum': MarketStructureAnalyzer.calculate_momentum(df),
'indicators': TechnicalIndicators.get_latest_indicators(df),
'price_changes': TechnicalIndicators.calculate_price_changes(df),
}
# Add order flow if depth data available
if depth_data:
analysis['orderflow'] = {
'imbalance': OrderFlowAnalyzer.analyze_orderbook_imbalance(depth_data),
'liquidity': OrderFlowAnalyzer.analyze_liquidity_depth(
depth_data, current_price
),
'large_orders': OrderFlowAnalyzer.detect_large_orders(depth_data),
}
# Calculate order flow strength
analysis['orderflow']['strength'] = OrderFlowAnalyzer.calculate_orderflow_strength(
analysis['orderflow']['imbalance'],
analysis['orderflow']['large_orders'],
analysis['orderflow']['liquidity'],
)
# Add breakout detection
analysis['breakout'] = MarketStructureAnalyzer.detect_breakout(
df, analysis['support_resistance']
)
# Add volatility and volume analysis for LLM gate
analysis['volatility_analysis'] = {
'atr': analysis['indicators'].get('atr', 0),
'atr_pct': (analysis['indicators'].get('atr', 0) / current_price * 100) if current_price > 0 else 0,
'bb_status': self._get_bb_status(df),
}
analysis['volume_analysis'] = {
'current_volume': float(df.iloc[-1]['volume']),
'avg_volume': float(df['volume'].tail(20).mean()),
'volume_status': self._get_volume_status(df),
'obv_trend': self._get_obv_trend(df),
}
# Add metadata for LLM gate
analysis['metadata'] = {
'candle_count': len(df),
'timeframe': timeframe,
'analysis_timestamp': df.index[-1].isoformat(),
}
logger.info(
f"Analysis complete: trend={analysis['trend_analysis']['direction']}, "
f"rsi={analysis['momentum']['rsi']}, "
f"candles={len(df)}"
)
return analysis
except Exception as e:
logger.error(f"Error in market analysis: {e}", exc_info=True)
return {'error': str(e)}
def get_llm_context(self, format: str = 'full') -> Dict[str, Any]:
"""
Get market context formatted for LLM consumption
Args:
format: 'full' or 'simplified'
Returns:
LLM-ready context dictionary
"""
if format == 'simplified':
return self.llm_builder.get_simplified_context()
else:
return self.llm_builder.build_full_context()
def get_multi_timeframe_analysis(self) -> Dict[str, Any]:
"""
Get analysis across all timeframes
Returns:
Dict mapping timeframe to analysis
"""
timeframes = ['5m', '15m', '1h', '4h']
results = {}
for tf in timeframes:
analysis = self.analyze_current_market(timeframe=tf)
if 'error' not in analysis:
results[tf] = {
'trend': analysis['trend_analysis'].get('direction', 'unknown'),
'strength': analysis['trend_analysis'].get('strength', 'weak'),
'rsi': analysis['momentum'].get('rsi', 50),
'adx': analysis['trend_analysis'].get('adx', 0),
}
return results
def _fetch_and_prepare_data(self, timeframe: str) -> pd.DataFrame:
"""
Fetch data and add all technical indicators
Args:
timeframe: Timeframe to fetch (5m, 15m, 1h, 4h)
Returns:
DataFrame with OHLCV and indicators
"""
# Map timeframe to stream key
stream_key = f"binance:raw:kline:{timeframe}"
# Fetch data
df = self.data_reader.read_kline_stream(stream_key)
if df.empty:
return df
# Add all technical indicators
df = TechnicalIndicators.add_all_indicators(df)
return df
def check_data_availability(self) -> Dict[str, Any]:
"""
Check what data is available in Redis
Returns:
Dict with data availability status
"""
status = {
'klines': {},
'depth': False,
'trades': False,
}
# Check kline streams
for tf in ['5m', '15m', '1h', '4h']:
stream_key = f"binance:raw:kline:{tf}"
df = self.data_reader.read_kline_stream(stream_key, count=1)
status['klines'][tf] = {
'available': not df.empty,
'latest': df.index[-1].isoformat() if not df.empty else None,
}
# Check depth
depth = self.data_reader.read_latest_depth()
status['depth'] = depth is not None
# Check trades
trades = self.data_reader.read_recent_trades(count=1)
status['trades'] = len(trades) > 0
return status
def _get_bb_status(self, df: pd.DataFrame) -> str:
"""Get Bollinger Bands status"""
if 'bb_upper' not in df.columns or 'bb_lower' not in df.columns:
return 'unknown'
last_close = df.iloc[-1]['close']
bb_upper = df.iloc[-1]['bb_upper']
bb_lower = df.iloc[-1]['bb_lower']
bb_middle = df.iloc[-1].get('bb_middle', (bb_upper + bb_lower) / 2)
if last_close > bb_upper:
return 'overbought'
elif last_close < bb_lower:
return 'oversold'
elif last_close > bb_middle:
return 'upper_half'
else:
return 'lower_half'
def _get_volume_status(self, df: pd.DataFrame) -> str:
"""Get volume status compared to average"""
if len(df) < 20:
return 'unknown'
current_volume = df.iloc[-1]['volume']
avg_volume = df['volume'].tail(20).mean()
if current_volume > avg_volume * 1.5:
return 'high'
elif current_volume > avg_volume * 0.8:
return 'normal'
else:
return 'low'
def _get_obv_trend(self, df: pd.DataFrame) -> str:
"""Get OBV (On-Balance Volume) trend"""
if 'obv' not in df.columns or len(df) < 20:
return 'unknown'
obv_current = df.iloc[-1]['obv']
obv_sma = df['obv'].tail(20).mean()
if obv_current > obv_sma * 1.05:
return 'bullish'
elif obv_current < obv_sma * 0.95:
return 'bearish'
else:
return 'neutral'

224
analysis/indicators.py Normal file
View File

@ -0,0 +1,224 @@
"""
Technical indicator calculation engine
"""
import logging
import pandas as pd
import numpy as np
from ta import trend, momentum, volatility, volume
from .config import config
logger = logging.getLogger(__name__)
class TechnicalIndicators:
"""Calculate technical indicators for market analysis"""
@staticmethod
def add_all_indicators(df: pd.DataFrame) -> pd.DataFrame:
"""
Add all technical indicators to DataFrame
Args:
df: DataFrame with OHLCV data
Returns:
DataFrame with all indicators added
"""
# Minimum data needed for indicators is ~60 candles
# (based on EMA_SLOW=50 + some buffer)
MIN_CANDLES = 60
if df.empty or len(df) < MIN_CANDLES:
logger.warning(f"Insufficient data for indicators: {len(df)} candles (min: {MIN_CANDLES})")
return df
df = df.copy()
# Trend indicators
df = TechnicalIndicators.add_trend_indicators(df)
# Momentum indicators
df = TechnicalIndicators.add_momentum_indicators(df)
# Volatility indicators
df = TechnicalIndicators.add_volatility_indicators(df)
# Volume indicators
df = TechnicalIndicators.add_volume_indicators(df)
return df
@staticmethod
def add_trend_indicators(df: pd.DataFrame) -> pd.DataFrame:
"""Add trend-following indicators"""
# EMAs
df[f'ema_{config.EMA_FAST}'] = trend.EMAIndicator(
df['close'], window=config.EMA_FAST
).ema_indicator()
df[f'ema_{config.EMA_SLOW}'] = trend.EMAIndicator(
df['close'], window=config.EMA_SLOW
).ema_indicator()
# MACD
macd = trend.MACD(
df['close'],
window_slow=config.MACD_SLOW,
window_fast=config.MACD_FAST,
window_sign=config.MACD_SIGNAL
)
df['macd'] = macd.macd()
df['macd_signal'] = macd.macd_signal()
df['macd_hist'] = macd.macd_diff()
# ADX (trend strength)
adx = trend.ADXIndicator(
df['high'], df['low'], df['close'], window=config.ADX_PERIOD
)
df['adx'] = adx.adx()
df['dmp'] = adx.adx_pos() # +DI
df['dmn'] = adx.adx_neg() # -DI
return df
@staticmethod
def add_momentum_indicators(df: pd.DataFrame) -> pd.DataFrame:
"""Add momentum indicators"""
# RSI
df['rsi'] = momentum.RSIIndicator(
df['close'], window=config.RSI_PERIOD
).rsi()
# Stochastic
stoch = momentum.StochasticOscillator(
df['high'],
df['low'],
df['close'],
window=14,
smooth_window=3
)
df['stoch_k'] = stoch.stoch()
df['stoch_d'] = stoch.stoch_signal()
# Williams %R
df['willr'] = momentum.WilliamsRIndicator(
df['high'], df['low'], df['close'], lbp=14
).williams_r()
return df
@staticmethod
def add_volatility_indicators(df: pd.DataFrame) -> pd.DataFrame:
"""Add volatility indicators"""
# ATR (Average True Range)
df['atr'] = volatility.AverageTrueRange(
df['high'], df['low'], df['close'], window=config.ATR_PERIOD
).average_true_range()
# Bollinger Bands
bbands = volatility.BollingerBands(
df['close'],
window=config.BB_PERIOD,
window_dev=config.BB_STD
)
df['bb_upper'] = bbands.bollinger_hband()
df['bb_middle'] = bbands.bollinger_mavg()
df['bb_lower'] = bbands.bollinger_lband()
df['bb_width'] = bbands.bollinger_wband()
# Historical volatility: 20-period stdev of returns, annualized via
# sqrt(24 * 365) - note this factor assumes hourly candles
df['hist_vol'] = df['close'].pct_change().rolling(20).std() * np.sqrt(24 * 365) * 100
return df
@staticmethod
def add_volume_indicators(df: pd.DataFrame) -> pd.DataFrame:
"""Add volume-based indicators"""
# Volume SMA
df['volume_ma'] = df['volume'].rolling(window=config.VOLUME_MA_PERIOD).mean()
# Volume ratio
df['volume_ratio'] = df['volume'] / df['volume_ma']
# OBV (On-Balance Volume)
df['obv'] = volume.OnBalanceVolumeIndicator(
df['close'], df['volume']
).on_balance_volume()
# VWAP (Volume Weighted Average Price) - cumulative over the whole window
# rather than session-anchored, so treat it as an intraday approximation
if 'quote_volume' in df.columns:
df['vwap'] = (df['quote_volume'].cumsum() / df['volume'].cumsum())
return df
@staticmethod
def calculate_price_changes(df: pd.DataFrame) -> dict:
"""
Calculate price changes over various periods
Returns:
Dict with price change percentages
"""
if df.empty:
return {}
latest_close = df['close'].iloc[-1]
changes = {}
for periods, label in [(1, '1candle'), (5, '5candles'), (20, '20candles')]:
if len(df) > periods:
old_close = df['close'].iloc[-periods - 1]
change_pct = ((latest_close - old_close) / old_close) * 100
changes[label] = round(change_pct, 2)
return changes
@staticmethod
def get_latest_indicators(df: pd.DataFrame) -> dict:
"""
Extract latest indicator values for analysis
Returns:
Dict with latest indicator values
"""
if df.empty:
return {}
latest = df.iloc[-1]
indicators = {
# Trend
'ema_20': round(latest.get(f'ema_{config.EMA_FAST}', 0), 2),
'ema_50': round(latest.get(f'ema_{config.EMA_SLOW}', 0), 2),
'macd': round(latest.get('macd', 0), 4),
'macd_signal': round(latest.get('macd_signal', 0), 4),
'macd_hist': round(latest.get('macd_hist', 0), 4),
'adx': round(latest.get('adx', 0), 1),
# Momentum
'rsi': round(latest.get('rsi', 0), 1),
'stoch_k': round(latest.get('stoch_k', 0), 1),
'willr': round(latest.get('willr', 0), 1),
# Volatility
'atr': round(latest.get('atr', 0), 2),
'bb_upper': round(latest.get('bb_upper', 0), 2),
'bb_lower': round(latest.get('bb_lower', 0), 2),
'bb_width': round(latest.get('bb_width', 0), 4),
'hist_vol': round(latest.get('hist_vol', 0), 2),
# Volume
'volume_ratio': round(latest.get('volume_ratio', 0), 2),
'obv': int(latest.get('obv', 0)),
# Price
'close': round(latest['close'], 2),
'high': round(latest['high'], 2),
'low': round(latest['low'], 2),
}
return indicators
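# --- Usage sketch (illustrative, not part of the pipeline) ---
# Runs the indicator stack over a synthetic random-walk OHLCV frame; the
# data below is an assumption purely for demonstration.
if __name__ == "__main__":
    idx = pd.date_range("2024-01-01", periods=200, freq="5min")
    close = pd.Series(40_000 + np.random.randn(200).cumsum() * 50, index=idx)
    demo = pd.DataFrame({
        "open": close, "high": close * 1.001,
        "low": close * 0.999, "close": close,
        "volume": np.random.rand(200) * 10 + 1,
    })
    demo = TechnicalIndicators.add_all_indicators(demo)
    print(TechnicalIndicators.get_latest_indicators(demo))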

502
analysis/llm_context.py Normal file
View File

@ -0,0 +1,502 @@
"""
LLM Context Builder - Generate structured market analysis for LLM decision making
"""
import logging
from typing import Dict, Any, Optional
from datetime import datetime
import pandas as pd
from .data_reader import MarketDataReader
from .indicators import TechnicalIndicators
from .market_structure import MarketStructureAnalyzer
from .orderflow import OrderFlowAnalyzer
from .config import config
# Import QuantitativeSignalGenerator for scoring
import sys
sys.path.insert(0, '/app')
from signals.quantitative import QuantitativeSignalGenerator
logger = logging.getLogger(__name__)
class LLMContextBuilder:
"""Build structured context for LLM trading decisions"""
def __init__(self):
self.data_reader = MarketDataReader()
def build_full_context(self, symbol: str = "BTCUSDT") -> Dict[str, Any]:
"""
Build complete market context for LLM analysis
Args:
symbol: Trading symbol (default: BTCUSDT)
Returns:
Dict with structured market analysis
"""
try:
# Fetch multi-timeframe data
mtf_data = self.data_reader.get_multi_timeframe_data()
if '5m' not in mtf_data or mtf_data['5m'].empty:
logger.error("No 5m data available for analysis")
return self._empty_context()
# Use 5m as primary timeframe for real-time analysis
df_5m = mtf_data['5m']
# Add technical indicators
df_5m = TechnicalIndicators.add_all_indicators(df_5m)
# Get current price
current_price = float(df_5m.iloc[-1]['close'])
# Fetch order book data
depth_data = self.data_reader.read_latest_depth()
# Build context sections
context = {
'timestamp': datetime.now().isoformat(),
'symbol': symbol,
'current_price': round(current_price, 2),
'market_state': self._build_market_state(df_5m, mtf_data),
'key_prices': self._build_key_prices(df_5m, current_price),
'momentum': self._build_momentum_analysis(df_5m, depth_data),
'volatility_analysis': self._build_volatility_analysis(df_5m),
'volume_analysis': self._build_volume_analysis(df_5m),
'multi_timeframe': self._build_mtf_summary(mtf_data),
'signal_consensus': self._calculate_signal_consensus(df_5m, depth_data),
'risk_metrics': self._build_risk_metrics(df_5m, current_price),
}
logger.info(f"Built LLM context: trend={context['market_state']['trend_direction']}, consensus={context['signal_consensus']}")
return context
except Exception as e:
logger.error(f"Error building LLM context: {e}", exc_info=True)
return self._empty_context()
def _build_market_state(
self, df: pd.DataFrame, mtf_data: Dict[str, pd.DataFrame]
) -> Dict[str, Any]:
"""Build market state section"""
trend_info = MarketStructureAnalyzer.identify_trend(df)
# Get ATR for volatility measure
latest = df.iloc[-1]
atr = latest.get('atr', 0)
current_price = latest['close']
atr_pct = (atr / current_price * 100) if current_price > 0 else 0
# Volatility classification
if atr_pct > 1.5:
vol_status = f"高 (ATR=${atr:.0f}, {atr_pct:.1f}%)"
elif atr_pct > 0.8:
vol_status = f"中等偏高 (ATR=${atr:.0f}, {atr_pct:.1f}%)"
elif atr_pct > 0.5:
vol_status = f"中等 (ATR=${atr:.0f}, {atr_pct:.1f}%)"
else:
vol_status = f"低 (ATR=${atr:.0f}, {atr_pct:.1f}%)"
# Check higher timeframe alignment
htf_alignment = self._check_htf_trend_alignment(mtf_data)
return {
'trend_direction': trend_info.get('direction', 'unknown'),
'trend_strength': trend_info.get('strength', 'weak'),
'market_phase': trend_info.get('phase', '未知'),
'volatility': vol_status,
'adx': trend_info.get('adx', 0),
'higher_timeframe_alignment': htf_alignment,
}
def _build_key_prices(self, df: pd.DataFrame, current_price: float) -> Dict[str, Any]:
"""Build key price levels section"""
sr_levels = MarketStructureAnalyzer.find_support_resistance(df, current_price)
breakout_info = MarketStructureAnalyzer.detect_breakout(df, sr_levels)
# Format prices
support_str = f"${sr_levels['nearest_support']:,.0f}" if sr_levels.get('nearest_support') else "无明显支撑"
resistance_str = f"${sr_levels['nearest_resistance']:,.0f}" if sr_levels.get('nearest_resistance') else "无明显压力"
# Get Bollinger Bands
latest = df.iloc[-1]
bb_upper = latest.get('bb_upper')
bb_lower = latest.get('bb_lower')
return {
'support': support_str,
'support_level': sr_levels.get('nearest_support'),
'resistance': resistance_str,
'resistance_level': sr_levels.get('nearest_resistance'),
'all_support_levels': sr_levels.get('support', []),
'all_resistance_levels': sr_levels.get('resistance', []),
'breakout_status': breakout_info,
'bollinger_upper': round(bb_upper, 2) if bb_upper else None,
'bollinger_lower': round(bb_lower, 2) if bb_lower else None,
}
def _build_momentum_analysis(
self, df: pd.DataFrame, depth_data: Optional[Dict[str, Any]]
) -> Dict[str, Any]:
"""Build momentum analysis section"""
momentum = MarketStructureAnalyzer.calculate_momentum(df)
# RSI status with value
rsi_display = f"{momentum['rsi_status']} ({momentum['rsi']:.0f})"
# Order flow analysis
orderflow_summary = "数据不可用"
if depth_data:
imbalance = OrderFlowAnalyzer.analyze_orderbook_imbalance(depth_data)
orderflow_summary = imbalance.get('summary', '数据不可用')
return {
'rsi_status': rsi_display,
'rsi_value': momentum['rsi'],
'rsi_trend': momentum['rsi_trend'],
'macd': momentum['macd_signal'],
'macd_hist': momentum['macd_hist'],
'orderflow': orderflow_summary,
}
def _build_volatility_analysis(self, df: pd.DataFrame) -> Dict[str, Any]:
"""Build volatility analysis"""
latest = df.iloc[-1]
atr = latest.get('atr', 0)
bb_width = latest.get('bb_width', 0)
hist_vol = latest.get('hist_vol', 0)
# Bollinger Band squeeze detection
if bb_width < 0.02:
bb_status = '极度收窄 (即将突破)'
elif bb_width < 0.04:
bb_status = '收窄'
elif bb_width > 0.08:
bb_status = '扩张 (高波动)'
else:
bb_status = '正常'
return {
'atr': round(atr, 2),
'bb_width': round(bb_width, 4),
'bb_status': bb_status,
'hist_volatility': round(hist_vol, 2),
}
def _build_volume_analysis(self, df: pd.DataFrame) -> Dict[str, Any]:
"""Build volume analysis"""
latest = df.iloc[-1]
volume_ratio = latest.get('volume_ratio', 1)
obv = latest.get('obv', 0)
# Volume status
if volume_ratio > 2:
volume_status = '异常放量'
elif volume_ratio > 1.5:
volume_status = '显著放量'
elif volume_ratio > 1.1:
volume_status = '温和放量'
elif volume_ratio < 0.5:
volume_status = '显著缩量'
elif volume_ratio < 0.8:
volume_status = '温和缩量'
else:
volume_status = '正常'
# OBV trend
if len(df) >= 5:
obv_5_ago = df.iloc[-5].get('obv', 0)
obv_trend = '上升' if obv > obv_5_ago else '下降'
else:
obv_trend = '中性'
return {
'volume_ratio': round(volume_ratio, 2),
'volume_status': volume_status,
'obv_trend': obv_trend,
}
def _build_mtf_summary(self, mtf_data: Dict[str, pd.DataFrame]) -> Dict[str, Any]:
"""Build comprehensive multi-timeframe summary with detailed indicators and quantitative scores"""
mtf_summary = {}
for timeframe, df in mtf_data.items():
if df.empty:
continue
# Add indicators
df = TechnicalIndicators.add_all_indicators(df)
# Get latest candle
latest = df.iloc[-1]
current_price = latest['close']
# Get trend
trend_info = MarketStructureAnalyzer.identify_trend(df)
# Get momentum
momentum = MarketStructureAnalyzer.calculate_momentum(df)
# Get support/resistance
sr_levels = MarketStructureAnalyzer.find_support_resistance(df, current_price)
# Get ATR
atr = latest.get('atr', 0)
atr_pct = (atr / current_price * 100) if current_price > 0 else 0
# Get volume ratio
volume_ratio = latest.get('volume_ratio', 1)
# ===== NEW: Calculate quantitative scores for this timeframe =====
# Build mini analysis for this timeframe
mini_analysis = {
'current_price': current_price,
'trend_analysis': trend_info,
'momentum': momentum,
'support_resistance': sr_levels,
'breakout': {'has_breakout': False}, # Simplified for now
'orderflow': None, # Orderflow only for 5m
'indicators': {'atr': atr}
}
# Generate quantitative signal for this timeframe
try:
quant_signal = QuantitativeSignalGenerator.generate_signal(mini_analysis)
quant_scores = {
'composite_score': quant_signal.get('composite_score', 0),
'trend_score': quant_signal['scores'].get('trend', 0),
'momentum_score': quant_signal['scores'].get('momentum', 0),
'orderflow_score': quant_signal['scores'].get('orderflow', 0),
'breakout_score': quant_signal['scores'].get('breakout', 0),
'consensus_score': quant_signal.get('consensus_score', 0),
'signal_type': quant_signal.get('signal_type', 'HOLD'),
'confidence': quant_signal.get('confidence', 0),
}
except Exception as e:
logger.warning(f"Failed to calculate quant scores for {timeframe}: {e}")
quant_scores = {
'composite_score': 0,
'trend_score': 0,
'momentum_score': 0,
'orderflow_score': 0,
'breakout_score': 0,
'consensus_score': 0,
'signal_type': 'HOLD',
'confidence': 0,
}
mtf_summary[timeframe] = {
# Trend
'trend_direction': trend_info.get('direction', 'unknown'),
'trend_strength': trend_info.get('strength', 'weak'),
'ema_alignment': trend_info.get('ema_alignment', 'neutral'),
# Momentum
'rsi': round(momentum.get('rsi', 50), 1),
'rsi_status': momentum.get('rsi_status', 'unknown'),
'macd_signal': momentum.get('macd_signal', 'unknown'),
'macd_hist': round(momentum.get('macd_hist', 0), 2),
# Support/Resistance
'support': sr_levels.get('nearest_support'),
'resistance': sr_levels.get('nearest_resistance'),
# Volatility
'atr': round(atr, 2),
'atr_pct': round(atr_pct, 2),
# Volume
'volume_ratio': round(volume_ratio, 2),
# ===== NEW: Quantitative scores =====
'quantitative': quant_scores,
}
return mtf_summary
def _check_htf_trend_alignment(self, mtf_data: Dict[str, pd.DataFrame]) -> str:
"""
Check if higher timeframe trends are aligned
Returns:
Alignment status string
"""
trends = []
for timeframe in ['15m', '1h', '4h']:
if timeframe not in mtf_data or mtf_data[timeframe].empty:
continue
df = TechnicalIndicators.add_all_indicators(mtf_data[timeframe])
trend_info = MarketStructureAnalyzer.identify_trend(df)
trends.append(trend_info['direction'])
if not trends:
return '数据不足'
# Count trend directions
uptrend_count = trends.count('上涨')
downtrend_count = trends.count('下跌')
if uptrend_count == len(trends):
return '完全一致看涨'
elif downtrend_count == len(trends):
return '完全一致看跌'
elif uptrend_count > downtrend_count:
return '多数看涨'
elif downtrend_count > uptrend_count:
return '多数看跌'
else:
return '分歧'
def _calculate_signal_consensus(
self, df: pd.DataFrame, depth_data: Optional[Dict[str, Any]]
) -> float:
"""
Calculate signal consensus score (0-1)
Combines multiple signals to determine overall market conviction
"""
signals = []
# 1. Trend signal (EMA alignment)
latest = df.iloc[-1]
ema_20 = latest.get(f'ema_{config.EMA_FAST}', 0)
ema_50 = latest.get(f'ema_{config.EMA_SLOW}', 0)
if ema_20 > ema_50 * 1.01: # Bullish with buffer
signals.append(1)
elif ema_20 < ema_50 * 0.99: # Bearish with buffer
signals.append(-1)
else:
signals.append(0)
# 2. MACD signal
macd_hist = latest.get('macd_hist', 0)
if macd_hist > 0:
signals.append(1)
elif macd_hist < 0:
signals.append(-1)
else:
signals.append(0)
# 3. RSI signal
rsi = latest.get('rsi', 50)
if 55 < rsi < 70:  # Bullish but not overbought
signals.append(1)
elif 30 < rsi < 45:  # Bearish but not oversold
signals.append(-1)
else:
signals.append(0) # Neutral or extreme
# 4. ADX strength
adx = latest.get('adx', 0)
if adx > 25: # Strong trend
# Confirm with EMA direction
if ema_20 > ema_50:
signals.append(1)
else:
signals.append(-1)
else:
signals.append(0) # Weak trend
# 5. Order flow signal (if available)
if depth_data:
imbalance = OrderFlowAnalyzer.analyze_orderbook_imbalance(depth_data)
imbalance_val = imbalance.get('imbalance', 0)
if imbalance_val > 0.15:
signals.append(1)
elif imbalance_val < -0.15:
signals.append(-1)
else:
signals.append(0)
# Calculate consensus
if not signals:
return 0.5
# Count aligned signals
positive_signals = sum(1 for s in signals if s == 1)
negative_signals = sum(1 for s in signals if s == -1)
total_signals = len(signals)
# Consensus is the proportion of aligned signals
if positive_signals > negative_signals:
consensus = positive_signals / total_signals
elif negative_signals > positive_signals:
consensus = negative_signals / total_signals
else:
consensus = 0.5 # No consensus
return round(consensus, 2)
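# Worked example: component signals [1, 1, 1, 0, -1] (trend, MACD, RSI,
# ADX, order flow) give positive = 3 of 5, so consensus = 3/5 = 0.60;
# a perfectly split [1, -1, 0, 1, -1] falls back to 0.5.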
def _build_risk_metrics(self, df: pd.DataFrame, current_price: float) -> Dict[str, Any]:
"""Build risk management metrics"""
latest = df.iloc[-1]
atr = latest.get('atr', 0)
# Calculate stop loss based on ATR
stop_loss_distance = atr * config.ATR_STOP_MULTIPLIER
stop_loss_pct = (stop_loss_distance / current_price * 100) if current_price > 0 else 0
# Calculate position size based on risk
risk_per_trade_usd = config.ACCOUNT_SIZE_USD * config.MAX_RISK_PCT
position_size_btc = risk_per_trade_usd / stop_loss_distance if stop_loss_distance > 0 else 0
position_size_usd = position_size_btc * current_price
return {
'stop_loss_distance': round(stop_loss_distance, 2),
'stop_loss_pct': round(stop_loss_pct, 2),
'suggested_position_size_btc': round(position_size_btc, 4),
'suggested_position_size_usd': round(position_size_usd, 2),
'risk_reward_ratio': '1:2', # Default, can be calculated based on targets
}
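# Worked example (illustrative values): ATR = $300 with
# ATR_STOP_MULTIPLIER = 1.5 gives a $450 stop distance; an account of
# $10,000 risking 1% per trade ($100) sizes the position at
# 100 / 450 ≈ 0.2222 BTC (about $9,600 notional at a $43,000 price).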
def _empty_context(self) -> Dict[str, Any]:
"""Return empty context when data is unavailable"""
return {
'timestamp': datetime.now().isoformat(),
'error': 'Insufficient data for analysis',
'market_state': {},
'key_prices': {},
'momentum': {},
'signal_consensus': 0.5,
}
def get_simplified_context(self) -> Dict[str, Any]:
"""
Get simplified context matching user's example format
Returns:
Simplified context dict
"""
full_context = self.build_full_context()
if 'error' in full_context:
return full_context
# Extract and simplify to match user's example
return {
'market_state': {
'trend_direction': full_context['market_state']['trend_direction'],
'market_phase': full_context['market_state']['market_phase'],
'volatility': full_context['market_state']['volatility'],
},
'key_prices': {
'support': full_context['key_prices']['support'],
'resistance': full_context['key_prices']['resistance'],
},
'momentum': {
'rsi_status': full_context['momentum']['rsi_status'],
'macd': full_context['momentum']['macd'],
'orderflow': full_context['momentum']['orderflow'],
},
'signal_consensus': full_context['signal_consensus'],
}
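# --- Usage sketch (illustrative; needs Redis populated by the ingestion service) ---
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    builder = LLMContextBuilder()
    ctx = builder.build_full_context("BTCUSDT")
    if "error" not in ctx:
        print(ctx["market_state"]["trend_direction"], ctx["signal_consensus"])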

290
analysis/market_structure.py Normal file
View File

@ -0,0 +1,290 @@
"""
Market structure analysis: trend, support/resistance, breakouts
"""
import logging
from typing import List, Dict, Any, Tuple, Optional
import pandas as pd
import numpy as np
from .config import config
logger = logging.getLogger(__name__)
class MarketStructureAnalyzer:
"""Analyze market structure, S/R levels, and trend"""
@staticmethod
def identify_trend(df: pd.DataFrame) -> Dict[str, Any]:
"""
Identify trend direction and strength
Returns:
Dict with trend info
"""
if df.empty or len(df) < 50:
return {'direction': 'unknown', 'strength': 'weak'}  # Keep 'strength' a string, matching the cases below
latest = df.iloc[-1]
# EMA comparison
ema_20 = latest.get(f'ema_{config.EMA_FAST}', 0)
ema_50 = latest.get(f'ema_{config.EMA_SLOW}', 0)
adx = latest.get('adx', 0)
# Determine direction
if ema_20 > ema_50:
direction = '上涨'
if adx > 25:
strength = 'strong'
elif adx > 20:
strength = 'moderate'
else:
strength = 'weak'
elif ema_20 < ema_50:
direction = '下跌'
if adx > 25:
strength = 'strong'
elif adx > 20:
strength = 'moderate'
else:
strength = 'weak'
else:
direction = '震荡'
strength = 'weak'
# Detect trend phase
rsi = latest.get('rsi', 50)
if direction == '上涨':
if rsi > 70:
phase = '上涨中的强势回调'
elif rsi > 55:
phase = '上涨中'
else:
phase = '上涨后回调'
elif direction == '下跌':
if rsi < 30:
phase = '下跌中的超卖反弹'
elif rsi < 45:
phase = '下跌中'
else:
phase = '下跌后反弹'
else:
phase = '震荡盘整'
return {
'direction': direction,
'strength': strength,
'phase': phase,
'adx': round(adx, 1),
'ema_alignment': 'bullish' if ema_20 > ema_50 else 'bearish',
}
@staticmethod
def find_support_resistance(df: pd.DataFrame, current_price: float) -> Dict[str, Any]:
"""
Find support and resistance levels
Args:
df: DataFrame with OHLCV data
current_price: Current market price
Returns:
Dict with S/R levels
"""
if df.empty or len(df) < config.SR_LOOKBACK:
return {'support': [], 'resistance': []}
lookback_df = df.tail(config.SR_LOOKBACK)
# Find local highs (resistance) and lows (support)
highs = MarketStructureAnalyzer._find_local_extrema(lookback_df['high'], 'high')
lows = MarketStructureAnalyzer._find_local_extrema(lookback_df['low'], 'low')
# Cluster similar levels
support_levels = MarketStructureAnalyzer._cluster_levels(lows, current_price)
resistance_levels = MarketStructureAnalyzer._cluster_levels(highs, current_price)
# Filter to levels near current price (±5%)
support = [s for s in support_levels if s < current_price and s > current_price * 0.95]
resistance = [r for r in resistance_levels if r > current_price and r < current_price * 1.05]
# Sort and get closest
support = sorted(support, reverse=True)[:3] # Top 3 support levels
resistance = sorted(resistance)[:3] # Top 3 resistance levels
return {
'support': [round(s, 2) for s in support],
'resistance': [round(r, 2) for r in resistance],
'nearest_support': round(support[0], 2) if support else None,
'nearest_resistance': round(resistance[0], 2) if resistance else None,
}
@staticmethod
def _find_local_extrema(series: pd.Series, kind: str) -> List[float]:
"""Find local highs or lows"""
extrema = []
for i in range(2, len(series) - 2):
if kind == 'high':
# Local high
if (series.iloc[i] > series.iloc[i-1] and
series.iloc[i] > series.iloc[i-2] and
series.iloc[i] > series.iloc[i+1] and
series.iloc[i] > series.iloc[i+2]):
extrema.append(series.iloc[i])
else:
# Local low
if (series.iloc[i] < series.iloc[i-1] and
series.iloc[i] < series.iloc[i-2] and
series.iloc[i] < series.iloc[i+1] and
series.iloc[i] < series.iloc[i+2]):
extrema.append(series.iloc[i])
return extrema
@staticmethod
def _cluster_levels(levels: List[float], reference_price: float) -> List[float]:
"""Cluster similar price levels"""
if not levels:
return []
tolerance = reference_price * config.SR_TOLERANCE
clustered = []
sorted_levels = sorted(levels)
current_cluster = [sorted_levels[0]]
for level in sorted_levels[1:]:
if abs(level - current_cluster[-1]) < tolerance:
current_cluster.append(level)
else:
# Average the cluster
clustered.append(np.mean(current_cluster))
current_cluster = [level]
# Add last cluster
if current_cluster:
clustered.append(np.mean(current_cluster))
return clustered
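# Worked example (assuming config.SR_TOLERANCE = 0.002): at a reference
# price of 50,000 the tolerance is $100, so sorted levels
# [49,900, 49,950, 50,400] collapse to [49,925.0, 50,400.0] - the first
# two average into one cluster and the third stands alone.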
@staticmethod
def detect_breakout(df: pd.DataFrame, sr_levels: Dict[str, Any]) -> Dict[str, Any]:
"""
Detect if price has broken through S/R levels
Returns:
Dict with breakout info
"""
if df.empty or len(df) < 5:
return {'has_breakout': False}
latest = df.iloc[-1]
current_price = latest['close']
recent_high = df.tail(20)['high'].max()
recent_low = df.tail(20)['low'].min()
# Check resistance breakout
resistance = sr_levels.get('nearest_resistance')
if resistance and current_price > resistance:
# Confirm breakout (price closed above resistance)
return {
'has_breakout': True,
'type': 'resistance_breakout',
'level': resistance,
'confirmation': '价格突破压力位' if latest['close'] > resistance else '未确认',
}
# Check support breakdown
support = sr_levels.get('nearest_support')
if support and current_price < support:
return {
'has_breakout': True,
'type': 'support_breakdown',
'level': support,
'confirmation': '价格跌破支撑位' if latest['close'] < support else '未确认',
}
# Check if approaching key level
if resistance and abs(current_price - resistance) / resistance < 0.005: # Within 0.5%
return {
'has_breakout': False,
'approaching': 'resistance',
'level': resistance,
'distance_pct': round((resistance - current_price) / current_price * 100, 2),
}
if support and abs(current_price - support) / support < 0.005:
return {
'has_breakout': False,
'approaching': 'support',
'level': support,
'distance_pct': round((current_price - support) / current_price * 100, 2),
}
return {'has_breakout': False}
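# Worked example: with nearest resistance at 50,500 and price 50,480,
# nothing has broken out, but 'approaching': 'resistance' is returned
# with distance_pct = (50,500 - 50,480) / 50,480 * 100 ≈ 0.04.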
@staticmethod
def calculate_momentum(df: pd.DataFrame) -> Dict[str, Any]:
"""
Calculate momentum indicators
Returns:
Dict with momentum analysis
"""
if df.empty:
return {}
latest = df.iloc[-1]
prev = df.iloc[-2] if len(df) > 1 else latest
rsi = latest.get('rsi', 50)
macd_hist = latest.get('macd_hist', 0)
prev_macd_hist = prev.get('macd_hist', 0)
# RSI status
if rsi > 70:
rsi_status = '超买'
elif rsi > 60:
rsi_status = '强势'
elif rsi > 50:
rsi_status = '中性偏强'
elif rsi > 40:
rsi_status = '中性偏弱'
elif rsi > 30:
rsi_status = '弱势'
else:
rsi_status = '超卖'
# MACD signal
if macd_hist > 0 and prev_macd_hist <= 0:
macd_signal = '金叉'
elif macd_hist < 0 and prev_macd_hist >= 0:
macd_signal = '死叉'
elif macd_hist > 0:
if macd_hist > prev_macd_hist:
macd_signal = '金叉扩大'
else:
macd_signal = '金叉收窄'
else:
if abs(macd_hist) > abs(prev_macd_hist):
macd_signal = '死叉扩大'
else:
macd_signal = '死叉收窄'
# RSI trend
if len(df) >= 5:
rsi_5_ago = df.iloc[-5].get('rsi', 50)
rsi_trend = '上升中' if rsi > rsi_5_ago else '下降中'
else:
rsi_trend = '中性'
return {
'rsi': round(rsi, 1),
'rsi_status': rsi_status,
'rsi_trend': rsi_trend,
'macd_signal': macd_signal,
'macd_hist': round(macd_hist, 4),
}

307
analysis/orderflow.py Normal file
View File

@ -0,0 +1,307 @@
"""
Order flow analysis based on order book depth data
"""
import logging
from typing import Dict, Any, List, Tuple, Optional
import numpy as np
from .config import config
logger = logging.getLogger(__name__)
class OrderFlowAnalyzer:
"""Analyze order flow and liquidity from order book data"""
@staticmethod
def analyze_orderbook_imbalance(depth_data: Dict[str, Any]) -> Dict[str, Any]:
"""
Calculate order book imbalance (buy vs sell pressure)
Args:
depth_data: Dict with 'bids' and 'asks' arrays
Returns:
Dict with imbalance metrics
"""
if not depth_data or 'bids' not in depth_data or 'asks' not in depth_data:
return {'imbalance': 0, 'status': '未知'}
bids = depth_data['bids']
asks = depth_data['asks']
if not bids or not asks:
return {'imbalance': 0, 'status': '未知'}
# Calculate total bid/ask volume
total_bid_volume = sum(float(qty) for _, qty in bids)
total_ask_volume = sum(float(qty) for _, qty in asks)
# Calculate total bid/ask value (price * quantity)
total_bid_value = sum(float(price) * float(qty) for price, qty in bids)
total_ask_value = sum(float(price) * float(qty) for price, qty in asks)
# Imbalance ratio: (bids - asks) / (bids + asks)
# Positive = buy pressure, Negative = sell pressure
volume_imbalance = (total_bid_volume - total_ask_volume) / (total_bid_volume + total_ask_volume) if (total_bid_volume + total_ask_volume) > 0 else 0
value_imbalance = (total_bid_value - total_ask_value) / (total_bid_value + total_ask_value) if (total_bid_value + total_ask_value) > 0 else 0
# Average the two imbalance measures
imbalance = (volume_imbalance + value_imbalance) / 2
# Determine status
if imbalance > config.ORDERBOOK_IMBALANCE_THRESHOLD:
status = '强买方主导'
pressure = 'buy'
elif imbalance < -config.ORDERBOOK_IMBALANCE_THRESHOLD:
status = '强卖方主导'
pressure = 'sell'
elif imbalance > 0.05:
status = '买方偏强'
pressure = 'buy_slight'
elif imbalance < -0.05:
status = '卖方偏强'
pressure = 'sell_slight'
else:
status = '买卖平衡'
pressure = 'neutral'
return {
'imbalance': round(imbalance, 3),
'imbalance_pct': round(imbalance * 100, 1),
'status': status,
'pressure': pressure,
'total_bid_volume': round(total_bid_volume, 2),
'total_ask_volume': round(total_ask_volume, 2),
'total_bid_value': round(total_bid_value, 2),
'total_ask_value': round(total_ask_value, 2),
}
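# Worked example (illustrative numbers): total bid volume 60 vs total ask
# volume 40 gives a volume imbalance of (60 - 40) / (60 + 40) = +0.20;
# with a value imbalance of +0.10 the reported figure is
# (0.20 + 0.10) / 2 = +0.15, and the status label then depends on
# config.ORDERBOOK_IMBALANCE_THRESHOLD.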
@staticmethod
def analyze_liquidity_depth(depth_data: Dict[str, Any], current_price: float) -> Dict[str, Any]:
"""
Analyze liquidity at different price levels
Args:
depth_data: Dict with 'bids' and 'asks' arrays
current_price: Current market price
Returns:
Dict with liquidity metrics
"""
if not depth_data or 'bids' not in depth_data or 'asks' not in depth_data:
return {}
bids = depth_data['bids']
asks = depth_data['asks']
if not bids or not asks:
return {}
# Calculate cumulative liquidity at different distances from mid price
bid_liquidity_levels = OrderFlowAnalyzer._calculate_liquidity_at_levels(
bids, current_price, side='bid'
)
ask_liquidity_levels = OrderFlowAnalyzer._calculate_liquidity_at_levels(
asks, current_price, side='ask'
)
# Find bid and ask walls (largest orders)
bid_wall = OrderFlowAnalyzer._find_largest_order(bids)
ask_wall = OrderFlowAnalyzer._find_largest_order(asks)
# Calculate spread
best_bid = float(bids[0][0]) if bids else 0
best_ask = float(asks[0][0]) if asks else 0
spread = best_ask - best_bid
spread_pct = (spread / current_price * 100) if current_price > 0 else 0
return {
'bid_liquidity': bid_liquidity_levels,
'ask_liquidity': ask_liquidity_levels,
'bid_wall': bid_wall,
'ask_wall': ask_wall,
'spread': round(spread, 2),
'spread_pct': round(spread_pct, 4),
'best_bid': round(best_bid, 2),
'best_ask': round(best_ask, 2),
}
@staticmethod
def _calculate_liquidity_at_levels(
orders: List[List[float]],
current_price: float,
side: str
) -> Dict[str, float]:
"""
Calculate cumulative liquidity at 0.1%, 0.5%, 1%, 2% price levels
Args:
orders: List of [price, quantity] pairs
current_price: Current market price
side: 'bid' or 'ask'
Returns:
Dict with liquidity at different levels
"""
levels = [0.001, 0.005, 0.01, 0.02] # 0.1%, 0.5%, 1%, 2%
liquidity = {f'{level*100}%': 0 for level in levels}
for price, qty in orders:
price = float(price)
qty = float(qty)
# Calculate distance from current price
if side == 'bid':
distance = (current_price - price) / current_price
else: # ask
distance = (price - current_price) / current_price
# Add to appropriate levels
for level in levels:
if distance <= level:
liquidity[f'{level*100}%'] += qty
# Round values
return {k: round(v, 2) for k, v in liquidity.items()}
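# Worked example (bid side at price 50,000): a bid at 49,970 is 0.06%
# away and counts toward all four buckets; a bid at 49,600 (0.8% away)
# counts only toward the 1% and 2% buckets.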
@staticmethod
def _find_largest_order(orders: List[List[float]]) -> Optional[Dict[str, Any]]:
"""
Find the largest order (potential wall)
Returns:
Dict with price, quantity, and value of largest order
"""
if not orders:
return None
largest = max(orders, key=lambda x: float(x[1]))
price = float(largest[0])
qty = float(largest[1])
return {
'price': round(price, 2),
'quantity': round(qty, 2),
'value': round(price * qty, 2),
}
@staticmethod
def detect_large_orders(depth_data: Dict[str, Any]) -> Dict[str, Any]:
"""
Detect large orders that could indicate institutional activity
Returns:
Dict with large order detection results
"""
if not depth_data or 'bids' not in depth_data or 'asks' not in depth_data:
return {'has_large_orders': False}
bids = depth_data['bids']
asks = depth_data['asks']
large_bids = []
large_asks = []
# Find orders exceeding the large order threshold
for price, qty in bids:
price = float(price)
qty = float(qty)
value = price * qty
if value >= config.LARGE_ORDER_THRESHOLD_USD:
large_bids.append({
'price': round(price, 2),
'quantity': round(qty, 2),
'value': round(value, 2),
})
for price, qty in asks:
price = float(price)
qty = float(qty)
value = price * qty
if value >= config.LARGE_ORDER_THRESHOLD_USD:
large_asks.append({
'price': round(price, 2),
'quantity': round(qty, 2),
'value': round(value, 2),
})
has_large_orders = len(large_bids) > 0 or len(large_asks) > 0
# Determine dominant side
if len(large_bids) > len(large_asks) * 1.5:
dominant_side = '买方'
elif len(large_asks) > len(large_bids) * 1.5:
dominant_side = '卖方'
else:
dominant_side = '均衡'
return {
'has_large_orders': has_large_orders,
'large_bids_count': len(large_bids),
'large_asks_count': len(large_asks),
'large_bids': large_bids[:3], # Top 3 largest bids
'large_asks': large_asks[:3], # Top 3 largest asks
'dominant_side': dominant_side,
}
@staticmethod
def calculate_orderflow_strength(
imbalance: Dict[str, Any],
large_orders: Dict[str, Any],
liquidity: Dict[str, Any]
) -> Dict[str, Any]:
"""
Calculate overall order flow strength and direction
Args:
imbalance: Orderbook imbalance metrics
large_orders: Large order detection results
liquidity: Liquidity depth metrics
Returns:
Dict with orderflow strength metrics
"""
# Get imbalance percentage
imbalance_pct = imbalance.get('imbalance_pct', 0)
pressure = imbalance.get('pressure', 'neutral')
# Check for large order bias
large_bid_count = large_orders.get('large_bids_count', 0)
large_ask_count = large_orders.get('large_asks_count', 0)
large_order_bias = large_bid_count - large_ask_count
# Check spread (tight spread = healthy market)
spread_pct = liquidity.get('spread_pct', 0)
spread_status = '紧密' if spread_pct < 0.01 else '正常' if spread_pct < 0.05 else '宽松'
# Calculate composite strength score (-100 to +100)
# Positive = bullish, Negative = bearish
strength_score = imbalance_pct + (large_order_bias * 5)
strength_score = max(-100, min(100, strength_score)) # Clamp to [-100, 100]
# Determine strength category
if strength_score > 30:
strength = '强烈看涨'
elif strength_score > 15:
strength = '看涨'
elif strength_score > 5:
strength = '偏涨'
elif strength_score < -30:
strength = '强烈看跌'
elif strength_score < -15:
strength = '看跌'
elif strength_score < -5:
strength = '偏跌'
else:
strength = '中性'
return {
'strength_score': round(strength_score, 1),
'strength': strength,
'pressure': pressure,
'spread_status': spread_status,
'large_order_bias': large_order_bias,
'summary': f"{imbalance.get('status', '')} ({imbalance_pct:+.1f}%)",
}

20
analysis/requirements.txt Normal file
View File

@ -0,0 +1,20 @@
# Data processing
pandas==2.1.4
numpy==1.26.2
# Technical analysis (the code imports the `ta` package: `from ta import trend, ...`)
ta==0.11.0
# Redis client
redis==5.0.1
# JSON handling
orjson==3.9.10
# Configuration
pydantic==2.5.3
pydantic-settings==2.1.0
python-dotenv==1.0.0
# Utilities
python-dateutil==2.8.2

3
config/__init__.py Normal file
View File

@ -0,0 +1,3 @@
from .settings import settings
__all__ = ["settings"]

75
config/settings.py Normal file
View File

@ -0,0 +1,75 @@
"""
Configuration settings for Binance WebSocket data ingestion system
"""
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
"""Application settings with validation"""
model_config = SettingsConfigDict(
env_file=".env",
case_sensitive=True,
extra="ignore" # Ignore extra fields from environment
)
# Binance WebSocket Configuration
BINANCE_WS_BASE_URL: str = "wss://fstream.binance.com"
SYMBOL: str = "btcusdt"
# Stream subscriptions
KLINE_INTERVALS: str = "5m,15m,1h,4h" # Multiple kline intervals (comma-separated)
DEPTH_LEVEL: int = 20 # Top 20 order book levels
# Redis Configuration
REDIS_HOST: str = "redis"
REDIS_PORT: int = 6379
REDIS_DB: int = 0
REDIS_PASSWORD: str = ""
# Redis Stream Keys (prefix, actual keys are dynamic based on intervals)
REDIS_STREAM_KLINE_PREFIX: str = "binance:raw:kline" # Will be: binance:raw:kline:5m, etc.
REDIS_STREAM_DEPTH: str = "binance:raw:depth:20"
REDIS_STREAM_TRADE: str = "binance:raw:trade"
@property
def kline_intervals_list(self) -> list:
"""Parse kline intervals from comma-separated string"""
return [interval.strip() for interval in self.KLINE_INTERVALS.split(',')]
# Stream Configuration
REDIS_STREAM_MAXLEN: int = 10000 # Keep last 10k messages per stream
# Reconnection Strategy
RECONNECT_INITIAL_DELAY: float = 1.0 # Initial delay in seconds
RECONNECT_MAX_DELAY: float = 60.0 # Max delay in seconds
RECONNECT_MULTIPLIER: float = 2.0 # Exponential backoff multiplier
MAX_RECONNECT_ATTEMPTS: int = 100 # -1 for unlimited
# Memory Protection
MAX_BUFFER_SIZE: int = 1000 # Max messages in memory buffer
RATE_LIMIT_MESSAGES_PER_SEC: int = 1000 # Max messages processed per second
# Message Deduplication
DEDUP_CACHE_SIZE: int = 10000 # Size of deduplication cache
DEDUP_TTL_SECONDS: int = 300 # TTL for dedup entries (5 minutes)
# Monitoring
HEALTH_CHECK_INTERVAL: int = 30 # Health check interval in seconds
LOG_LEVEL: str = "INFO"
# LLM Gate Configuration (minimal gating: frequency-first, with a quantitative pre-screen)
LLM_GATE_ENABLED: bool = True  # Enable the LLM gate
# Data requirements
LLM_MIN_CANDLES: int = 100  # Minimum number of candles required
# Signal quality (minimal - only the composite score is checked)
LLM_MIN_COMPOSITE_SCORE: float = 0.0  # 0.0 disables the score gate - the LLM is called every time
# Frequency limits (the core control!)
LLM_MAX_CALLS_PER_DAY: int = 12  # Maximum LLM calls per day
LLM_MIN_INTERVAL_MINUTES: int = 15  # Minimum interval between calls (minutes)
settings = Settings()
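# Usage sketch: values resolve from the process environment, then .env, then
# the defaults above (case_sensitive=True, so names must match exactly):
#
#   KLINE_INTERVALS="5m,1h" python -c \
#       "from config import settings; print(settings.kline_intervals_list)"
#   # -> ['5m', '1h']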

12
core/__init__.py Normal file
View File

@ -0,0 +1,12 @@
from .websocket_client import BinanceWebSocketClient
from .redis_writer import RedisStreamWriter
from .deduplicator import MessageDeduplicator
from .rate_limiter import RateLimiter, BufferedMessageProcessor
__all__ = [
"BinanceWebSocketClient",
"RedisStreamWriter",
"MessageDeduplicator",
"RateLimiter",
"BufferedMessageProcessor",
]

176
core/deduplicator.py Normal file
View File

@ -0,0 +1,176 @@
"""
Message deduplication using event time (E field) and LRU cache
"""
import logging
import time
from collections import OrderedDict
from typing import Dict, Any, Optional
from config import settings
logger = logging.getLogger(__name__)
class MessageDeduplicator:
"""
LRU-based message deduplicator with TTL support.
Uses the 'E' field (event time) from Binance messages as unique identifier.
Automatically evicts old entries to prevent memory leaks.
"""
def __init__(
self,
max_size: int = settings.DEDUP_CACHE_SIZE,
ttl_seconds: int = settings.DEDUP_TTL_SECONDS,
):
"""
Initialize deduplicator
Args:
max_size: Maximum number of entries to keep in cache
ttl_seconds: Time-to-live for cache entries in seconds
"""
self.max_size = max_size
self.ttl_seconds = ttl_seconds
# OrderedDict for LRU cache: {message_key: timestamp}
self._cache: OrderedDict[str, float] = OrderedDict()
# Statistics
self.stats = {
"total_checked": 0,
"duplicates_found": 0,
"cache_evictions": 0,
"ttl_evictions": 0,
}
def _generate_key(self, message: Dict[str, Any]) -> Optional[str]:
"""
Generate unique key for message
Uses combination of:
- Stream name (_stream field)
- Event time (E field)
- Symbol (s field)
Args:
message: Message data
Returns:
Unique key or None if key cannot be generated
"""
try:
# Get stream name
stream = message.get("_stream", "unknown")
# Get event time (E field) - primary dedup identifier
event_time = message.get("E")
if not event_time:
# Fallback to T field for some message types
event_time = message.get("T")
if not event_time:
logger.warning(f"No event time found in message: {message}")
return None
# Get symbol (s field)
symbol = message.get("s", "")
# Create composite key
key = f"{stream}:{symbol}:{event_time}"
return key
except Exception as e:
logger.error(f"Error generating dedup key: {e}")
return None
def _evict_expired(self) -> None:
"""Remove expired entries based on TTL"""
if not self._cache:
return
current_time = time.time()
expired_keys = []
# Find expired entries
for key, timestamp in self._cache.items():
if current_time - timestamp > self.ttl_seconds:
expired_keys.append(key)
else:
# The cache is ordered by stored timestamp (hits are re-inserted at the
# end), so once we reach a non-expired entry the rest are newer too
break
# Remove expired entries
for key in expired_keys:
del self._cache[key]
self.stats["ttl_evictions"] += 1
def _evict_lru(self) -> None:
"""Remove least recently used entry"""
if self._cache:
self._cache.popitem(last=False)  # Evict the least recently used entry (front of the OrderedDict)
self.stats["cache_evictions"] += 1
def is_duplicate(self, message: Dict[str, Any]) -> bool:
"""
Check if message is a duplicate
Args:
message: Message data to check
Returns:
True if duplicate, False if new message
"""
self.stats["total_checked"] += 1
# Generate unique key
key = self._generate_key(message)
if not key:
# If we can't generate a key, assume it's not a duplicate
return False
# Clean up expired entries periodically
if self.stats["total_checked"] % 100 == 0:
self._evict_expired()
# Check if key exists in cache
current_time = time.time()
if key in self._cache:
# Update timestamp (move to end for LRU)
del self._cache[key]
self._cache[key] = current_time
self.stats["duplicates_found"] += 1
return True
# New message - add to cache
self._cache[key] = current_time
# Enforce max size
if len(self._cache) > self.max_size:
self._evict_lru()
return False
def clear(self) -> None:
"""Clear all cache entries"""
self._cache.clear()
logger.info("Deduplication cache cleared")
def get_stats(self) -> Dict[str, Any]:
"""Get deduplication statistics"""
duplicate_rate = (
self.stats["duplicates_found"] / self.stats["total_checked"]
if self.stats["total_checked"] > 0
else 0.0
)
return {
**self.stats,
"cache_size": len(self._cache),
"duplicate_rate": f"{duplicate_rate:.2%}",
}
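# --- Usage sketch (synthetic payload, field values illustrative) ---
if __name__ == "__main__":
    dedup = MessageDeduplicator(max_size=1_000, ttl_seconds=60)
    msg = {"_stream": "btcusdt@kline_5m", "E": 1700000000000, "s": "BTCUSDT"}
    print(dedup.is_duplicate(msg))  # False - first sighting
    print(dedup.is_duplicate(msg))  # True  - same stream:symbol:event-time key
    print(dedup.get_stats())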

209
core/rate_limiter.py Normal file
View File

@ -0,0 +1,209 @@
"""
Rate limiter and buffer manager for memory leak protection
"""
import asyncio
import logging
import time
from typing import Any, Dict, List, Optional
from collections import deque
from config import settings
logger = logging.getLogger(__name__)
class RateLimiter:
"""
Token bucket rate limiter for message processing.
Prevents overwhelming downstream systems and protects against memory leaks.
"""
def __init__(self, max_rate: int = settings.RATE_LIMIT_MESSAGES_PER_SEC):
"""
Initialize rate limiter
Args:
max_rate: Maximum messages per second
"""
self.max_rate = max_rate
self.tokens = max_rate
self.last_update = time.time()
self.lock = asyncio.Lock()
async def acquire(self) -> bool:
"""
Acquire token for processing a message
Returns:
True if token acquired, False if rate limit exceeded
"""
async with self.lock:
now = time.time()
elapsed = now - self.last_update
# Refill tokens based on elapsed time
self.tokens = min(
self.max_rate,
self.tokens + elapsed * self.max_rate
)
self.last_update = now
if self.tokens >= 1:
self.tokens -= 1
return True
return False
async def wait(self) -> None:
"""Wait until a token is available"""
while not await self.acquire():
await asyncio.sleep(0.01) # 10ms sleep
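# Usage sketch: awaiting `wait()` before each message throttles a consumer
# to roughly max_rate messages per second (the bucket refills continuously,
# so short bursts up to the bucket size still pass):
#
#   limiter = RateLimiter(max_rate=500)
#   async def handle(msg):
#       await limiter.wait()  # suspends in ~10ms steps until a token frees up
#       ...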
class BufferedMessageProcessor:
"""
Buffered message processor with memory protection.
Features:
- Bounded buffer to prevent memory exhaustion
- Batch processing for efficiency
- Overflow detection and alerts
- Backpressure handling
"""
def __init__(
self,
max_buffer_size: int = settings.MAX_BUFFER_SIZE,
batch_size: int = 100,
batch_timeout: float = 1.0,
):
"""
Initialize buffered processor
Args:
max_buffer_size: Maximum messages in buffer
batch_size: Number of messages to batch before processing
batch_timeout: Max time to wait before processing partial batch (seconds)
"""
self.max_buffer_size = max_buffer_size
self.batch_size = batch_size
self.batch_timeout = batch_timeout
# Bounded deque for FIFO buffer
self.buffer: deque = deque(maxlen=max_buffer_size)
self.lock = asyncio.Lock()
# Statistics
self.stats = {
"messages_buffered": 0,
"messages_processed": 0,
"messages_dropped": 0,
"buffer_overflows": 0,
"current_buffer_size": 0,
"max_buffer_size_reached": 0,
}
async def add_message(self, message: Dict[str, Any]) -> bool:
"""
Add message to buffer
Args:
message: Message to buffer
Returns:
True if added successfully, False if buffer is full (message dropped)
"""
async with self.lock:
current_size = len(self.buffer)
# Check if buffer is full
if current_size >= self.max_buffer_size:
self.stats["messages_dropped"] += 1
self.stats["buffer_overflows"] += 1
if self.stats["buffer_overflows"] % 100 == 1:
logger.warning(
f"Buffer overflow! Dropped message. "
f"Buffer size: {current_size}/{self.max_buffer_size}"
)
return False
# Add to buffer
self.buffer.append(message)
self.stats["messages_buffered"] += 1
self.stats["current_buffer_size"] = len(self.buffer)
# Track max buffer size
if current_size > self.stats["max_buffer_size_reached"]:
self.stats["max_buffer_size_reached"] = current_size
return True
async def get_batch(self, timeout: Optional[float] = None) -> List[Dict[str, Any]]:
"""
Get batch of messages from buffer
Args:
timeout: Max time to wait for batch (seconds)
Returns:
List of messages (may be less than batch_size)
"""
timeout = timeout or self.batch_timeout
start_time = time.time()
batch = []
while len(batch) < self.batch_size:
async with self.lock:
if self.buffer:
batch.append(self.buffer.popleft())
self.stats["current_buffer_size"] = len(self.buffer)
# Check timeout
if time.time() - start_time >= timeout:
break
# If buffer is empty and we have some messages, return them
if not self.buffer and batch:
break
# Small sleep to avoid busy waiting
if not batch:
await asyncio.sleep(0.01)
if batch:
self.stats["messages_processed"] += len(batch)
return batch
def get_buffer_usage(self) -> float:
"""Get buffer usage percentage (0.0 to 1.0)"""
return len(self.buffer) / self.max_buffer_size if self.max_buffer_size > 0 else 0.0
def is_buffer_critical(self, threshold: float = 0.8) -> bool:
"""Check if buffer usage is above critical threshold"""
return self.get_buffer_usage() > threshold
def get_stats(self) -> Dict[str, Any]:
"""Get processor statistics"""
buffer_usage = self.get_buffer_usage()
drop_rate = (
self.stats["messages_dropped"] / self.stats["messages_buffered"]
if self.stats["messages_buffered"] > 0
else 0.0
)
return {
**self.stats,
"buffer_usage": f"{buffer_usage:.1%}",
"drop_rate": f"{drop_rate:.2%}",
}
async def clear(self) -> None:
"""Clear all buffered messages"""
async with self.lock:
self.buffer.clear()
self.stats["current_buffer_size"] = 0
logger.info("Message buffer cleared")

247
core/redis_writer.py Normal file
View File

@ -0,0 +1,247 @@
"""
Redis Stream writer with batch support and error handling
"""
import asyncio
import logging
from typing import Dict, Any, Optional
import orjson
import redis.asyncio as redis
from redis.exceptions import RedisError, ConnectionError as RedisConnectionError
from config import settings
logger = logging.getLogger(__name__)
class RedisStreamWriter:
"""
Redis Stream writer for real-time market data.
Features:
- Async Redis client with connection pooling
- Automatic stream trimming (MAXLEN)
- JSON serialization with orjson
- Connection retry logic
- Performance metrics
"""
def __init__(self):
self.redis_client: Optional[redis.Redis] = None
self.is_connected = False
# Statistics
self.stats = {
"messages_written": 0,
"kline_count": 0,
"depth_count": 0,
"trade_count": 0,
"errors": 0,
}
async def connect(self) -> None:
"""Establish Redis connection"""
try:
self.redis_client = redis.Redis(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=settings.REDIS_DB,
password=settings.REDIS_PASSWORD if settings.REDIS_PASSWORD else None,
encoding="utf-8",
decode_responses=False, # We'll handle JSON encoding
socket_connect_timeout=5,
socket_keepalive=True,
health_check_interval=30,
)
# Test connection
await self.redis_client.ping()
self.is_connected = True
logger.info("✓ Redis connection established")
except RedisConnectionError as e:
logger.error(f"Failed to connect to Redis: {e}")
raise
except Exception as e:
logger.error(f"Unexpected error connecting to Redis: {e}")
raise
async def close(self) -> None:
"""Close Redis connection"""
if self.redis_client:
await self.redis_client.close()
self.is_connected = False
logger.info("Redis connection closed")
def _serialize_message(self, message: Dict[str, Any]) -> bytes:
"""
Serialize message to JSON bytes using orjson
Args:
message: Message data
Returns:
JSON bytes
"""
return orjson.dumps(message)
def _determine_stream_key(self, message: Dict[str, Any]) -> Optional[str]:
"""
Determine which Redis Stream to write to based on message type
Args:
message: Message data
Returns:
Redis stream key or None if unknown type
"""
stream = message.get("_stream", "")
# Kline stream - extract interval from stream name
if "kline" in stream or ("e" in message and message["e"] == "kline"):
# Extract interval from stream name (e.g., "btcusdt@kline_5m" -> "5m")
if "@kline_" in stream:
interval = stream.split("@kline_")[1]
return f"{settings.REDIS_STREAM_KLINE_PREFIX}:{interval}"
# Fallback: extract from message data
elif "k" in message and "i" in message["k"]:
interval = message["k"]["i"]
return f"{settings.REDIS_STREAM_KLINE_PREFIX}:{interval}"
# Depth stream
if "depth" in stream or ("e" in message and message["e"] == "depthUpdate"):
return settings.REDIS_STREAM_DEPTH
# Trade stream
if "trade" in stream or "aggTrade" in stream or ("e" in message and message["e"] in ["trade", "aggTrade"]):
return settings.REDIS_STREAM_TRADE
logger.warning(f"Unknown message type, stream: {stream}, message: {message}")
return None
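# Routing examples:
#   "btcusdt@kline_15m"     -> binance:raw:kline:15m
#   "btcusdt@depth20@100ms" -> binance:raw:depth:20
#   "btcusdt@aggTrade"      -> binance:raw:trade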
async def write_message(self, message: Dict[str, Any]) -> bool:
"""
Write single message to appropriate Redis Stream
Args:
message: Message data
Returns:
True if successful, False otherwise
"""
if not self.is_connected or not self.redis_client:
logger.error("Redis client not connected")
return False
try:
# Determine stream key
stream_key = self._determine_stream_key(message)
if not stream_key:
return False
# Serialize message
message_json = self._serialize_message(message)
# Write to Redis Stream with MAXLEN
await self.redis_client.xadd(
name=stream_key,
fields={"data": message_json},
maxlen=settings.REDIS_STREAM_MAXLEN,
approximate=True, # Use ~ for better performance
)
# Update statistics
self.stats["messages_written"] += 1
if "kline" in stream_key:
self.stats["kline_count"] += 1
elif "depth" in stream_key:
self.stats["depth_count"] += 1
elif "trade" in stream_key:
self.stats["trade_count"] += 1
return True
except RedisError as e:
logger.error(f"Redis error writing message: {e}")
self.stats["errors"] += 1
return False
except Exception as e:
logger.error(f"Unexpected error writing message: {e}", exc_info=True)
self.stats["errors"] += 1
return False
async def write_batch(self, messages: list[Dict[str, Any]]) -> int:
"""
Write batch of messages using pipeline
Args:
messages: List of messages
Returns:
Number of successfully written messages
"""
if not self.is_connected or not self.redis_client:
logger.error("Redis client not connected")
return 0
if not messages:
return 0
try:
# Group messages by stream key
streams: Dict[str, list[bytes]] = {}
for message in messages:
stream_key = self._determine_stream_key(message)
if not stream_key:
continue
message_json = self._serialize_message(message)
if stream_key not in streams:
streams[stream_key] = []
streams[stream_key].append(message_json)
# Write using pipeline
async with self.redis_client.pipeline(transaction=False) as pipe:
for stream_key, stream_messages in streams.items():
for msg in stream_messages:
pipe.xadd(
name=stream_key,
fields={"data": msg},
maxlen=settings.REDIS_STREAM_MAXLEN,
approximate=True,
)
await pipe.execute()
# Update statistics
total_written = sum(len(msgs) for msgs in streams.values())
self.stats["messages_written"] += total_written
return total_written
except RedisError as e:
logger.error(f"Redis error in batch write: {e}")
self.stats["errors"] += 1
return 0
except Exception as e:
logger.error(f"Unexpected error in batch write: {e}", exc_info=True)
self.stats["errors"] += 1
return 0
async def health_check(self) -> bool:
"""Check Redis connection health"""
try:
if not self.redis_client:
return False
await self.redis_client.ping()
return True
except Exception:
return False
def get_stats(self) -> Dict[str, Any]:
"""Get writer statistics"""
return {**self.stats}
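# --- Usage sketch (needs a reachable Redis; payload illustrative) ---
# Read the result back with: redis-cli XREVRANGE binance:raw:kline:5m + - COUNT 1
if __name__ == "__main__":
    async def _demo() -> None:
        writer = RedisStreamWriter()
        await writer.connect()
        ok = await writer.write_message({
            "_stream": "btcusdt@kline_5m",
            "e": "kline",
            "k": {"i": "5m", "c": "43000.5"},
        })
        print(ok, writer.get_stats())
        await writer.close()

    asyncio.run(_demo())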

209
core/websocket_client.py Normal file
View File

@ -0,0 +1,209 @@
"""
Binance WebSocket Client with auto-reconnection and exponential backoff
"""
import asyncio
import logging
import json
import time
from typing import Callable, Optional, Dict, Any
from datetime import datetime
import websockets
from websockets.exceptions import ConnectionClosed, WebSocketException
from config import settings
logger = logging.getLogger(__name__)
class BinanceWebSocketClient:
"""
Binance Futures WebSocket client with production-grade features:
- Auto-reconnection with exponential backoff
- Multi-stream subscription
- Heartbeat monitoring
- Graceful shutdown
"""
def __init__(
self,
symbol: str,
on_message: Callable[[Dict[str, Any]], None],
on_error: Optional[Callable[[Exception], None]] = None,
):
self.symbol = symbol.lower()
self.on_message = on_message
self.on_error = on_error
self.ws: Optional[websockets.WebSocketClientProtocol] = None
self.is_running = False
self.reconnect_count = 0
self.last_message_time = time.time()
# Reconnection settings
self.reconnect_delay = settings.RECONNECT_INITIAL_DELAY
self.max_reconnect_delay = settings.RECONNECT_MAX_DELAY
self.reconnect_multiplier = settings.RECONNECT_MULTIPLIER
# Build stream URL
self.ws_url = self._build_stream_url()
def _build_stream_url(self) -> str:
"""Build multi-stream WebSocket URL"""
streams = []
# Add multiple kline intervals
for interval in settings.kline_intervals_list:
streams.append(f"{self.symbol}@kline_{interval}")
# Add depth and trade streams
streams.append(f"{self.symbol}@depth20@100ms") # Top 20 depth, 100ms updates
streams.append(f"{self.symbol}@aggTrade") # Aggregated trades
stream_path = "/".join(streams)
url = f"{settings.BINANCE_WS_BASE_URL}/stream?streams={stream_path}"
logger.info(f"WebSocket URL: {url}")
logger.info(f"Subscribing to kline intervals: {', '.join(settings.kline_intervals_list)}")
return url
async def connect(self) -> None:
"""Establish WebSocket connection with retry logic"""
attempt = 0
while self.is_running:
try:
attempt += 1
logger.info(f"Connecting to Binance WebSocket (attempt {attempt})...")
async with websockets.connect(
self.ws_url,
ping_interval=20, # Send ping every 20s
ping_timeout=10, # Wait 10s for pong
close_timeout=10,
) as websocket:
self.ws = websocket
self.reconnect_delay = settings.RECONNECT_INITIAL_DELAY
self.reconnect_count = 0
logger.info("✓ WebSocket connected successfully")
# Message receiving loop
await self._receive_messages()
except ConnectionClosed as e:
logger.warning(f"WebSocket connection closed: {e.code} - {e.reason}")
await self._handle_reconnect()
except WebSocketException as e:
logger.error(f"WebSocket error: {e}")
if self.on_error:
self.on_error(e)
await self._handle_reconnect()
except Exception as e:
logger.error(f"Unexpected error: {e}", exc_info=True)
if self.on_error:
self.on_error(e)
await self._handle_reconnect()
finally:
self.ws = None
logger.info("WebSocket client stopped")
async def _receive_messages(self) -> None:
"""Receive and process messages from WebSocket"""
if not self.ws:
return
async for message in self.ws:
try:
self.last_message_time = time.time()
# Parse JSON message
data = json.loads(message)
# Handle combined stream format
if "stream" in data and "data" in data:
stream_name = data["stream"]
stream_data = data["data"]
# Add metadata
stream_data["_stream"] = stream_name
stream_data["_received_at"] = datetime.utcnow().isoformat()
# Process message
await self._process_message(stream_data)
else:
# Single stream format
data["_received_at"] = datetime.utcnow().isoformat()
await self._process_message(data)
except json.JSONDecodeError as e:
logger.error(f"Failed to parse JSON: {e}, message: {message[:200]}")
except Exception as e:
logger.error(f"Error processing message: {e}", exc_info=True)
async def _process_message(self, data: Dict[str, Any]) -> None:
"""Process received message"""
try:
# Call user-defined message handler
if asyncio.iscoroutinefunction(self.on_message):
await self.on_message(data)
else:
self.on_message(data)
except Exception as e:
logger.error(f"Error in message handler: {e}", exc_info=True)
async def _handle_reconnect(self) -> None:
"""Handle reconnection with exponential backoff"""
if not self.is_running:
return
self.reconnect_count += 1
# Check max attempts
if (
settings.MAX_RECONNECT_ATTEMPTS > 0
and self.reconnect_count > settings.MAX_RECONNECT_ATTEMPTS
):
logger.error("Max reconnection attempts reached. Stopping client.")
self.is_running = False
return
# Calculate delay with exponential backoff
delay = min(
self.reconnect_delay * (self.reconnect_multiplier ** (self.reconnect_count - 1)),
self.max_reconnect_delay,
)
logger.info(f"Reconnecting in {delay:.1f}s (attempt {self.reconnect_count})...")
await asyncio.sleep(delay)
async def start(self) -> None:
"""Start WebSocket client"""
if self.is_running:
logger.warning("Client is already running")
return
self.is_running = True
logger.info("Starting WebSocket client...")
await self.connect()
async def stop(self) -> None:
"""Stop WebSocket client gracefully"""
logger.info("Stopping WebSocket client...")
self.is_running = False
if self.ws:
await self.ws.close()
self.ws = None
def is_healthy(self) -> bool:
"""Check if client is healthy (receiving messages)"""
if not self.is_running or not self.ws:
return False
# Check if we've received a message in the last 60 seconds
time_since_last_message = time.time() - self.last_message_time
return time_since_last_message < 60
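The reconnect delay above grows geometrically and saturates at the configured cap. A standalone sketch of the schedule, assuming the initial 1.0s / max 60.0s values from docker-compose.yml and a hypothetical multiplier of 2.0 (`RECONNECT_MULTIPLIER` itself is not shown in this commit):

```python
# Sketch of the backoff curve computed in _handle_reconnect.
# initial=1.0 and max_delay=60.0 mirror docker-compose.yml;
# multiplier=2.0 is an assumed value for illustration.

def backoff_delay(attempt: int, initial: float = 1.0,
                  multiplier: float = 2.0, max_delay: float = 60.0) -> float:
    """Delay (seconds) before reconnect attempt `attempt` (1-based)."""
    return min(initial * (multiplier ** (attempt - 1)), max_delay)

# 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 60.0, 60.0: quick ramp, then capped
print([backoff_delay(n) for n in range(1, 9)])
```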

140
docker-compose.yml Normal file
View File

@ -0,0 +1,140 @@
version: '3.8'
services:
# Redis - Message Stream Storage
redis:
image: redis:7.2-alpine
container_name: tradus-redis
ports:
- "6379:6379"
volumes:
- redis_data:/data
- ./redis.conf:/usr/local/etc/redis/redis.conf
command: redis-server /usr/local/etc/redis/redis.conf
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 3s
retries: 3
start_period: 10s
networks:
- tradus-network
restart: unless-stopped
# Binance WebSocket Ingestion Service
ingestion:
build:
context: .
dockerfile: Dockerfile
container_name: tradus-ingestion
env_file: .env
volumes:
- llm_gate_data:/app/data # LLM Gate 状态文件持久化
environment:
# Binance Configuration
- BINANCE_WS_BASE_URL=wss://fstream.binance.com
- SYMBOL=btcusdt
- KLINE_INTERVAL=5m
# Redis Configuration
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_DB=0
- REDIS_PASSWORD=
# Stream Keys
- REDIS_STREAM_KLINE=binance:raw:kline:5m
- REDIS_STREAM_DEPTH=binance:raw:depth:20
- REDIS_STREAM_TRADE=binance:raw:trade
# Performance Tuning
- MAX_BUFFER_SIZE=1000
- RATE_LIMIT_MESSAGES_PER_SEC=1000
- DEDUP_CACHE_SIZE=10000
- REDIS_STREAM_MAXLEN=10000
# Reconnection Strategy
- RECONNECT_INITIAL_DELAY=1.0
- RECONNECT_MAX_DELAY=60.0
- MAX_RECONNECT_ATTEMPTS=100
# Monitoring
- HEALTH_CHECK_INTERVAL=30
- LOG_LEVEL=INFO
depends_on:
redis:
condition: service_healthy
networks:
- tradus-network
restart: unless-stopped
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
# Signal Generator Scheduler - 定时生成交易信号
scheduler:
build:
context: .
dockerfile: Dockerfile
container_name: tradus-scheduler
command: python -u scheduler.py
env_file: .env
volumes:
- llm_gate_data:/app/data # 共享 LLM Gate 状态
- ./output:/app/output # 输出信号文件
environment:
# Redis Configuration
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_DB=0
- REDIS_PASSWORD=
# Signal generation interval
- SIGNAL_INTERVAL_MINUTES=15 # 每15分钟生成一次信号
# Note: LLM API and DingTalk configs are loaded from .env file
- LOG_LEVEL=INFO
depends_on:
redis:
condition: service_healthy
networks:
- tradus-network
restart: unless-stopped
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
profiles:
- scheduler # Only start with: docker-compose --profile scheduler up
# Redis Commander - Optional Web UI for Redis
redis-commander:
image: rediscommander/redis-commander:latest
container_name: tradus-redis-ui
environment:
- REDIS_HOSTS=local:redis:6379
ports:
- "8081:8081"
depends_on:
- redis
networks:
- tradus-network
restart: unless-stopped
profiles:
- debug # Only start with: docker-compose --profile debug up
volumes:
redis_data:
driver: local
llm_gate_data:
driver: local
networks:
tradus-network:
driver: bridge
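Both services read their configuration from `config/settings.py`, which is not part of this excerpt. A minimal sketch of how the comma-separated `KLINE_INTERVALS` variable could become the `settings.kline_intervals_list` used by the WebSocket client, assuming the pydantic-settings dependency pinned in requirements.txt (field names match the environment variables above; the real class may differ):

```python
# Hypothetical sketch of the settings pattern; the real config/settings.py
# is not shown in this commit excerpt.
from pydantic_settings import BaseSettings

class Settings(BaseSettings):
    BINANCE_WS_BASE_URL: str = "wss://fstream.binance.com"
    SYMBOL: str = "btcusdt"
    KLINE_INTERVALS: str = "5m,15m,1h,4h"
    REDIS_HOST: str = "redis"
    REDIS_PORT: int = 6379

    @property
    def kline_intervals_list(self) -> list[str]:
        # "5m,15m,1h,4h" -> ["5m", "15m", "1h", "4h"]
        return [i.strip() for i in self.KLINE_INTERVALS.split(",") if i.strip()]

settings = Settings()  # values are overridden by environment variables / .env
```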

236
main.py Normal file
View File

@ -0,0 +1,236 @@
"""
Main application: Binance WebSocket to Redis Stream ingestion pipeline
"""
import asyncio
import logging
import signal
import sys
from typing import Any, Dict, Optional
from config import settings
from core import (
BinanceWebSocketClient,
RedisStreamWriter,
MessageDeduplicator,
BufferedMessageProcessor,
)
# Configure logging
logging.basicConfig(
level=getattr(logging, settings.LOG_LEVEL),
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[
logging.StreamHandler(sys.stdout),
],
)
logger = logging.getLogger(__name__)
class IngestionPipeline:
"""
Main ingestion pipeline orchestrator.
Coordinates:
- WebSocket client
- Message deduplication
- Buffering and rate limiting
- Redis Stream writing
- Health monitoring
"""
def __init__(self):
self.ws_client: Optional[BinanceWebSocketClient] = None
self.redis_writer = RedisStreamWriter()
self.deduplicator = MessageDeduplicator()
self.buffer_processor = BufferedMessageProcessor()
self.is_running = False
self.tasks = []
async def on_message(self, message: Dict[str, Any]) -> None:
"""
Handle incoming WebSocket message
Args:
message: Raw message from WebSocket
"""
try:
# Check for duplicates
if self.deduplicator.is_duplicate(message):
logger.debug(f"Duplicate message filtered: {message.get('E')}")
return
# Add to buffer (with overflow protection)
success = await self.buffer_processor.add_message(message)
if not success:
logger.warning("Message dropped due to buffer overflow")
except Exception as e:
logger.error(f"Error in message handler: {e}", exc_info=True)
async def process_messages(self) -> None:
"""Background task to process buffered messages"""
logger.info("Starting message processor...")
while self.is_running:
try:
# Get batch of messages
batch = await self.buffer_processor.get_batch(timeout=1.0)
if not batch:
await asyncio.sleep(0.1)
continue
# Write batch to Redis
written = await self.redis_writer.write_batch(batch)
if written > 0:
logger.debug(f"Wrote {written} messages to Redis")
# Check buffer health
if self.buffer_processor.is_buffer_critical():
logger.warning(
f"Buffer usage critical: "
f"{self.buffer_processor.get_buffer_usage():.1%}"
)
except Exception as e:
logger.error(f"Error processing messages: {e}", exc_info=True)
await asyncio.sleep(1)
logger.info("Message processor stopped")
async def monitor_health(self) -> None:
"""Background task to monitor system health"""
logger.info("Starting health monitor...")
while self.is_running:
try:
await asyncio.sleep(settings.HEALTH_CHECK_INTERVAL)
# Check WebSocket health
ws_healthy = self.ws_client.is_healthy() if self.ws_client else False
# Check Redis health
redis_healthy = await self.redis_writer.health_check()
# Get statistics
dedup_stats = self.deduplicator.get_stats()
buffer_stats = self.buffer_processor.get_stats()
redis_stats = self.redis_writer.get_stats()
# Log health status
logger.info(
f"Health Check | "
f"WebSocket: {'' if ws_healthy else ''} | "
f"Redis: {'' if redis_healthy else ''} | "
f"Buffer: {buffer_stats['buffer_usage']} | "
f"Dedup: {dedup_stats['duplicate_rate']} | "
f"Written: {redis_stats['messages_written']}"
)
# Alert if unhealthy
if not ws_healthy:
logger.error("WebSocket connection is unhealthy!")
if not redis_healthy:
logger.error("Redis connection is unhealthy!")
except Exception as e:
logger.error(f"Error in health monitor: {e}", exc_info=True)
logger.info("Health monitor stopped")
async def start(self) -> None:
"""Start ingestion pipeline"""
logger.info("=" * 60)
logger.info("Starting Binance Real-time Data Ingestion Pipeline")
logger.info("=" * 60)
logger.info(f"Symbol: {settings.SYMBOL.upper()}")
logger.info(f"Kline Intervals: {', '.join(settings.kline_intervals_list)}")
logger.info(f"Redis Host: {settings.REDIS_HOST}:{settings.REDIS_PORT}")
logger.info("=" * 60)
self.is_running = True
try:
# Connect to Redis
logger.info("Connecting to Redis...")
await self.redis_writer.connect()
# Initialize WebSocket client
self.ws_client = BinanceWebSocketClient(
symbol=settings.SYMBOL,
on_message=self.on_message,
)
# Start background tasks
logger.info("Starting background tasks...")
self.tasks = [
asyncio.create_task(self.ws_client.start()),
asyncio.create_task(self.process_messages()),
asyncio.create_task(self.monitor_health()),
]
# Wait for all tasks
await asyncio.gather(*self.tasks)
except Exception as e:
logger.error(f"Fatal error in pipeline: {e}", exc_info=True)
await self.stop()
async def stop(self) -> None:
"""Stop ingestion pipeline gracefully"""
logger.info("Stopping ingestion pipeline...")
self.is_running = False
# Stop WebSocket client
if self.ws_client:
await self.ws_client.stop()
# Cancel background tasks
for task in self.tasks:
if not task.done():
task.cancel()
# Wait for tasks to complete
if self.tasks:
await asyncio.gather(*self.tasks, return_exceptions=True)
# Close Redis connection
await self.redis_writer.close()
# Print final statistics
logger.info("=" * 60)
logger.info("Final Statistics:")
logger.info(f"Deduplication: {self.deduplicator.get_stats()}")
logger.info(f"Buffer: {self.buffer_processor.get_stats()}")
logger.info(f"Redis: {self.redis_writer.get_stats()}")
logger.info("=" * 60)
logger.info("Pipeline stopped successfully")
async def main():
"""Main entry point"""
pipeline = IngestionPipeline()
# Setup signal handlers for graceful shutdown. Registering on the running
# loop (rather than via signal.signal) is the asyncio-safe way to schedule
# pipeline.stop() from a signal context.
loop = asyncio.get_running_loop()

def request_shutdown(sig: signal.Signals) -> None:
    logger.info(f"Received signal {sig.name}, shutting down...")
    asyncio.create_task(pipeline.stop())

for sig in (signal.SIGINT, signal.SIGTERM):
    loop.add_signal_handler(sig, request_shutdown, sig)
# Start pipeline
await pipeline.start()
if __name__ == "__main__":
try:
asyncio.run(main())
except KeyboardInterrupt:
logger.info("Interrupted by user")
except Exception as e:
logger.error(f"Fatal error: {e}", exc_info=True)
sys.exit(1)
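`process_messages` hands each batch to `RedisStreamWriter.write_batch` (core/redis_writer.py, shown earlier only in part). A hedged sketch of what a batched stream write typically looks like with redis-py's asyncio pipeline; the stream key and field layout here are illustrative, not necessarily the writer's exact ones:

```python
# Minimal sketch of a batched XADD, in the spirit of RedisStreamWriter.write_batch.
# Stream key and field layout are illustrative, not the writer's exact ones.
import asyncio

import orjson
import redis.asyncio as redis

async def write_batch(client: redis.Redis, key: str, batch: list) -> int:
    pipe = client.pipeline(transaction=False)  # plain pipelining; no MULTI/EXEC
    for msg in batch:
        # maxlen + approximate keeps the stream bounded, matching REDIS_STREAM_MAXLEN
        pipe.xadd(key, {"data": orjson.dumps(msg)}, maxlen=10_000, approximate=True)
    results = await pipe.execute()
    return len(results)

async def main():
    client = redis.Redis(host="localhost", port=6379)
    n = await write_batch(client, "binance:raw:trade", [{"p": "89158.5"}])
    print(f"wrote {n} entries")
    await client.close()

asyncio.run(main())
```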

13
notifiers/__init__.py Normal file
View File

@ -0,0 +1,13 @@
"""
Notifiers - message notification module
Supported channels:
- DingTalk (钉钉)
- WeChat (企业微信) - not yet implemented
- Telegram - not yet implemented
- Email - not yet implemented
"""
from .dingtalk import DingTalkNotifier
__all__ = ['DingTalkNotifier']

522
notifiers/dingtalk.py Normal file
View File

@ -0,0 +1,522 @@
"""
DingTalk Notifier - pushes trading messages to a DingTalk group robot
Features:
- Markdown-formatted messages
- Trading-signal formatting
- Retry on failure
- Message deduplication
"""
import logging
import json
import time
from typing import Dict, Any, Optional
from datetime import datetime
import hmac
import hashlib
import base64
import urllib.parse
logger = logging.getLogger(__name__)
class DingTalkNotifier:
"""钉钉群机器人通知器"""
def __init__(
self,
webhook_url: Optional[str] = None,
secret: Optional[str] = None,
enabled: bool = True
):
"""
初始化钉钉通知器
Args:
webhook_url: 钉钉机器人webhook地址
secret: 钉钉机器人加签密钥可选增强安全性
enabled: 是否启用通知
"""
self.webhook_url = webhook_url
self.secret = secret
self.enabled = enabled and webhook_url is not None
if self.enabled:
logger.info(f"📱 钉钉通知已启用 - Webhook: {webhook_url[:50]}...")
else:
logger.info("📱 钉钉通知未启用 (未配置webhook_url)")
# 统计信息
self.stats = {
'total_sent': 0,
'total_failed': 0,
'last_send_time': None
}
def _generate_sign(self, timestamp: int) -> str:
"""
生成钉钉机器人加签
Args:
timestamp: 时间戳毫秒
Returns:
签名字符串
"""
if not self.secret:
return ""
secret_enc = self.secret.encode('utf-8')
string_to_sign = f'{timestamp}\n{self.secret}'
string_to_sign_enc = string_to_sign.encode('utf-8')
hmac_code = hmac.new(
secret_enc,
string_to_sign_enc,
digestmod=hashlib.sha256
).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
return sign
def _build_webhook_url(self) -> str:
"""
构建带签名的webhook URL
Returns:
完整的webhook URL
"""
if not self.secret:
return self.webhook_url
timestamp = int(time.time() * 1000)
sign = self._generate_sign(timestamp)
return f"{self.webhook_url}&timestamp={timestamp}&sign={sign}"
def send_markdown(
self,
title: str,
text: str,
at_mobiles: Optional[list] = None,
at_all: bool = False
) -> bool:
"""
发送Markdown格式消息
Args:
title: 消息标题
text: Markdown格式文本
at_mobiles: @的手机号列表
at_all: 是否@所有人
Returns:
是否发送成功
"""
if not self.enabled:
logger.debug("钉钉通知未启用,跳过发送")
return False
try:
import requests
url = self._build_webhook_url()
payload = {
"msgtype": "markdown",
"markdown": {
"title": title,
"text": text
},
"at": {
"atMobiles": at_mobiles or [],
"isAtAll": at_all
}
}
headers = {'Content-Type': 'application/json'}
response = requests.post(
url,
data=json.dumps(payload),
headers=headers,
timeout=5
)
result = response.json()
if result.get('errcode') == 0:
self.stats['total_sent'] += 1
self.stats['last_send_time'] = datetime.now().isoformat()
logger.info(f"✅ 钉钉消息发送成功: {title}")
return True
else:
self.stats['total_failed'] += 1
logger.error(f"❌ 钉钉消息发送失败: {result.get('errmsg')}")
return False
except Exception as e:
self.stats['total_failed'] += 1
logger.error(f"❌ 钉钉消息发送异常: {e}", exc_info=True)
return False
def send_signal(self, aggregated_signal: Dict[str, Any]) -> bool:
"""
发送交易信号通知
Args:
aggregated_signal: 聚合后的交易信号
Returns:
是否发送成功
"""
if not self.enabled:
return False
try:
# 格式化信号为Markdown
markdown = self._format_signal_markdown(aggregated_signal)
# 提取标题
signal_type = aggregated_signal.get('final_signal', 'HOLD')
confidence = aggregated_signal.get('final_confidence', 0)
title = f"🚨 交易信号: {signal_type} (置信度: {confidence:.0%})"
# 发送消息
return self.send_markdown(title, markdown)
except Exception as e:
logger.error(f"❌ 格式化交易信号失败: {e}", exc_info=True)
return False
def _format_signal_markdown(self, signal: Dict[str, Any]) -> str:
"""
格式化交易信号为Markdown文本多时间级别版本
Args:
signal: 聚合信号
Returns:
Markdown格式文本
"""
# 信号类型对应的emoji
signal_type = signal.get('final_signal', 'HOLD')
confidence = signal.get('final_confidence', 0)
signal_emoji = {
'BUY': '🟢',
'SELL': '🔴',
'HOLD': '🟡'
}
emoji = signal_emoji.get(signal_type, '')
lines = []
# === 核心信号 ===
lines.append(f"# {emoji} {signal_type}")
lines.append("")
lines.append(f"**综合置信度**: {confidence:.0%} | **时间**: {datetime.now().strftime('%H:%M')}")
lines.append("")
# === 当前价格 ===
levels = signal.get('levels', {})
current_price = levels.get('current_price', 0)
if current_price > 0:
lines.append(f"**当前价格**: ${current_price:,.2f}")
lines.append("")
# === 多时间级别分析 ===
lines.append("## 📊 多时间级别分析")
lines.append("")
# 获取LLM信号
llm_signal = signal.get('llm_signal') or {}
opportunities = llm_signal.get('opportunities', {})
recommendations = llm_signal.get('recommendations_by_timeframe', {})
# 短期分析
self._add_timeframe_section(
lines,
"短期 (5m/15m/1h)",
"",
opportunities.get('short_term_5m_15m_1h', {}),
recommendations.get('short_term', ''),
signal # 传递完整信号数据
)
# 中期分析
self._add_timeframe_section(
lines,
"中期 (4h/1d)",
"📈",
opportunities.get('medium_term_4h_1d', {}),
recommendations.get('medium_term', ''),
signal
)
# 长期分析
self._add_timeframe_section(
lines,
"长期 (1d/1w)",
"📅",
opportunities.get('long_term_1d_1w', {}),
recommendations.get('long_term', ''),
signal
)
# === 综合建议 ===
reason = llm_signal.get('reasoning', '') or self._get_brief_reason(signal)
if reason:
lines.append("---")
lines.append("## 💡 综合分析")
lines.append("")
lines.append(f"{reason}")
lines.append("")
# === 页脚 ===
lines.append("---")
lines.append("*仅供参考,不构成投资建议*")
return "\n".join(lines)
def _add_timeframe_section(
self,
lines: list,
timeframe_label: str,
emoji: str,
opportunity: Dict[str, Any],
recommendation: str,
signal: Dict[str, Any] = None
):
"""
添加单个时间级别的分析区块
Args:
lines: 输出行列表
timeframe_label: 时间级别标签
emoji: emoji图标
opportunity: 该时间级别的交易机会
recommendation: 该时间级别的操作建议
signal: 完整信号数据用于获取量化评分等
"""
lines.append(f"### {emoji} {timeframe_label}")
lines.append("")
exists = opportunity.get('exists', False)
if exists:
direction = opportunity.get('direction', 'UNKNOWN')
entry = opportunity.get('entry_price', 0)
stop = opportunity.get('stop_loss', 0)
tp = opportunity.get('take_profit', 0)
reasoning = opportunity.get('reasoning', '')
# 方向标识
direction_emoji = "🟢" if direction == "LONG" else "🔴" if direction == "SHORT" else ""
lines.append(f"{direction_emoji} **方向**: {direction}")
lines.append("")
# 价格信息
if entry and stop and tp:
lines.append(f"**入场**: ${entry:,.2f}")
lines.append(f"**止损**: ${stop:,.2f}")
lines.append(f"**止盈**: ${tp:,.2f}")
# 风险回报比
risk = abs(entry - stop)
reward = abs(tp - entry)
rr = reward / risk if risk > 0 else 0
lines.append(f"**风险回报**: 1:{rr:.1f}")
lines.append("")
# 理由
if reasoning:
lines.append(f"💭 {reasoning}")
lines.append("")
else:
# 无交易机会时,显示关键数据支撑
# 尝试从市场分析中获取该周期的数据
if signal:
market_analysis = signal.get('market_analysis', {})
# 显示量化评分(如果有)
quant_signal = signal.get('quantitative_signal', {})
if quant_signal:
composite = quant_signal.get('composite_score', 0)
lines.append(f"📊 **量化评分**: {composite:.1f}")
scores = quant_signal.get('scores', {})
if scores:
lines.append(f"- 趋势: {scores.get('trend', 0):.0f} | 动量: {scores.get('momentum', 0):.0f} | 订单流: {scores.get('orderflow', 0):.0f}")
lines.append("")
# 显示关键价格和技术指标
if market_analysis:
trend = market_analysis.get('trend', {})
momentum = market_analysis.get('momentum', {})
if trend or momentum:
lines.append(f"📈 **技术状态**:")
if trend:
lines.append(f"- 趋势: {trend.get('direction', 'unknown')} ({trend.get('strength', 'weak')})")
if momentum:
lines.append(f"- RSI: {momentum.get('rsi', 50):.0f} ({momentum.get('rsi_status', '中性')})")
lines.append(f"- MACD: {momentum.get('macd_signal', 'unknown')}")
lines.append("")
# 操作建议
if recommendation:
lines.append(f"💭 **操作建议**: {recommendation}")
lines.append("")
else:
lines.append("💭 暂无明确交易机会")
lines.append("")
def _get_best_trade_plan(self, signal: Dict[str, Any]) -> Optional[Dict[str, Any]]:
"""
获取最优交易计划优先中长线盈利空间更大
优先级
1. 中长线机会 (swing) - 4h/1d/1w级别盈利空间2%-5%+适合通知
2. 日内机会 (intraday) - 5m/15m/1h级别盈利空间0.5%-1%
3. 量化信号价格位 - 回退选项
Returns:
{
'entry': float,
'stop_loss': float,
'take_profit': float,
'type': 'swing'|'intraday'|'quant' # 交易类型
} or None
"""
llm = signal.get('llm_signal')
if llm and isinstance(llm, dict):
opportunities = llm.get('opportunities', {})
# 优先1中长线机会盈利空间大适合手机通知
swing = opportunities.get('swing', {})
if swing.get('exists'):
entry = swing.get('entry_price')
stop = swing.get('stop_loss')
tp = swing.get('take_profit')
if entry and stop and tp:
return {
'entry': float(entry),
'stop_loss': float(stop),
'take_profit': float(tp),
'type': 'swing'
}
# 优先2日内机会如果没有中长线机会
intraday = opportunities.get('intraday', {})
if intraday.get('exists'):
entry = intraday.get('entry_price')
stop = intraday.get('stop_loss')
tp = intraday.get('take_profit')
if entry and stop and tp:
return {
'entry': float(entry),
'stop_loss': float(stop),
'take_profit': float(tp),
'type': 'intraday'
}
# 回退到量化信号的价格位
levels = signal.get('levels', {})
entry = levels.get('entry')
stop = levels.get('stop_loss')
tp = levels.get('take_profit_1')
if entry and stop and tp:
return {
'entry': float(entry),
'stop_loss': float(stop),
'take_profit': float(tp),
'type': 'quant'
}
return None
def _get_brief_reason(self, signal: Dict[str, Any]) -> str:
"""
获取简短的信号原因1-2句话
Returns:
简短原因描述
"""
reasons = []
# 优先使用LLM的推理截取前100字
llm = signal.get('llm_signal')
if llm and isinstance(llm, dict):
llm_reasoning = llm.get('reasoning', '')
if llm_reasoning:
# Take the first sentence (split on the Chinese full stop '。') or first 100 chars
brief = llm_reasoning.split('。')[0] + '。'
if len(brief) > 100:
brief = brief[:100] + '...'
return brief
# 如果有日内机会的说明
opportunities = llm.get('opportunities', {})
intraday = opportunities.get('intraday', {})
if intraday.get('exists') and intraday.get('reasoning'):
brief = intraday['reasoning']
if len(brief) > 100:
brief = brief[:100] + '...'
return brief
# 回退到量化信号的推理
quant = signal.get('quantitative_signal', {})
quant_reasoning = quant.get('reasoning', '')
if quant_reasoning:
return quant_reasoning
# 默认
return signal.get('recommendation', '系统分析建议关注')
def send_error(self, error_msg: str, context: Optional[str] = None) -> bool:
"""
发送错误通知
Args:
error_msg: 错误消息
context: 错误上下文
Returns:
是否发送成功
"""
if not self.enabled:
return False
lines = []
lines.append("# ❌ 系统错误通知")
lines.append("")
lines.append(f"**时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
lines.append(f"**错误**: {error_msg}")
if context:
lines.append("")
lines.append(f"**上下文**: {context}")
lines.append("")
lines.append("---")
lines.append("*请及时检查系统状态*")
markdown = "\n".join(lines)
return self.send_markdown("系统错误通知", markdown, at_all=True)
def get_stats(self) -> Dict[str, Any]:
"""获取统计信息"""
return {
'enabled': self.enabled,
'total_sent': self.stats['total_sent'],
'total_failed': self.stats['total_failed'],
'success_rate': (
self.stats['total_sent'] / (self.stats['total_sent'] + self.stats['total_failed'])
if (self.stats['total_sent'] + self.stats['total_failed']) > 0
else 0
),
'last_send_time': self.stats['last_send_time']
}
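A minimal usage sketch. The webhook URL and secret below are placeholders; a real DingTalk robot URL already carries an `access_token` query parameter, which is why `_build_webhook_url` can safely append `&timestamp=...&sign=...`:

```python
# Hypothetical usage; substitute your robot's real webhook and secret.
notifier = DingTalkNotifier(
    webhook_url="https://oapi.dingtalk.com/robot/send?access_token=xxxx",
    secret="SEC-your-signing-secret",  # optional; enables the HMAC signature
)
notifier.send_markdown(title="测试消息", text="# Test\n\n- channel ok")
print(notifier.get_stats())  # e.g. {'enabled': True, 'total_sent': 1, ...}
```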

222
output/latest_signal.json Executable file
View File

@ -0,0 +1,222 @@
{
"timestamp": "2025-12-02T14:50:51.220024",
"aggregated_signal": {
"timestamp": "2025-12-02T14:50:51.218349",
"final_signal": "HOLD",
"final_confidence": 0.3,
"consensus": "CONSENSUS_HOLD",
"agreement_score": 0.3,
"quantitative_signal": {
"signal_type": "HOLD",
"signal": "HOLD",
"confidence": 0.0,
"composite_score": 17.5,
"scores": {
"trend": 0.0,
"momentum": -30,
"orderflow": 100,
"breakout": 0
}
},
"llm_signal": {
"signal_type": "HOLD",
"signal": "HOLD",
"confidence": 0.6,
"reasoning": "多周期分析显示市场处于严重分歧状态短期5m/15m强劲看涨中期4h/1d明确看跌长期1d/1w方向矛盾。这种分歧导致整体信号为HOLD。短期上涨面临中期趋势阻力和超买技术指标压制上行空间可能受限。交易机会仅限于快进快出的日内多头中长线需等待趋势共振。",
"key_factors": [
"多周期趋势严重分歧",
"短期RSI超买与中期下跌趋势冲突",
"价格处于4小时关键压力位$89,177",
"成交量在短期放量但日线缩量",
"周线长期趋势方向待定"
],
"opportunities": {
"short_term_5m_15m_1h": {
"exists": true,
"direction": "LONG",
"entry_price": 89158.5,
"stop_loss": 88800.0,
"take_profit": 91592.0,
"reasoning": "5m和15m周期呈现强劲上涨趋势量化评分28.4/40.9MACD金叉成交量放大但RSI均超买>80。1h周期趋势下跌但MACD金叉扩大价格接近1h压力位$91,592。短期存在基于小周期动量延续的做多机会但需警惕超买回调风险。"
},
"medium_term_4h_1d": {
"exists": false,
"direction": null,
"entry_price": 0,
"stop_loss": 0,
"take_profit": 0,
"reasoning": "4h和1d周期均显示强劲下跌趋势量化评分-33.4/-23.4与短期上涨趋势形成严重分歧。4h压力位$89,177与当前价格$89,158.5几乎重合构成关键阻力。日线MACD虽金叉但趋势向下RSI中性偏弱缺乏明确的中期反转或延续信号建议观望。"
},
"long_term_1d_1w": {
"exists": false,
"direction": null,
"entry_price": 0,
"stop_loss": 0,
"take_profit": 0,
"reasoning": "日线下跌趋势与周线上涨趋势方向严重冲突。周线量化评分仅4.3信号模糊MACD死叉扩大RSI弱势。日线趋势向下但MACD金叉显示长期趋势不明朗处于关键抉择期。无明确的长期趋势交易机会需等待日线与周线趋势共振。"
},
"ambush": {
"exists": true,
"price_level": 86207.0,
"reasoning": "等待价格回调至1h关键支撑位$86,207附近。该位置接近4h支撑$86,261若价格能在此企稳并出现1h或4h周期的反弹信号如RSI从超卖区回升、MACD金叉可考虑作为中期做多埋伏点博弈日线下跌趋势中的反弹或反转。"
},
"intraday": {
"exists": true,
"direction": "LONG",
"entry_price": 89158.5,
"stop_loss": 88800.0,
"take_profit": 91592.0,
"reasoning": "5m和15m周期呈现强劲上涨趋势量化评分28.4/40.9MACD金叉成交量放大但RSI均超买>80。1h周期趋势下跌但MACD金叉扩大价格接近1h压力位$91,592。短期存在基于小周期动量延续的做多机会但需警惕超买回调风险。"
},
"swing": {
"exists": false,
"direction": null,
"entry_price": 0,
"stop_loss": 0,
"take_profit": 0,
"reasoning": "4h和1d周期均显示强劲下跌趋势量化评分-33.4/-23.4与短期上涨趋势形成严重分歧。4h压力位$89,177与当前价格$89,158.5几乎重合构成关键阻力。日线MACD虽金叉但趋势向下RSI中性偏弱缺乏明确的中期反转或延续信号建议观望。"
}
},
"recommendations_by_timeframe": {
"short_term": "短期(5m/15m/1h)存在基于小周期动量的日内做多机会但RSI已严重超买风险较高。建议轻仓快进快出严格设置止损于$88,800基于5m ATR目标看向1h压力位$91,592。若价格无法有效突破当前4h压力$89,177应果断离场。",
"medium_term": "中期(4h/1d)趋势向下但当前价格处于4h关键压力位且与短期上涨动能背离。无明确的中期波段入场点建议观望。可关注价格能否站稳$89,177上方以挑战日线压力$93,080或回落至$86,200-$86,800支撑区域寻找企稳信号。",
"long_term": "长期(1d/1w)趋势矛盾,日线下跌与周线上涨形成拉锯。周线支撑$88,909已被短暂跌破长期方向待定。建议长期投资者保持观望等待日线趋势当前下跌与周线趋势当前上涨出现明确一致信号后再做布局或利用ambush点位分批建仓。"
},
"trade_type": "MULTI_TIMEFRAME",
"risk_level": "HIGH"
},
"levels": {
"current_price": 89179.2,
"entry": 89179.2,
"stop_loss": 88999.95,
"take_profit_1": 90395.95,
"take_profit_2": 90395.95,
"take_profit_3": 90395.95,
"take_profit_1_range": {
"quant": 89199.9,
"llm": 91592.0,
"diff_pct": 2.65
}
},
"risk_reward_ratio": 6.79,
"recommendation": "量化和AI分析均建议观望,等待更好的机会",
"warnings": [
"⚠️ 量化信号置信度较低"
]
},
"market_analysis": {
"price": 89199.9,
"trend": {
"direction": "unknown",
"strength": 0
},
"momentum": {
"rsi": 50,
"rsi_status": "中性偏弱",
"rsi_trend": "中性",
"macd_signal": "死叉收窄",
"macd_hist": 0
}
},
"quantitative_signal": {
"timestamp": "2025-12-02T14:50:04.306308",
"signal_type": "HOLD",
"signal_strength": 0.17,
"composite_score": 17.5,
"confidence": 0.0,
"consensus_score": 0.0,
"scores": {
"trend": 0.0,
"momentum": -30,
"orderflow": 100,
"breakout": 0
},
"levels": {
"current_price": 89199.9,
"entry": 89199.9,
"stop_loss": 89199.9,
"take_profit_1": 89199.9,
"take_profit_2": 89199.9,
"take_profit_3": 89199.9
},
"risk_reward_ratio": 0,
"reasoning": "趋势unknown (0); RSI=50; MACD 死叉收窄; 订单流: 强买方主导"
},
"llm_signal": {
"timestamp": "2025-12-02T14:50:51.218012",
"signal_type": "HOLD",
"confidence": 0.6,
"trade_type": "MULTI_TIMEFRAME",
"reasoning": "多周期分析显示市场处于严重分歧状态短期5m/15m强劲看涨中期4h/1d明确看跌长期1d/1w方向矛盾。这种分歧导致整体信号为HOLD。短期上涨面临中期趋势阻力和超买技术指标压制上行空间可能受限。交易机会仅限于快进快出的日内多头中长线需等待趋势共振。",
"opportunities": {
"short_term_5m_15m_1h": {
"exists": true,
"direction": "LONG",
"entry_price": 89158.5,
"stop_loss": 88800.0,
"take_profit": 91592.0,
"reasoning": "5m和15m周期呈现强劲上涨趋势量化评分28.4/40.9MACD金叉成交量放大但RSI均超买>80。1h周期趋势下跌但MACD金叉扩大价格接近1h压力位$91,592。短期存在基于小周期动量延续的做多机会但需警惕超买回调风险。"
},
"medium_term_4h_1d": {
"exists": false,
"direction": null,
"entry_price": 0,
"stop_loss": 0,
"take_profit": 0,
"reasoning": "4h和1d周期均显示强劲下跌趋势量化评分-33.4/-23.4与短期上涨趋势形成严重分歧。4h压力位$89,177与当前价格$89,158.5几乎重合构成关键阻力。日线MACD虽金叉但趋势向下RSI中性偏弱缺乏明确的中期反转或延续信号建议观望。"
},
"long_term_1d_1w": {
"exists": false,
"direction": null,
"entry_price": 0,
"stop_loss": 0,
"take_profit": 0,
"reasoning": "日线下跌趋势与周线上涨趋势方向严重冲突。周线量化评分仅4.3信号模糊MACD死叉扩大RSI弱势。日线趋势向下但MACD金叉显示长期趋势不明朗处于关键抉择期。无明确的长期趋势交易机会需等待日线与周线趋势共振。"
},
"ambush": {
"exists": true,
"price_level": 86207.0,
"reasoning": "等待价格回调至1h关键支撑位$86,207附近。该位置接近4h支撑$86,261若价格能在此企稳并出现1h或4h周期的反弹信号如RSI从超卖区回升、MACD金叉可考虑作为中期做多埋伏点博弈日线下跌趋势中的反弹或反转。"
},
"intraday": {
"exists": true,
"direction": "LONG",
"entry_price": 89158.5,
"stop_loss": 88800.0,
"take_profit": 91592.0,
"reasoning": "5m和15m周期呈现强劲上涨趋势量化评分28.4/40.9MACD金叉成交量放大但RSI均超买>80。1h周期趋势下跌但MACD金叉扩大价格接近1h压力位$91,592。短期存在基于小周期动量延续的做多机会但需警惕超买回调风险。"
},
"swing": {
"exists": false,
"direction": null,
"entry_price": 0,
"stop_loss": 0,
"take_profit": 0,
"reasoning": "4h和1d周期均显示强劲下跌趋势量化评分-33.4/-23.4与短期上涨趋势形成严重分歧。4h压力位$89,177与当前价格$89,158.5几乎重合构成关键阻力。日线MACD虽金叉但趋势向下RSI中性偏弱缺乏明确的中期反转或延续信号建议观望。"
}
},
"recommendations_by_timeframe": {
"short_term": "短期(5m/15m/1h)存在基于小周期动量的日内做多机会但RSI已严重超买风险较高。建议轻仓快进快出严格设置止损于$88,800基于5m ATR目标看向1h压力位$91,592。若价格无法有效突破当前4h压力$89,177应果断离场。",
"medium_term": "中期(4h/1d)趋势向下但当前价格处于4h关键压力位且与短期上涨动能背离。无明确的中期波段入场点建议观望。可关注价格能否站稳$89,177上方以挑战日线压力$93,080或回落至$86,200-$86,800支撑区域寻找企稳信号。",
"long_term": "长期(1d/1w)趋势矛盾,日线下跌与周线上涨形成拉锯。周线支撑$88,909已被短暂跌破长期方向待定。建议长期投资者保持观望等待日线趋势当前下跌与周线趋势当前上涨出现明确一致信号后再做布局或利用ambush点位分批建仓。"
},
"levels": {
"current_price": 89158.5,
"entry": 89158.5,
"stop_loss": 88800.0,
"take_profit_1": 91592.0,
"take_profit_2": 91592.0,
"take_profit_3": 91592.0
},
"risk_level": "HIGH",
"key_factors": [
"多周期趋势严重分歧",
"短期RSI超买与中期下跌趋势冲突",
"价格处于4小时关键压力位$89,177",
"成交量在短期放量但日线缩量",
"周线长期趋势方向待定"
],
"raw_response": "```json\n{\n \"signal\": \"HOLD\",\n \"confidence\": 0.6,\n \"opportunities\": {\n \"short_term_5m_15m_1h\": {\n \"exists\": true,\n \"timeframe_label\": \"短期 (5m/15m/1h)\",\n \"direction\": \"LONG\",\n \"entry_price\": 89158.5,\n \"stop_loss\": 88800.0,\n \"take_profit\": 91592.0,\n \"reasoning\": \"5m和15m周期呈现强劲上涨趋势量化评分28.4/40.9MACD金叉成交量放大但RSI均超买>80。1h周期趋势下跌但MACD金叉扩大价格接近1h压力位$91,592。短期存在基于小周期动量延续的做多机会但需警惕超买回调风险。\",\n \"持仓时间\": \"几分钟到几小时\"\n },\n \"medium_term_4h_1d\": {\n \"exists\": false,\n \"timeframe_label\": \"中期 (4h/1d)\",\n \"direction\": null,\n \"entry_price\": null,\n \"stop_loss\": null,\n \"take_profit\": null,\n \"reasoning\": \"4h和1d周期均显示强劲下跌趋势量化评分-33.4/-23.4与短期上涨趋势形成严重分歧。4h压力位$89,177与当前价格$89,158.5几乎重合构成关键阻力。日线MACD虽金叉但趋势向下RSI中性偏弱缺乏明确的中期反转或延续信号建议观望。\",\n \"持仓时间\": \"数天到一周\"\n },\n \"long_term_1d_1w\": {\n \"exists\": false,\n \"timeframe_label\": \"长期 (1d/1w)\",\n \"direction\": null,\n \"entry_price\": null,\n \"stop_loss\": null,\n \"take_profit\": null,\n \"reasoning\": \"日线下跌趋势与周线上涨趋势方向严重冲突。周线量化评分仅4.3信号模糊MACD死叉扩大RSI弱势。日线趋势向下但MACD金叉显示长期趋势不明朗处于关键抉择期。无明确的长期趋势交易机会需等待日线与周线趋势共振。\",\n \"持仓时间\": \"数周到数月\"\n },\n \"ambush\": {\n \"exists\": true,\n \"price_level\": 86207.0,\n \"reasoning\": \"等待价格回调至1h关键支撑位$86,207附近。该位置接近4h支撑$86,261若价格能在此企稳并出现1h或4h周期的反弹信号如RSI从超卖区回升、MACD金叉可考虑作为中期做多埋伏点博弈日线下跌趋势中的反弹或反转。\",\n \"timeframe\": \"基于1小时和4小时级别的关键支撑位\"\n }\n },\n \"recommendations_by_timeframe\": {\n \"short_term\": \"短期(5m/15m/1h)存在基于小周期动量的日内做多机会但RSI已严重超买风险较高。建议轻仓快进快出严格设置止损于$88,800基于5m ATR目标看向1h压力位$91,592。若价格无法有效突破当前4h压力$89,177应果断离场。\",\n \"medium_term\": \"中期(4h/1d)趋势向下但当前价格处于4h关键压力位且与短期上涨动能背离。无明确的中期波段入场点建议观望。可关注价格能否站稳$89,177上方以挑战日线压力$93,080或回落至$86,200-$86,800支撑区域寻找企稳信号。\",\n \"long_term\": \"长期(1d/1w)趋势矛盾,日线下跌与周线上涨形成拉锯。周线支撑$88,909已被短暂跌破长期方向待定。建议长期投资者保持观望等待日线趋势当前下跌与周线趋势当前上涨出现明确一致信号后再做布局或利用ambush点位分批建仓。\"\n },\n \"reasoning\": \"多周期分析显示市场处于严重分歧状态短期5m/15m强劲看涨中期4h/1d明确看跌长期1d/1w方向矛盾。这种分歧导致整体信号为HOLD。短期上涨面临中期趋势阻力和超买技术指标压制上行空间可能受限。交易机会仅限于快进快出的日内多头中长线需等待趋势共振。\",\n \"risk_level\": \"HIGH\",\n \"key_factors\": [\"多周期趋势严重分歧\", \"短期RSI超买与中期下跌趋势冲突\", \"价格处于4小时关键压力位$89,177\", \"成交量在短期放量但日线缩量\", \"周线长期趋势方向待定\"]\n}\n```",
"risk_reward_ratio": 6.79
}
}

71
preview_message.py Normal file
View File

@ -0,0 +1,71 @@
#!/usr/bin/env python3
"""Preview DingTalk message format using saved signal"""
import json
import sys
# Resolve paths relative to this file so the script is not tied to one machine
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent
sys.path.insert(0, str(BASE_DIR))
from notifiers.dingtalk import DingTalkNotifier
# Load the saved signal
with open(BASE_DIR / 'output' / 'latest_signal.json', 'r') as f:
    signal = json.load(f)
# Create DingTalk notifier (without webhook for preview only)
notifier = DingTalkNotifier(webhook_url=None, enabled=False)
# Debug: Check signal structure
print("Signal keys:", list(signal.keys()))
# Format the message
aggregated_signal = signal.get('aggregated_signal', {})
if not aggregated_signal:
print("ERROR: aggregated_signal is empty!")
aggregated_signal = signal # Maybe the whole signal is the aggregated_signal
print("Aggregated signal keys:", list(aggregated_signal.keys()) if aggregated_signal else "None")
try:
markdown = notifier._format_signal_markdown(aggregated_signal)
except Exception as e:
print(f"ERROR formatting markdown: {e}")
import traceback
traceback.print_exc()
markdown = "Error formatting message"
print("=" * 80)
print("📱 DINGTALK MESSAGE PREVIEW")
print("=" * 80)
print(markdown)
print("=" * 80)
# Show data structure for debugging
print("\n\n")
print("=" * 80)
print("📊 DATA STRUCTURE DEBUG")
print("=" * 80)
llm_signal = aggregated_signal.get('llm_signal', {})
opportunities = llm_signal.get('opportunities', {})
recommendations = llm_signal.get('recommendations_by_timeframe', {})
print("\nopportunities keys:")
for key in opportunities.keys():
print(f" - {key}")
print("\nshort_term_5m_15m_1h:")
short_term = opportunities.get('short_term_5m_15m_1h', {})
print(f" exists: {short_term.get('exists')}")
print(f" direction: {short_term.get('direction')}")
print(f" reasoning: {short_term.get('reasoning', '')[:100]}...")
print("\nmedium_term_4h_1d:")
medium_term = opportunities.get('medium_term_4h_1d', {})
print(f" exists: {medium_term.get('exists')}")
print(f" reasoning: {medium_term.get('reasoning', '')[:100]}...")
print("\nrecommendations_by_timeframe:")
print(f" short_term: {recommendations.get('short_term', '')[:100]}...")
print(f" medium_term: {recommendations.get('medium_term', '')[:100]}...")
print(f" long_term: {recommendations.get('long_term', '')[:100]}...")

36
redis.conf Normal file
View File

@ -0,0 +1,36 @@
# Redis Configuration for Real-time Data Ingestion
# Network
bind 0.0.0.0
protected-mode no
port 6379
# Persistence
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data
# Memory Management
maxmemory 512mb
maxmemory-policy allkeys-lru
# Append Only File (AOF) - Disabled for performance
appendonly no
# Logging
loglevel notice
logfile ""
# Stream Configuration
stream-node-max-bytes 4096
stream-node-max-entries 100
# Performance
tcp-backlog 511
timeout 0
tcp-keepalive 300
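The `stream-node-max-*` settings above only tune the radix-tree nodes backing each stream; the actual length bound comes from the MAXLEN the ingestion service passes to XADD (`REDIS_STREAM_MAXLEN=10000` in docker-compose.yml). A quick way to inspect the resulting streams from Python, assuming Redis is reachable on localhost:

```python
# Inspect stream sizes; requires the redis package pinned in requirements.txt.
import redis

r = redis.Redis(host="localhost", port=6379, db=0, decode_responses=True)
for key in ("binance:raw:kline:5m", "binance:raw:depth:20", "binance:raw:trade"):
    if r.exists(key):
        info = r.xinfo_stream(key)
        print(key, "length:", info["length"], "last id:", info["last-generated-id"])
```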

29
requirements.txt Normal file
View File

@ -0,0 +1,29 @@
# WebSocket client
websockets==12.0
# Redis async client (includes aioredis functionality)
redis[hiredis]==5.0.1
# Configuration management (use pre-built wheels)
pydantic>=2.0,<3.0
pydantic-settings>=2.0,<3.0
# Environment variables
python-dotenv==1.0.0
# Fast JSON serialization
orjson==3.9.10
# Data analysis and technical indicators
pandas==2.1.4
numpy==1.26.2
ta==0.11.0
# LLM clients (optional - only if you want LLM decision making)
anthropic==0.40.0
openai==1.58.1
# HTTP client for notifications
requests==2.31.0
# Note: asyncio is part of Python standard library, no need to install

115
run_analysis.sh Normal file
View File

@ -0,0 +1,115 @@
#!/bin/bash
# 运行量化+LLM分析可控制Gate
#
# 用法:
# ./run_analysis.sh # 使用Gate智能过滤
# ./run_analysis.sh --no-gate # 强制调用LLM绕过Gate
# ./run_analysis.sh --help # 显示帮助
set -e
# 颜色定义
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# 显示帮助
show_help() {
echo "📊 交易信号分析工具"
echo ""
echo "用法:"
echo " ./run_analysis.sh [选项]"
echo ""
echo "选项:"
echo " --no-gate, -n 强制调用LLM绕过Gate每次都分析"
echo " --gate, -g 使用Gate默认智能过滤低质量信号"
echo " --help, -h 显示此帮助信息"
echo ""
echo "示例:"
echo " ./run_analysis.sh # 智能模式(推荐)"
echo " ./run_analysis.sh --no-gate # 强制LLM分析"
echo ""
echo "说明:"
echo " • Gate模式: 只有综合得分≥5.0的信号才会调用LLM节省成本"
echo " • No-Gate模式: 每次都调用LLM适合测试或重要行情"
exit 0
}
# 解析参数
USE_GATE=true
if [ $# -gt 0 ]; then
case "$1" in
--no-gate|-n)
USE_GATE=false
;;
--gate|-g)
USE_GATE=true
;;
--help|-h)
show_help
;;
*)
echo "❌ 未知参数: $1"
echo "使用 --help 查看帮助"
exit 1
;;
esac
fi
# 显示运行模式
echo -e "${BLUE}🚀 开始生成交易信号...${NC}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
if [ "$USE_GATE" = false ]; then
echo -e "${YELLOW}⚠️ 模式: 强制LLM分析绕过Gate${NC}"
echo " 每次都会调用 DeepSeek AI"
else
echo -e "${GREEN}✅ 模式: 智能Gate推荐${NC}"
echo " 只有高质量信号才会调用 LLM"
fi
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# 检查 scheduler 容器是否运行
if ! docker compose ps scheduler 2>/dev/null | grep -q "running"; then
echo -e "${YELLOW}⚠️ scheduler 容器未运行,正在启动...${NC}"
docker compose --profile scheduler up -d
echo "⏳ 等待服务就绪..."
sleep 5
fi
# 运行分析
if [ "$USE_GATE" = false ]; then
# 绕过Gate临时设置极低阈值
docker compose exec scheduler python -c "
import os
os.environ['LLM_MIN_COMPOSITE_SCORE'] = '0.0' # 绕过Gate
# 运行分析
import sys
sys.path.insert(0, '/app')
from scripts.generate_trading_signal import main
main()
"
else
# 正常使用Gate
docker compose exec scheduler python /app/scripts/generate_trading_signal.py
fi
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo -e "${GREEN}✅ 信号生成完成!${NC}"
echo ""
echo "📊 查看结果:"
echo " ./view_signal.sh"
echo " cat output/latest_signal.json | python -m json.tool"
echo ""
echo "📱 钉钉通知:"
if docker compose logs scheduler --tail 20 | grep -q "钉钉消息发送成功"; then
echo -e " ${GREEN}✅ 已发送${NC}"
else
echo -e " ${YELLOW}⚠️ 未发送可能是HOLD信号${NC}"
fi

25
run_llm.sh Executable file
View File

@ -0,0 +1,25 @@
#!/bin/bash
# 运行量化+LLM完整分析Gate关闭每次都调用LLM
echo "🚀 运行完整分析(量化 + DeepSeek AI"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "⚠️ Gate已关闭 - 每次都会调用 DeepSeek"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# 检查容器
if ! docker compose ps scheduler 2>/dev/null | grep -q "running"; then
echo "⚠️ 启动 scheduler..."
docker compose --profile scheduler up -d
sleep 5
fi
# 临时降低Gate阈值到0强制调用LLM并发送钉钉消息
docker compose exec scheduler bash -c '
export LLM_MIN_COMPOSITE_SCORE=0.0
python /app/scripts/generate_trading_signal.py --send-dingtalk
'
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ 完成!查看结果: ./view_signal.sh"

27
run_signal.sh Executable file
View File

@ -0,0 +1,27 @@
#!/bin/bash
# 运行交易信号生成器(量化 + LLM 一体分析)
set -e # 遇到错误立即退出
echo "🚀 开始生成交易信号..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# 检查 scheduler 容器是否运行
if ! docker compose ps scheduler | grep -q "running"; then
echo "⚠️ scheduler 容器未运行,正在启动..."
docker compose --profile scheduler up -d
echo "✅ 等待服务就绪..."
sleep 5
fi
# 运行信号生成API 配置已在 docker-compose.yml 中)
docker compose exec scheduler python /app/scripts/generate_trading_signal.py
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ 信号生成完成!"
echo ""
echo "📊 查看完整结果:"
echo " cat output/latest_signal.json | python -m json.tool"
echo ""
echo "📱 如果是 BUY/SELL 信号,已自动发送钉钉通知"

15
run_signal_smart.sh Executable file
View File

@ -0,0 +1,15 @@
#!/bin/bash
# 运行交易信号生成器(智能门控 - 只在高质量信号时调用AI节省成本
# 设置 Deepseek API
export OPENAI_API_KEY='sk-your-deepseek-key-here'  # do not commit a real key
export OPENAI_BASE_URL='https://api.deepseek.com'
export LLM_GATE_ENABLED=true # 启用智能门控节省95%成本
# 运行信号生成
docker compose exec ingestion bash -c "
export OPENAI_API_KEY='$OPENAI_API_KEY'
export OPENAI_BASE_URL='$OPENAI_BASE_URL'
export LLM_GATE_ENABLED=$LLM_GATE_ENABLED
python scripts/generate_trading_signal.py
"

243
scheduler.py Normal file
View File

@ -0,0 +1,243 @@
"""
Signal Generation Scheduler - generates trading signals on a schedule
Automatically runs the quantitative analysis and LLM decision at a fixed interval.
"""
import asyncio
import logging
import signal
import sys
from datetime import datetime
from pathlib import Path
# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent))
from config.settings import settings
from analysis.engine import MarketAnalysisEngine
from signals.quantitative import QuantitativeSignalGenerator
from signals.llm_decision import LLMDecisionMaker
from signals.llm_gate import LLMGate
from signals.aggregator import SignalAggregator
from notifiers.dingtalk import DingTalkNotifier
# Setup logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class SignalScheduler:
"""定时信号生成调度器"""
def __init__(self, interval_minutes: int = 5):
"""
Args:
interval_minutes: 生成信号的时间间隔分钟
"""
self.interval_minutes = interval_minutes
self.is_running = False
# Initialize components
self.engine = MarketAnalysisEngine()
self.quant_generator = QuantitativeSignalGenerator()
# Initialize LLM gate
self.llm_gate = None
if settings.LLM_GATE_ENABLED:
self.llm_gate = LLMGate(
min_candles=settings.LLM_MIN_CANDLES,
min_composite_score=settings.LLM_MIN_COMPOSITE_SCORE,
max_calls_per_day=settings.LLM_MAX_CALLS_PER_DAY,
min_call_interval_minutes=settings.LLM_MIN_INTERVAL_MINUTES,
)
# Initialize LLM decision maker
self.llm_maker = LLMDecisionMaker(provider='openai')
# Initialize DingTalk notifier
import os
dingtalk_webhook = os.getenv('DINGTALK_WEBHOOK')
dingtalk_secret = os.getenv('DINGTALK_SECRET')
self.dingtalk = DingTalkNotifier(
webhook_url=dingtalk_webhook,
secret=dingtalk_secret,
enabled=bool(dingtalk_webhook)
)
logger.info(f"🤖 Signal Scheduler 初始化完成 - 每{interval_minutes}分钟生成一次信号")
async def generate_signal_once(self) -> dict:
"""执行一次信号生成"""
try:
logger.info("=" * 80)
logger.info(f"📊 开始生成交易信号 - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
logger.info("=" * 80)
# Step 1: Market analysis
analysis = self.engine.analyze_current_market(timeframe='5m')
if 'error' in analysis:
logger.warning(f"⚠️ 市场分析失败: {analysis['error']}")
return None
logger.info(f"✅ 市场分析完成 - 价格: ${analysis['current_price']:,.2f}, 趋势: {analysis['trend_analysis'].get('direction')}")
# Step 2: Quantitative signal
quant_signal = self.quant_generator.generate_signal(analysis)
logger.info(f"📈 量化信号: {quant_signal['signal_type']} (得分: {quant_signal['composite_score']:.1f})")
# Step 3: Check LLM gate and generate LLM decision
llm_signal = None
should_call_llm = True
if self.llm_gate:
should_call_llm, gate_reason = self.llm_gate.should_call_llm(quant_signal, analysis)
if should_call_llm:
logger.info(f"✅ LLM Gate: PASSED - {gate_reason}")
else:
logger.info(f"🚫 LLM Gate: BLOCKED - {gate_reason}")
# Call LLM if gate passed
if should_call_llm:
llm_context = self.engine.get_llm_context(format='full')
llm_signal = self.llm_maker.generate_decision(llm_context, analysis)
if llm_signal.get('enabled', True):
logger.info(f"🤖 LLM信号: {llm_signal['signal_type']} (置信度: {llm_signal.get('confidence', 0):.2%})")
else:
logger.info("⚠️ LLM未启用 (无API key)")
# Step 4: Aggregate signals
aggregated = SignalAggregator.aggregate_signals(quant_signal, llm_signal)
logger.info(f"🎯 最终信号: {aggregated['final_signal']} (置信度: {aggregated['final_confidence']:.2%})")
# Step 5: Save to file
output_file = Path(__file__).parent / 'output' / 'latest_signal.json'
output_file.parent.mkdir(exist_ok=True)
import json
output_data = {
'timestamp': datetime.now().isoformat(),
'aggregated_signal': aggregated,
'market_analysis': {
'price': analysis['current_price'],
'trend': analysis['trend_analysis'],
'momentum': analysis['momentum'],
},
'quantitative_signal': quant_signal,
'llm_signal': llm_signal if llm_signal and llm_signal.get('enabled', True) else None,
}
with open(output_file, 'w') as f:
json.dump(output_data, f, indent=2, ensure_ascii=False)
logger.info(f"💾 信号已保存到: {output_file}")
# Step 6: Send DingTalk notification
try:
# 发送通知的条件:
# 1. BUY/SELL 明确信号
# 2. HOLD信号但有日内交易机会
final_signal = aggregated.get('final_signal', 'HOLD')
should_notify = False
notify_reason = ""
if final_signal in ['BUY', 'SELL']:
should_notify = True
notify_reason = f"明确{final_signal}信号"
elif final_signal == 'HOLD':
# 检查是否有日内机会
llm_signal = aggregated.get('llm_signal')
if llm_signal and isinstance(llm_signal, dict):
opportunities = llm_signal.get('opportunities', {})
short_term = opportunities.get('short_term_5m_15m_1h', {})
if short_term.get('exists', False):
should_notify = True
direction = short_term.get('direction', 'N/A')
notify_reason = f"HOLD信号但存在短期{direction}机会"
if should_notify:
logger.info(f"📱 发送钉钉通知 - {notify_reason}")
sent = self.dingtalk.send_signal(aggregated)
if sent:
logger.info(f"✅ 钉钉通知发送成功")
else:
logger.warning(f"⚠️ 钉钉通知发送失败或未配置")
else:
logger.info(f" HOLD信号且无日内机会跳过钉钉通知")
except Exception as e:
logger.error(f"❌ 钉钉通知发送异常: {e}", exc_info=True)
logger.info("=" * 80)
return aggregated
except Exception as e:
logger.error(f"❌ 信号生成失败: {e}", exc_info=True)
return None
async def run(self):
"""启动调度器主循环"""
self.is_running = True
logger.info(f"🚀 Signal Scheduler 启动 - 每{self.interval_minutes}分钟生成信号")
# 立即生成一次
await self.generate_signal_once()
# 定时循环
while self.is_running:
try:
# 等待指定时间间隔
await asyncio.sleep(self.interval_minutes * 60)
# 生成信号
await self.generate_signal_once()
except asyncio.CancelledError:
logger.info("调度器收到取消信号")
break
except Exception as e:
logger.error(f"调度器错误: {e}", exc_info=True)
await asyncio.sleep(60) # 错误后等待1分钟再继续
logger.info("🛑 Signal Scheduler 已停止")
def stop(self):
"""停止调度器"""
self.is_running = False
async def main():
"""主入口"""
# 从环境变量或默认值获取间隔
import os
interval = int(os.getenv('SIGNAL_INTERVAL_MINUTES', '5'))
scheduler = SignalScheduler(interval_minutes=interval)
# Setup signal handlers for graceful shutdown
def signal_handler(sig, _frame):
logger.info(f"收到信号 {sig},正在关闭...")
scheduler.stop()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Start scheduler
await scheduler.run()
if __name__ == "__main__":
try:
asyncio.run(main())
except KeyboardInterrupt:
logger.info("用户中断")
except Exception as e:
logger.error(f"致命错误: {e}", exc_info=True)
sys.exit(1)
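`signals/llm_gate.py` is not included in this excerpt. Based on the four parameters the scheduler passes in (minimum candles, minimum composite score, daily call budget, call spacing), `should_call_llm` plausibly performs checks like the sketch below; the `candle_count` field is an assumed key, and the real implementation may differ:

```python
# Illustrative gate logic only; the real LLMGate lives in signals/llm_gate.py.
from datetime import datetime, timedelta

class SimpleLLMGate:
    def __init__(self, min_candles=100, min_composite_score=15.0,
                 max_calls_per_day=12, min_call_interval_minutes=0):
        self.min_candles = min_candles
        self.min_composite_score = min_composite_score
        self.max_calls_per_day = max_calls_per_day
        self.min_interval = timedelta(minutes=min_call_interval_minutes)
        self.calls_today = 0
        self.last_call = None

    def should_call_llm(self, quant_signal: dict, analysis: dict):
        if analysis.get("candle_count", 0) < self.min_candles:  # assumed key
            return False, f"insufficient data (<{self.min_candles} candles)"
        if abs(quant_signal.get("composite_score", 0.0)) < self.min_composite_score:
            return False, "composite score below threshold (neutral signal)"
        if self.calls_today >= self.max_calls_per_day:
            return False, "daily LLM call budget exhausted"
        now = datetime.now()
        if self.last_call and now - self.last_call < self.min_interval:
            return False, "minimum call interval not reached"
        self.calls_today += 1
        self.last_call = now
        return True, "all gate checks passed"
```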

350
scripts/generate_trading_signal.py Normal file
View File

@ -0,0 +1,350 @@
#!/usr/bin/env python3
"""
Generate Trading Signal - Combine quantitative analysis and LLM decision making
"""
import sys
import json
import logging
import os
import argparse
from pathlib import Path
# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from config.settings import settings
from analysis.engine import MarketAnalysisEngine
from signals.quantitative import QuantitativeSignalGenerator
from signals.llm_decision import LLMDecisionMaker
from signals.llm_gate import LLMGate
from signals.aggregator import SignalAggregator
from notifiers.dingtalk import DingTalkNotifier
# Setup logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
def print_section(title: str, width: int = 80):
"""Print section header"""
print(f"\n{'=' * width}")
print(f"{title:^{width}}")
print(f"{'=' * width}")
def print_signal(signal: dict, title: str):
"""Pretty print a signal"""
print(f"\n{title}")
print("-" * 60)
print(f"Signal: {signal['signal_type']}")
print(f"Confidence: {signal.get('confidence', 0):.2%}")
# Display trade type if available (from LLM)
if 'trade_type' in signal:
trade_type = signal['trade_type']
trade_type_display = {
'INTRADAY': '📊 日内交易',
'SWING': '📈 中长线交易',
'NONE': '⏸️ 观望'
}.get(trade_type, trade_type)
print(f"Trade Type: {trade_type_display}")
if 'composite_score' in signal:
print(f"Composite Score: {signal['composite_score']:.1f}")
if 'scores' in signal:
print("\nComponent Scores:")
for component, score in signal['scores'].items():
print(f" {component:12}: {score:>6.1f}")
if 'levels' in signal:
levels = signal['levels']
print(f"\nPrice Levels:")
print(f" Current: ${levels.get('current_price', 0):>10,.2f}")
print(f" Entry: ${levels.get('entry', 0):>10,.2f}")
print(f" Stop: ${levels.get('stop_loss', 0):>10,.2f}")
print(f" Target 1: ${levels.get('take_profit_1', 0):>10,.2f}")
print(f" Target 2: ${levels.get('take_profit_2', 0):>10,.2f}")
print(f" Target 3: ${levels.get('take_profit_3', 0):>10,.2f}")
if 'risk_reward_ratio' in signal:
rr = signal['risk_reward_ratio']
if rr > 0:
print(f"\nRisk/Reward: 1:{rr:.2f}")
# Display opportunities breakdown (from LLM)
if 'opportunities' in signal:
opps = signal['opportunities']
# Intraday opportunity
if opps.get('intraday', {}).get('exists'):
intra = opps['intraday']
print(f"\n📊 日内交易机会:")
print(f" 方向: {intra.get('direction', 'N/A')}")
if intra.get('entry_price'):
print(f" 入场: ${intra['entry_price']:,.2f}")
if intra.get('stop_loss'):
print(f" 止损: ${intra['stop_loss']:,.2f}")
if intra.get('take_profit'):
print(f" 止盈: ${intra['take_profit']:,.2f}")
if intra.get('reasoning'):
print(f" 说明: {intra['reasoning']}")
# Swing opportunity
if opps.get('swing', {}).get('exists'):
swing = opps['swing']
print(f"\n📈 中长线交易机会:")
print(f" 方向: {swing.get('direction', 'N/A')}")
if swing.get('entry_price'):
print(f" 入场: ${swing['entry_price']:,.2f}")
if swing.get('stop_loss'):
print(f" 止损: ${swing['stop_loss']:,.2f}")
if swing.get('take_profit'):
print(f" 止盈: ${swing['take_profit']:,.2f}")
if swing.get('reasoning'):
print(f" 说明: {swing['reasoning']}")
# Ambush opportunity
if opps.get('ambush', {}).get('exists'):
ambush = opps['ambush']
print(f"\n📌 埋伏点位:")
if ambush.get('price_level'):
print(f" 埋伏价位: ${ambush['price_level']:,.2f}")
if ambush.get('reasoning'):
print(f" 说明: {ambush['reasoning']}")
if 'reasoning' in signal:
print(f"\nReasoning: {signal['reasoning']}")
def print_aggregated_signal(aggregated: dict):
"""Print aggregated signal"""
print_section("📊 AGGREGATED TRADING SIGNAL")
print(f"\n🎯 Final Signal: {aggregated['final_signal']}")
print(f"📈 Confidence: {aggregated['final_confidence']:.2%}")
print(f"🤝 Consensus: {aggregated['consensus']}")
print(f"✅ Agreement Score: {aggregated['agreement_score']:.2%}")
# Quantitative signal
print("\n" + "" * 80)
quant = aggregated['quantitative_signal']
print(f"🔢 QUANTITATIVE SIGNAL: {quant.get('signal_type', quant.get('signal', 'HOLD'))} (confidence: {quant.get('confidence', 0):.2%})")
print(f" Composite Score: {quant.get('composite_score', 0):.1f}")
if 'scores' in quant:
scores = quant['scores']
print(f" Trend: {scores.get('trend', 0):>6.1f} | "
f"Momentum: {scores.get('momentum', 0):>6.1f} | "
f"OrderFlow: {scores.get('orderflow', 0):>6.1f} | "
f"Breakout: {scores.get('breakout', 0):>6.1f}")
# LLM signal
print("\n" + "" * 80)
llm = aggregated.get('llm_signal')
if llm and isinstance(llm, dict):
trade_type_icon = {
'INTRADAY': '📊',
'SWING': '📈',
'AMBUSH': '📌',
'NONE': '⏸️'
}.get(llm.get('trade_type', 'NONE'), '')
trade_type_text = {
'INTRADAY': '日内交易',
'SWING': '中长线',
'AMBUSH': '埋伏',
'NONE': '观望'
}.get(llm.get('trade_type', 'NONE'), llm.get('trade_type', 'N/A'))
print(f"🤖 LLM SIGNAL: {llm.get('signal_type', llm.get('signal', 'HOLD'))} (confidence: {llm.get('confidence', 0):.2%})")
print(f" Trade Type: {trade_type_icon} {trade_type_text}")
# Display opportunities if available
if 'opportunities' in llm:
opps = llm['opportunities']
if opps.get('intraday', {}).get('exists'):
intra = opps['intraday']
print(f" 📊 日内: {intra.get('direction')} @ ${intra.get('entry_price', 0):,.0f}")
if opps.get('swing', {}).get('exists'):
swing = opps['swing']
print(f" 📈 中长线: {swing.get('direction')} @ ${swing.get('entry_price', 0):,.0f}")
if opps.get('ambush', {}).get('exists'):
ambush = opps['ambush']
print(f" 📌 埋伏: ${ambush.get('price_level', 0):,.0f}")
print(f" Reasoning: {llm.get('reasoning', 'N/A')[:200]}")
if llm.get('key_factors'):
print(f" Key Factors: {', '.join(llm['key_factors'][:3])}")
else:
print("🤖 LLM SIGNAL: Not available (no API key configured)")
# Final levels
print("\n" + "" * 80)
levels = aggregated['levels']
print("💰 RECOMMENDED LEVELS:")
print(f" Current Price: ${levels['current_price']:>10,.2f}")
print(f" Entry: ${levels['entry']:>10,.2f}")
print(f" Stop Loss: ${levels['stop_loss']:>10,.2f}")
print(f" Take Profit 1: ${levels['take_profit_1']:>10,.2f}")
print(f" Take Profit 2: ${levels['take_profit_2']:>10,.2f}")
print(f" Take Profit 3: ${levels['take_profit_3']:>10,.2f}")
rr = aggregated.get('risk_reward_ratio', 0)
if rr > 0:
print(f"\n Risk/Reward Ratio: 1:{rr:.2f}")
# Recommendation
print("\n" + "" * 80)
print(f"💡 RECOMMENDATION:")
print(f" {aggregated['recommendation']}")
# Warnings
if aggregated.get('warnings'):
print("\n" + "" * 80)
print("⚠️ WARNINGS:")
for warning in aggregated['warnings']:
print(f" {warning}")
print("\n" + "=" * 80)
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description='Generate trading signals')
parser.add_argument('--send-dingtalk', action='store_true',
help='Send notification to DingTalk')
args = parser.parse_args()
print_section("🚀 TRADING SIGNAL GENERATOR", 80)
# Initialize components
logger.info("Initializing analysis engine...")
engine = MarketAnalysisEngine()
logger.info("Initializing signal generators...")
quant_generator = QuantitativeSignalGenerator()
# Initialize DingTalk notifier if requested
dingtalk = None
if args.send_dingtalk:
dingtalk_webhook = os.getenv('DINGTALK_WEBHOOK')
dingtalk_secret = os.getenv('DINGTALK_SECRET')
dingtalk = DingTalkNotifier(
webhook_url=dingtalk_webhook,
secret=dingtalk_secret,
enabled=bool(dingtalk_webhook)
)
# Initialize LLM gate (极简门控 - 频率为主,量化初筛)
llm_gate = None
if settings.LLM_GATE_ENABLED:
logger.info("Initializing simplified LLM gate...")
llm_gate = LLMGate(
min_candles=settings.LLM_MIN_CANDLES,
min_composite_score=settings.LLM_MIN_COMPOSITE_SCORE,
max_calls_per_day=settings.LLM_MAX_CALLS_PER_DAY,
min_call_interval_minutes=settings.LLM_MIN_INTERVAL_MINUTES,
)
# Try to initialize LLM (will be disabled if no API key)
# Use 'openai' provider - supports OpenAI, Deepseek, and other OpenAI-compatible APIs
llm_maker = LLMDecisionMaker(provider='openai') # or 'claude'
# Step 1: Perform market analysis
print_section("1⃣ MARKET ANALYSIS")
analysis = engine.analyze_current_market(timeframe='5m')
if 'error' in analysis:
print(f"❌ Error: {analysis['error']}")
print("\n💡 Tip: Wait for more data to accumulate (need at least 200 candles)")
return
print(f"✅ Analysis complete")
print(f" Price: ${analysis['current_price']:,.2f}")
print(f" Trend: {analysis['trend_analysis'].get('direction', 'unknown')}")
print(f" RSI: {analysis['momentum'].get('rsi', 0):.1f}")
print(f" MACD: {analysis['momentum'].get('macd_signal', 'unknown')}")
# Step 2: Generate quantitative signal
print_section("2⃣ QUANTITATIVE SIGNAL")
quant_signal = quant_generator.generate_signal(analysis)
print_signal(quant_signal, "📊 Quantitative Analysis")
# Step 3: Check LLM gate and generate LLM decision
print_section("3⃣ LLM DECISION")
llm_signal = None
should_call_llm = True
gate_reason = "LLM gate disabled"
# Check LLM gate prerequisites
if llm_gate:
should_call_llm, gate_reason = llm_gate.should_call_llm(quant_signal, analysis)
if should_call_llm:
print(f"\n✅ LLM Gate: PASSED")
print(f" Reason: {gate_reason}")
else:
print(f"\n❌ LLM Gate: BLOCKED")
print(f" Reason: {gate_reason}")
print(f"\n💡 LLM will NOT be called. Using quantitative signal only.")
print(f" Quantitative score: {quant_signal.get('composite_score', 0):.1f}")
print(f" Quantitative confidence: {quant_signal.get('confidence', 0):.2%}")
# Call LLM only if gate passed
if should_call_llm:
llm_context = engine.get_llm_context(format='full')
llm_signal = llm_maker.generate_decision(llm_context, analysis)
if llm_signal.get('enabled', True):
print_signal(llm_signal, "🤖 LLM Analysis")
else:
print("\n🤖 LLM Analysis: Disabled (no API key)")
print(" Set ANTHROPIC_API_KEY or OPENAI_API_KEY to enable")
else:
# LLM blocked by gate, use None (aggregator will use quant-only)
print("\n🤖 LLM Analysis: Skipped (gate blocked)")
# Step 4: Aggregate signals
print_section("4⃣ SIGNAL AGGREGATION")
aggregated = SignalAggregator.aggregate_signals(quant_signal, llm_signal)
print_aggregated_signal(aggregated)
# Step 5: Export to JSON
output_file = Path(__file__).parent.parent / 'output' / 'latest_signal.json'
output_file.parent.mkdir(exist_ok=True)
output_data = {
'aggregated_signal': aggregated,
'market_analysis': {
'price': analysis['current_price'],
'trend': analysis['trend_analysis'],
'momentum': analysis['momentum'],
},
'quantitative_signal': quant_signal,
'llm_signal': llm_signal if llm_signal and llm_signal.get('enabled', True) else None,
}
with open(output_file, 'w') as f:
json.dump(output_data, f, indent=2, ensure_ascii=False)
print(f"\n💾 Signal saved to: {output_file}")
# Send DingTalk notification if enabled
if dingtalk:
print(f"\n📱 Sending DingTalk notification...")
success = dingtalk.send_signal(aggregated)
if success:
print(f"✅ DingTalk notification sent successfully")
else:
print(f"❌ Failed to send DingTalk notification")
print_section("✅ SIGNAL GENERATION COMPLETE", 80)
if __name__ == "__main__":
main()
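The `raw_response` field saved in output/latest_signal.json shows the LLM replying inside a Markdown code fence, so the response has to be unwrapped before `json.loads`. A hedged sketch of that unwrapping; the production parser lives in `signals/llm_decision.py`, which is not shown here:

```python
# Illustrative fence-stripping; the real parser in signals/llm_decision.py may differ.
import json
import re

def parse_llm_json(raw: str) -> dict:
    """Strip an optional Markdown code fence and parse the JSON payload."""
    match = re.search(r"```(?:json)?\s*(.*?)\s*```", raw, re.DOTALL)
    payload = match.group(1) if match else raw
    return json.loads(payload)

raw = '```json\n{"signal": "HOLD", "confidence": 0.6}\n```'
print(parse_llm_json(raw)["signal"])  # HOLD
```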

50
scripts/monitor.sh Executable file
View File

@ -0,0 +1,50 @@
#!/bin/bash
# Real-time monitoring script for ingestion system
echo "=================================================="
echo " Binance Data Ingestion - Real-time Monitor"
echo "=================================================="
echo ""
# Check if Docker is running
if ! docker info > /dev/null 2>&1; then
echo "❌ Docker is not running"
exit 1
fi
# Detect Docker Compose command
if command -v docker-compose &> /dev/null; then
DOCKER_COMPOSE="docker-compose"
else
DOCKER_COMPOSE="docker compose"
fi
echo "📊 Container Status:"
$DOCKER_COMPOSE ps
echo ""
echo "🔍 Stream Statistics:"
docker exec tradus-redis redis-cli << EOF
KEYS binance:raw:*
XLEN binance:raw:kline:5m
XLEN binance:raw:depth:20
XLEN binance:raw:trade
EOF
echo ""
echo "💾 Redis Memory Usage:"
docker exec tradus-redis redis-cli INFO memory | grep used_memory_human
echo ""
echo "📈 Latest K-line Data (last 3):"
docker exec tradus-redis redis-cli XREVRANGE binance:raw:kline:5m + - COUNT 3
echo ""
echo "📋 Application Logs (last 20 lines):"
$DOCKER_COMPOSE logs --tail=20 ingestion
echo ""
echo "=================================================="
echo " Monitoring complete"
echo "=================================================="

128
scripts/run_analysis.py Executable file
View File

@ -0,0 +1,128 @@
#!/usr/bin/env python3
"""
Run market analysis and display results
"""
import sys
import json
import logging
from pathlib import Path
# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from analysis.engine import MarketAnalysisEngine
# Setup logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
def print_section(title: str, content: dict, indent: int = 0):
"""Pretty print a section"""
indent_str = " " * indent
print(f"\n{indent_str}{'=' * 60}")
print(f"{indent_str}{title}")
print(f"{indent_str}{'=' * 60}")
for key, value in content.items():
if isinstance(value, dict):
print(f"{indent_str}{key}:")
for k, v in value.items():
print(f"{indent_str} {k}: {v}")
elif isinstance(value, list):
print(f"{indent_str}{key}: {', '.join(map(str, value)) if value else '[]'}")
else:
print(f"{indent_str}{key}: {value}")
def main():
print("🚀 Starting Market Analysis Engine...")
engine = MarketAnalysisEngine()
# Check data availability
print("\n📊 Checking data availability...")
status = engine.check_data_availability()
print(json.dumps(status, indent=2, ensure_ascii=False))
# Perform analysis
print("\n🔍 Performing market analysis...")
analysis = engine.analyze_current_market(timeframe='5m')
if 'error' in analysis:
print(f"❌ Error: {analysis['error']}")
return
# Display results
print_section("📈 MARKET OVERVIEW", {
'Symbol': analysis['symbol'],
'Timeframe': analysis['timeframe'],
'Current Price': f"${analysis['current_price']:,.2f}",
'Timestamp': analysis['timestamp'],
})
print_section("📊 TREND ANALYSIS", analysis['trend_analysis'])
print_section("💰 SUPPORT & RESISTANCE", {
'Nearest Support': analysis['support_resistance'].get('nearest_support'),
'Nearest Resistance': analysis['support_resistance'].get('nearest_resistance'),
'All Support': analysis['support_resistance'].get('support', []),
'All Resistance': analysis['support_resistance'].get('resistance', []),
})
print_section("⚡ MOMENTUM", analysis['momentum'])
print_section("📉 KEY INDICATORS", {
'RSI': analysis['indicators']['rsi'],
'MACD': analysis['indicators']['macd'],
'MACD Histogram': analysis['indicators']['macd_hist'],
'ADX': analysis['indicators']['adx'],
'ATR': analysis['indicators']['atr'],
'BB Width': analysis['indicators']['bb_width'],
'Volume Ratio': analysis['indicators']['volume_ratio'],
})
# Order flow if available
if 'orderflow' in analysis:
print_section("💸 ORDER FLOW", {
'Imbalance': f"{analysis['orderflow']['imbalance']['imbalance_pct']}%",
'Status': analysis['orderflow']['imbalance']['status'],
'Pressure': analysis['orderflow']['imbalance']['pressure'],
'Strength': analysis['orderflow']['strength']['strength'],
'Large Bids': analysis['orderflow']['large_orders']['large_bids_count'],
'Large Asks': analysis['orderflow']['large_orders']['large_asks_count'],
})
print_section("🏦 LIQUIDITY", {
'Spread': f"{analysis['orderflow']['liquidity']['spread_pct']:.4f}%",
'Best Bid': f"${analysis['orderflow']['liquidity']['best_bid']:,.2f}",
'Best Ask': f"${analysis['orderflow']['liquidity']['best_ask']:,.2f}",
})
# Breakout detection
if analysis['breakout'].get('has_breakout'):
print_section("🚨 BREAKOUT DETECTED", analysis['breakout'])
elif 'approaching' in analysis['breakout']:
print_section("⚠️ APPROACHING KEY LEVEL", analysis['breakout'])
# Multi-timeframe
print("\n\n🕐 MULTI-TIMEFRAME ANALYSIS")
print("=" * 60)
mtf = engine.get_multi_timeframe_analysis()
for tf, data in mtf.items():
print(f"\n{tf:>4}: {data['trend']:^6} | Strength: {data['strength']:^8} | RSI: {data['rsi']:>5.1f} | ADX: {data['adx']:>5.1f}")
# LLM Context
print("\n\n🤖 LLM CONTEXT (Simplified)")
print("=" * 60)
llm_context = engine.get_llm_context(format='simplified')
print(json.dumps(llm_context, indent=2, ensure_ascii=False))
print("\n✅ Analysis complete!")
if __name__ == "__main__":
main()

95
scripts/test_redis_read.py Executable file
View File

@ -0,0 +1,95 @@
#!/usr/bin/env python3
"""
Test script to read data from Redis Streams
"""
import redis
import orjson
import sys
def test_read_streams():
"""Read and display data from all Redis Streams"""
# Connect to Redis
try:
r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=False)
r.ping()
print("✓ Connected to Redis\n")
except Exception as e:
print(f"✗ Failed to connect to Redis: {e}")
sys.exit(1)
# Stream keys to read
streams = [
'binance:raw:kline:5m',
'binance:raw:depth:20',
'binance:raw:trade',
]
print("=" * 80)
print("Reading data from Redis Streams")
print("=" * 80)
for stream_key in streams:
print(f"\n📊 Stream: {stream_key}")
print("-" * 80)
try:
# Get stream length
length = r.xlen(stream_key)
print(f"Stream length: {length}")
if length == 0:
print("No data available yet\n")
continue
# Read last 3 messages
messages = r.xrevrange(stream_key, count=3)
for i, (msg_id, fields) in enumerate(messages, 1):
print(f"\n[Message {i}]")
print(f"ID: {msg_id.decode()}")
# Parse JSON data
data = orjson.loads(fields[b'data'])
# Display based on stream type
if 'kline' in stream_key:
kline = data.get('k', {})
print(f"Symbol: {data.get('s')}")
print(f"Open: {kline.get('o')}")
print(f"High: {kline.get('h')}")
print(f"Low: {kline.get('l')}")
print(f"Close: {kline.get('c')}")
print(f"Volume: {kline.get('v')}")
print(f"Closed: {kline.get('x')}")
elif 'depth' in stream_key:
print(f"Symbol: {data.get('s')}")
print(f"Event time: {data.get('E')}")
print(f"First update ID: {data.get('U')}")
print(f"Last update ID: {data.get('u')}")
bids = data.get('b', [])[:3]
asks = data.get('a', [])[:3]
print(f"Top 3 bids: {bids}")
print(f"Top 3 asks: {asks}")
elif 'trade' in stream_key:
print(f"Symbol: {data.get('s')}")
print(f"Price: {data.get('p')}")
print(f"Quantity: {data.get('q')}")
print(f"Time: {data.get('T')}")
print(f"Buyer is maker: {data.get('m')}")
print(f"Received at: {data.get('_received_at')}")
except Exception as e:
print(f"Error reading stream {stream_key}: {e}")
print("\n" + "=" * 80)
print("✓ Test completed")
print("=" * 80)
if __name__ == "__main__":
test_read_streams()

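The script above samples recent history with XREVRANGE; a live consumer would typically block on XREAD instead. A minimal sketch under the same assumptions (local Redis, the 5m kline stream):

import redis
import orjson

r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=False)
last_id = '$'  # start from messages that arrive after we connect
while True:
    # Block for up to 5 seconds waiting for new entries
    for _stream, messages in r.xread({'binance:raw:kline:5m': last_id}, count=10, block=5000):
        for msg_id, fields in messages:
            data = orjson.loads(fields[b'data'])
            print(msg_id.decode(), data.get('k', {}).get('c'))  # close price
            last_id = msg_id  # resume from the last ID we saw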
13
signals/__init__.py Normal file
View File

@ -0,0 +1,13 @@
"""
Trading Signal Generation Module
"""
from .quantitative import QuantitativeSignalGenerator
from .llm_decision import LLMDecisionMaker
from .aggregator import SignalAggregator
__all__ = [
'QuantitativeSignalGenerator',
'LLMDecisionMaker',
'SignalAggregator',
]

347
signals/aggregator.py Normal file
View File

@ -0,0 +1,347 @@
"""
Signal Aggregator - Combine quantitative and LLM signals
"""
import logging
from typing import Dict, Any, List, Optional
from datetime import datetime
logger = logging.getLogger(__name__)
class SignalAggregator:
"""Aggregate and compare signals from multiple sources"""
@staticmethod
def aggregate_signals(
quant_signal: Dict[str, Any],
llm_signal: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""
Aggregate quantitative and LLM signals
Args:
quant_signal: Signal from QuantitativeSignalGenerator
llm_signal: Optional signal from LLMDecisionMaker
Returns:
Aggregated signal with consensus analysis
"""
# If no LLM signal, return quant signal only
if not llm_signal or not llm_signal.get('enabled', True):
return {
'timestamp': datetime.now().isoformat(),
'final_signal': quant_signal['signal_type'],
'final_confidence': quant_signal.get('confidence', 0.5),
'quantitative_signal': quant_signal,
'llm_signal': None,
'consensus': 'QUANT_ONLY',
'agreement_score': 1.0,
'levels': quant_signal.get('levels', {}),
'risk_reward_ratio': quant_signal.get('risk_reward_ratio', 0),
'recommendation': SignalAggregator._generate_recommendation(
quant_signal, None
),
'warnings': SignalAggregator._check_warnings(quant_signal, None, 1.0),
}
# Extract signals
quant_type = quant_signal.get('signal_type', 'HOLD')
llm_type = llm_signal.get('signal_type', 'HOLD')
quant_confidence = quant_signal.get('confidence', 0.5)
llm_confidence = llm_signal.get('confidence', 0.5)
# Calculate agreement
agreement = SignalAggregator._calculate_agreement(
quant_type, llm_type, quant_confidence, llm_confidence
)
# Determine final signal
final_signal, final_confidence, consensus_type = SignalAggregator._determine_final_signal(
quant_type, llm_type, quant_confidence, llm_confidence, agreement
)
# Combine levels
final_levels = SignalAggregator._combine_levels(quant_signal, llm_signal)
# Build aggregated signal
aggregated = {
'timestamp': datetime.now().isoformat(),
'final_signal': final_signal,
'final_confidence': round(final_confidence, 2),
'consensus': consensus_type,
'agreement_score': round(agreement, 2),
'quantitative_signal': {
'signal_type': quant_type,
'signal': quant_type, # Keep both for compatibility
'confidence': quant_confidence,
'composite_score': quant_signal.get('composite_score', 0),
'scores': quant_signal.get('scores', {}),
},
'llm_signal': {
'signal_type': llm_type,
'signal': llm_type, # Keep both for compatibility
'confidence': llm_confidence,
'reasoning': llm_signal.get('reasoning', ''),
'key_factors': llm_signal.get('key_factors', []),
# Multi-timeframe analysis fields
'opportunities': llm_signal.get('opportunities', {}),
'recommendations_by_timeframe': llm_signal.get('recommendations_by_timeframe', {}),
'trade_type': llm_signal.get('trade_type', ''),
'risk_level': llm_signal.get('risk_level', 'MEDIUM'),
},
'levels': final_levels,
'risk_reward_ratio': SignalAggregator._calculate_rr_ratio(final_levels),
'recommendation': SignalAggregator._generate_recommendation(
quant_signal, llm_signal
),
'warnings': SignalAggregator._check_warnings(
quant_signal, llm_signal, agreement
),
}
logger.info(
f"Aggregated signal: {final_signal} (confidence: {final_confidence:.2f}, "
f"consensus: {consensus_type}, agreement: {agreement:.2f})"
)
return aggregated
@staticmethod
def _calculate_agreement(
quant_signal: str,
llm_signal: str,
quant_confidence: float,
llm_confidence: float
) -> float:
"""
Calculate agreement score between signals (0-1)
1.0 = Perfect agreement
0.5 = Neutral (one HOLD)
0.0 = Complete disagreement
"""
# Signal direction agreement
if quant_signal == llm_signal:
direction_agreement = 1.0
elif quant_signal == 'HOLD' or llm_signal == 'HOLD':
direction_agreement = 0.5 # Neutral
else:
direction_agreement = 0.0 # Opposite signals
# Confidence alignment (higher when both are confident)
avg_confidence = (quant_confidence + llm_confidence) / 2
# Combined agreement
agreement = direction_agreement * avg_confidence
return agreement
@staticmethod
def _determine_final_signal(
quant_signal: str,
llm_signal: str,
quant_confidence: float,
llm_confidence: float,
agreement: float
) -> tuple:
"""
Determine final signal from two sources
Returns:
(final_signal, final_confidence, consensus_type)
"""
# Perfect agreement
if quant_signal == llm_signal:
if quant_signal != 'HOLD':
return (
quant_signal,
(quant_confidence + llm_confidence) / 2,
'STRONG_CONSENSUS'
)
else:
return (
'HOLD',
(quant_confidence + llm_confidence) / 2,
'CONSENSUS_HOLD'
)
# One is HOLD
if quant_signal == 'HOLD':
return (
llm_signal,
llm_confidence * 0.7, # Reduce confidence
'LLM_LEADING'
)
elif llm_signal == 'HOLD':
return (
quant_signal,
quant_confidence * 0.7,
'QUANT_LEADING'
)
# Conflicting signals (BUY vs SELL)
else:
# Use higher confidence signal
if quant_confidence > llm_confidence:
return (
quant_signal,
quant_confidence * 0.5, # Significantly reduce confidence
'CONFLICT_QUANT_WINS'
)
elif llm_confidence > quant_confidence:
return (
llm_signal,
llm_confidence * 0.5,
'CONFLICT_LLM_WINS'
)
else:
# Equal confidence - default to HOLD
return (
'HOLD',
0.3,
'CONFLICT_HOLD'
)
@staticmethod
def _combine_levels(
quant_signal: Dict[str, Any],
llm_signal: Dict[str, Any]
) -> Dict[str, Any]:
"""Combine price levels from both signals"""
quant_levels = quant_signal.get('levels', {})
llm_levels = llm_signal.get('levels', {})
# Average the levels if both exist
combined = {}
for key in ['current_price', 'entry', 'stop_loss', 'take_profit_1', 'take_profit_2', 'take_profit_3']:
quant_val = quant_levels.get(key, 0)
llm_val = llm_levels.get(key, 0)
if quant_val > 0 and llm_val > 0:
# Average both
combined[key] = round((quant_val + llm_val) / 2, 2)
elif quant_val > 0:
combined[key] = quant_val
elif llm_val > 0:
combined[key] = llm_val
else:
combined[key] = 0
# Add range if values differ significantly
for key in ['entry', 'stop_loss', 'take_profit_1']:
quant_val = quant_levels.get(key, 0)
llm_val = llm_levels.get(key, 0)
if quant_val > 0 and llm_val > 0:
diff_pct = abs(quant_val - llm_val) / ((quant_val + llm_val) / 2) * 100
if diff_pct > 1: # More than 1% difference
combined[f'{key}_range'] = {
'quant': quant_val,
'llm': llm_val,
'diff_pct': round(diff_pct, 2),
}
return combined
@staticmethod
def _calculate_rr_ratio(levels: Dict[str, Any]) -> float:
"""Calculate risk-reward ratio from levels"""
entry = levels.get('entry', 0)
stop_loss = levels.get('stop_loss', 0)
take_profit = levels.get('take_profit_1', 0)
if entry == 0 or stop_loss == 0 or take_profit == 0:
return 0
risk = abs(entry - stop_loss)
reward = abs(take_profit - entry)
if risk == 0:
return 0
return round(reward / risk, 2)
@staticmethod
def _generate_recommendation(
quant_signal: Dict[str, Any],
llm_signal: Optional[Dict[str, Any]]
) -> str:
"""Generate human-readable recommendation"""
quant_type = quant_signal.get('signal_type', 'HOLD')
if not llm_signal:
quant_conf = quant_signal.get('confidence', 0)
quant_reasoning = quant_signal.get('reasoning', '')
if quant_type == 'BUY' and quant_conf > 0.7:
return f"强烈建议做多: {quant_reasoning}"
elif quant_type == 'BUY':
return f"考虑做多: {quant_reasoning}"
elif quant_type == 'SELL' and quant_conf > 0.7:
return f"强烈建议做空: {quant_reasoning}"
elif quant_type == 'SELL':
return f"考虑做空: {quant_reasoning}"
else:
return f"观望: {quant_reasoning}"
llm_type = llm_signal.get('signal_type', 'HOLD')
if quant_type == llm_type and quant_type != 'HOLD':
return f"量化和AI分析一致建议{quant_type}: 高置信度交易机会"
elif quant_type == llm_type and quant_type == 'HOLD':
return "量化和AI分析均建议观望,等待更好的机会"
elif quant_type == 'HOLD':
return f"AI建议{llm_type},但量化信号不强,建议谨慎"
elif llm_type == 'HOLD':
return f"量化建议{quant_type},但AI建议观望,建议谨慎"
else:
return f"⚠️ 信号冲突: 量化建议{quant_type},AI建议{llm_type},强烈建议观望"
@staticmethod
def _check_warnings(
quant_signal: Dict[str, Any],
llm_signal: Optional[Dict[str, Any]],
agreement: float
) -> List[str]:
"""Check for warnings and risk factors"""
warnings = []
# Low agreement warning
if llm_signal and agreement < 0.3:
warnings.append("⚠️ 量化和AI信号严重分歧,建议观望")
# Low confidence warning
quant_conf = quant_signal.get('confidence', 0)
if quant_conf < 0.4:
warnings.append("⚠️ 量化信号置信度较低")
if llm_signal:
llm_conf = llm_signal.get('confidence', 0)
if llm_conf < 0.4:
warnings.append("⚠️ AI信号置信度较低")
# Risk-reward warning
rr = quant_signal.get('risk_reward_ratio', 0)
if rr > 0 and rr < 1.5:
warnings.append(f"⚠️ 风险回报比偏低 ({rr}:1), 建议至少1.5:1")
# Large level discrepancy
if llm_signal:
quant_levels = quant_signal.get('levels', {})
llm_levels = llm_signal.get('levels', {})
for key in ['entry', 'stop_loss']:
quant_val = quant_levels.get(key, 0)
llm_val = llm_levels.get(key, 0)
if quant_val > 0 and llm_val > 0:
diff_pct = abs(quant_val - llm_val) / ((quant_val + llm_val) / 2) * 100
if diff_pct > 2:
warnings.append(
f"⚠️ {key}建议差异较大: 量化${quant_val:.2f} vs AI${llm_val:.2f} ({diff_pct:.1f}%)"
)
return warnings

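A minimal usage sketch for the aggregator (toy signal dicts with illustrative values; in the pipeline these come from QuantitativeSignalGenerator and LLMDecisionMaker):

from signals.aggregator import SignalAggregator

quant = {
    'signal_type': 'BUY', 'confidence': 0.7, 'composite_score': 52.0,
    'levels': {'entry': 90500.0, 'stop_loss': 89800.0, 'take_profit_1': 91900.0},
    'reasoning': 'uptrend; MACD golden cross',
}
llm = {
    'signal_type': 'BUY', 'confidence': 0.6, 'enabled': True,
    'levels': {'entry': 90450.0, 'stop_loss': 89700.0, 'take_profit_1': 92000.0},
    'reasoning': 'multi-timeframe alignment',
}
result = SignalAggregator.aggregate_signals(quant, llm)
print(result['final_signal'], result['consensus'], result['agreement_score'])
# BUY STRONG_CONSENSUS 0.65 -- agreement is direction match scaled by average confidence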
647
signals/llm_decision.py Normal file
View File

@ -0,0 +1,647 @@
"""
LLM Decision Maker - Use Claude/GPT for trading decisions
"""
import logging
import json
import re
from typing import Dict, Any, Optional
from datetime import datetime
import os
logger = logging.getLogger(__name__)
class LLMDecisionMaker:
"""Generate trading decisions using LLM (Claude or OpenAI)"""
def __init__(self, provider: str = 'claude', api_key: Optional[str] = None):
"""
Initialize LLM decision maker
Args:
provider: 'claude' or 'openai'
api_key: API key (or use environment variable)
"""
self.provider = provider.lower()
self.api_key = api_key or self._get_api_key()
if not self.api_key:
logger.warning(f"No API key found for {provider}. LLM decisions will be disabled.")
self.enabled = False
else:
self.enabled = True
self._init_client()
def _get_api_key(self) -> Optional[str]:
"""Get API key from environment"""
if self.provider == 'claude':
return os.getenv('ANTHROPIC_API_KEY')
elif self.provider == 'openai':
return os.getenv('OPENAI_API_KEY')
return None
def _init_client(self):
"""Initialize LLM client"""
try:
if self.provider == 'claude':
import anthropic
self.client = anthropic.Anthropic(api_key=self.api_key)
self.model = "claude-3-5-sonnet-20241022"
elif self.provider == 'openai':
import openai
# Support custom base URL (for Deepseek, etc.)
base_url = os.getenv('OPENAI_BASE_URL')
if base_url:
self.client = openai.OpenAI(
api_key=self.api_key,
base_url=base_url
)
# Use appropriate model for the endpoint
if 'deepseek' in base_url.lower():
self.model = "deepseek-chat"
logger.info("Using Deepseek API endpoint")
else:
self.model = "gpt-4-turbo-preview"
else:
self.client = openai.OpenAI(api_key=self.api_key)
self.model = "gpt-4-turbo-preview"
logger.info(f"Initialized {self.provider} client with model {self.model}")
except ImportError as e:
logger.error(f"Failed to import {self.provider} library: {e}")
self.enabled = False
except Exception as e:
logger.error(f"Failed to initialize {self.provider} client: {e}")
self.enabled = False
def generate_decision(
self,
market_context: Dict[str, Any],
analysis: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""
Generate trading decision using LLM
Args:
market_context: LLM context from LLMContextBuilder
analysis: Optional full analysis for additional details
Returns:
Decision dict with signal, reasoning, and levels
"""
if not self.enabled:
return self._disabled_response()
try:
# Build prompt
prompt = self._build_prompt(market_context, analysis)
# Log the complete prompt being sent to LLM
logger.info("=" * 80)
logger.info("📤 完整的 LLM 提示词 (发送给 Deepseek):")
logger.info("=" * 80)
logger.info(prompt)
logger.info("=" * 80)
# Call LLM
response_text = self._call_llm(prompt)
# Log the LLM response
logger.info("=" * 80)
logger.info("📥 LLM 原始响应:")
logger.info("=" * 80)
logger.info(response_text)
logger.info("=" * 80)
# Parse response
decision = self._parse_response(response_text, market_context)
logger.info(
f"LLM decision: {decision['signal_type']} "
f"(confidence: {decision.get('confidence', 0):.2f})"
)
return decision
except Exception as e:
logger.error(f"Error generating LLM decision: {e}", exc_info=True)
logger.debug(f"Market context: {market_context}")
return self._error_response(str(e))
def _build_prompt(
self,
market_context: Dict[str, Any],
analysis: Optional[Dict[str, Any]]
) -> str:
"""Build trading decision prompt"""
# Extract context elements
market_state = market_context.get('market_state', {})
key_prices = market_context.get('key_prices', {})
momentum = market_context.get('momentum', {})
signal_consensus = market_context.get('signal_consensus', 0.5)
current_price = market_context.get('current_price', 0)
# Build structured prompt
prompt = f"""你是一个专业的加密货币交易分析师。基于以下多时间周期市场分析数据,提供分层次的交易建议。
## 当前价格
${current_price:,.2f}
## 请提供以下内容 (使用JSON格式):
{{
"signal": "BUY" | "SELL" | "HOLD",
"confidence": 0.0-1.0,
// 分时间级别的交易机会分析
"opportunities": {{
"short_term_5m_15m_1h": {{
"exists": true/false,
"timeframe_label": "短期 (5m/15m/1h)",
"direction": "LONG" | "SHORT" | null,
"entry_price": 进场价格数值或null,
"stop_loss": 止损价格数值或null,
"take_profit": 止盈价格数值或null,
"reasoning": "短期日内机会说明",
"持仓时间": "几分钟到几小时"
}},
"medium_term_4h_1d": {{
"exists": true/false,
"timeframe_label": "中期 (4h/1d)",
"direction": "LONG" | "SHORT" | null,
"entry_price": 进场价格数值或null,
"stop_loss": 止损价格数值或null,
"take_profit": 止盈价格数值或null,
"reasoning": "中期波段机会说明",
"持仓时间": "数天到一周"
}},
"long_term_1d_1w": {{
"exists": true/false,
"timeframe_label": "长期 (1d/1w)",
"direction": "LONG" | "SHORT" | null,
"entry_price": 进场价格数值或null,
"stop_loss": 止损价格数值或null,
"take_profit": 止盈价格数值或null,
"reasoning": "长期趋势机会说明",
"持仓时间": "数周到数月"
}},
"ambush": {{
"exists": true/false,
"price_level": 埋伏价格数值或null,
"reasoning": "埋伏点位说明 (等待回调/反弹到关键位)",
"timeframe": "基于哪个时间级别的关键位"
}}
}},
// 分级别操作建议必填即使某级别无机会也要说明原因
"recommendations_by_timeframe": {{
"short_term": "短期(5m/15m/1h)操作建议",
"medium_term": "中期(4h/1d)操作建议",
"long_term": "长期(1d/1w)操作建议"
}},
// 综合分析
"reasoning": "多周期综合分析 (3-5句话说明各周期是否一致)",
"risk_level": "LOW" | "MEDIUM" | "HIGH",
"key_factors": ["影响因素1", "影响因素2", ...]
}}
**输出说明**:
1. **signal**: 主要交易信号 (BUY/SELL/HOLD)
2. **confidence**: 对主要信号的信心度 (0-1)
3. **opportunities**: 分时间级别详细分析
- **short_term_5m_15m_1h**: 短期日内交易机会 (持仓几分钟到几小时)
- 基于5m/15m/1h周期共振
- 止损: 5m/15m ATR × 1.5, 通常0.3%-0.5%
- 止盈: 1h压力/支撑位, 风险回报比1:2
- **medium_term_4h_1d**: 中期波段交易机会 (持仓数天到一周)
- 基于4h/1d周期趋势
- 止损: 4h ATR × 1.5, 通常1%-2%
- 止盈: 日线关键位, 风险回报比1:2.5
- **long_term_1d_1w**: 长期趋势交易机会 (持仓数周到数月)
- 基于1d/1w周期趋势
- 止损: 日线ATR × 1.5, 通常2%-4%
- 止盈: 周线关键位, 风险回报比1:3
- **ambush**: 埋伏点位机会
- 基于日线/周线关键支撑压力位
- 等待价格到达后再决定入场
4. **recommendations_by_timeframe**: 各级别操作建议必填
- 即使某级别无明确机会,也要说明原因和观望理由
5. **reasoning**: 多周期综合分析,说明各周期是否一致,存在哪些分歧
**重要原则**:
1. **平等对待所有时间级别** - 不要偏向任何周期,根据量化评分客观分析
2. **可以同时存在多级别机会** - 例如: 短期做多(日内) + 中期观望 + 长期做空(趋势)
3. **各级别独立分析** - 短期中期长期分别给出建议,不要混淆
4. **必须填写recommendations_by_timeframe** - 即使是HOLD也要说明理由
5. **止损止盈必须匹配时间级别** - 短期用小止损,长期用大止损
6. **响应必须是有效的JSON格式** - 不要包含注释
"""
# Add comprehensive multi-timeframe analysis if available
if 'multi_timeframe' in market_context:
mtf = market_context['multi_timeframe']
prompt += f"\n## 多时间框架技术分析 (完整指标)\n\n"
# Define timeframe order and display names
tf_order = [
('5m', '5分钟'),
('15m', '15分钟'),
('1h', '1小时'),
('4h', '4小时'),
('1d', '日线'),
('1w', '周线')
]
for tf_key, tf_name in tf_order:
if tf_key not in mtf:
continue
data = mtf[tf_key]
quant = data.get('quantitative', {})
prompt += f"### {tf_name}周期 ({tf_key})\n"
# ===== Quantitative scores shown first =====
prompt += f"**量化评分**: {quant.get('composite_score', 0):.1f} (信号: {quant.get('signal_type', 'HOLD')}, 置信度: {quant.get('confidence', 0):.0%})\n"
prompt += f"- 趋势得分: {quant.get('trend_score', 0):.1f} | 动量得分: {quant.get('momentum_score', 0):.1f} | 订单流: {quant.get('orderflow_score', 0):.1f}\n"
# Core technical indicators
prompt += f"- 趋势: {data.get('trend_direction', '未知')} (强度: {data.get('trend_strength', 'weak')})\n"
prompt += f"- RSI: {data.get('rsi', 50):.1f} ({data.get('rsi_status', '中性')})\n"
prompt += f"- MACD: {data.get('macd_signal', '未知')} (柱状图: {data.get('macd_hist', 0):.2f})\n"
# Support/Resistance
support = data.get('support')
resistance = data.get('resistance')
support_str = f"${support:,.0f}" if support else "无"
resistance_str = f"${resistance:,.0f}" if resistance else "无"
prompt += f"- 支撑位: {support_str} | 压力位: {resistance_str}\n"
# Volatility
atr = data.get('atr', 0)
atr_pct = data.get('atr_pct', 0)
prompt += f"- 波动率: ATR ${atr:.2f} ({atr_pct:.2f}%)\n"
# Volume
vol_ratio = data.get('volume_ratio', 1)
vol_status = "放量" if vol_ratio > 1.2 else "缩量" if vol_ratio < 0.8 else "正常"
prompt += f"- 成交量: {vol_status} (比率: {vol_ratio:.2f}x)\n"
prompt += "\n"
# Add cross-timeframe analysis insights
prompt += "### 多周期分析方法\n\n"
prompt += "#### 📊 分时间级别交易框架\n\n"
prompt += "**1⃣ 短期交易 (short_term_5m_15m_1h)** - 持仓: 几分钟到几小时\n\n"
prompt += "判断标准:\n"
prompt += "- ✅ **短周期共振**: 5m/15m/1h趋势方向一致\n"
prompt += "- ✅ **动量确认**: 5m MACD金叉/死叉 + 15m MACD同向\n"
prompt += "- ✅ **RSI信号**: 5m/15m RSI从超卖(<30)反弹或超买(>70)回落\n"
prompt += "- ✅ **价格位置**: 触及1h或4h支撑/压力位后反弹\n"
prompt += "- ⚠️ **大趋势**: 日线/周线至少不强烈相反\n"
prompt += "- ✅ **成交量**: 5m/15m放量确认突破/反转\n\n"
prompt += "入场条件:\n"
prompt += "- 做多: 5m/15m/1h上涨 + 5m金叉 + 价格>1h支撑 + 放量\n"
prompt += "- 做空: 5m/15m/1h下跌 + 5m死叉 + 价格<1h压力 + 放量\n\n"
prompt += "止盈止损:\n"
prompt += "- 止损: 5m ATR × 1.5 或15m最近低/高点, 约0.3%-0.5%\n"
prompt += "- 止盈: 1h压力/支撑位, 风险回报比≥1:2\n"
prompt += "- 策略: 快进快出, 达成50%目标后移动止损到成本\n\n"
prompt += "**2⃣ 中期交易 (medium_term_4h_1d)** - 持仓: 数天到一周\n\n"
prompt += "判断标准:\n"
prompt += "- ✅ **中周期趋势**: 4h/1d方向一致且趋势明显\n"
prompt += "- ✅ **量化评分**: 4h和1d的量化综合得分方向一致\n"
prompt += "- ✅ **MACD共振**: 日线金叉/死叉 + 周线趋势确认\n"
prompt += "- ✅ **关键位突破**: 突破或回踩日线/周线支撑压力位\n"
prompt += "- ✅ **RSI位置**: 日线RSI从超卖(<30)反转或超买(>70)回落\n"
prompt += "- ✅ **入场时机**: 4h/1h回调到位,提供更好入场点\n"
prompt += "- ✅ **成交量**: 日线放量突破确认趋势\n\n"
prompt += "入场条件:\n"
prompt += "- 做多: 日线+周线上涨 + 日线金叉 + 4h回调到日线支撑 + 1h反弹\n"
prompt += "- 做空: 日线+周线下跌 + 日线死叉 + 4h反弹到日线压力 + 1h回落\n\n"
prompt += "止盈止损:\n"
prompt += "- 止损: 4h ATR × 1.5, 约1%-2%\n"
prompt += "- 止盈: 日线关键位, 风险回报比≥1:2.5\n"
prompt += "- 策略: 波段持仓,关注日线趋势变化\n\n"
prompt += "**3⃣ 长期交易 (long_term_1d_1w)** - 持仓: 数周到数月\n\n"
prompt += "判断标准:\n"
prompt += "- ✅ **大周期趋势**: 1d/1w方向一致且强劲(strong/moderate)\n"
prompt += "- ✅ **量化评分**: 日线和周线的量化综合得分方向一致且分值高\n"
prompt += "- ✅ **周线MACD**: 周线金叉/死叉确认趋势\n"
prompt += "- ✅ **关键位突破**: 突破周线/月线级别支撑压力位\n"
prompt += "- ✅ **趋势确认**: 多个大周期指标共振,形成明确趋势\n\n"
prompt += "入场条件:\n"
prompt += "- 做多: 日线+周线上涨 + 周线金叉 + 日线回调到周线支撑 + 4h反弹\n"
prompt += "- 做空: 日线+周线下跌 + 周线死叉 + 日线反弹到周线压力 + 4h回落\n\n"
prompt += "止盈止损:\n"
prompt += "- 止损: 日线ATR × 1.5, 约2%-4%\n"
prompt += "- 止盈: 周线压力/支撑位, 风险回报比≥1:3\n"
prompt += "- 策略: 长期持仓,趋势不破不出,移动止损锁定利润\n\n"
prompt += "**4⃣ 埋伏点位 (ambush)** - 提前布局等待机会\n\n"
prompt += "适用场景:\n"
prompt += "- 📌 **当前位置不佳**: 价格处于中间位置,没有好的入场点\n"
prompt += "- 📌 **关键位等待**: 有明确的日线/周线支撑压力位可等待\n"
prompt += "- 📌 **趋势延续**: 大周期趋势明确,等待回调/反弹入场\n"
prompt += "- 📌 **反转布局**: 价格接近关键转折点,等待突破确认\n\n"
prompt += "埋伏位置示例:\n"
prompt += "- 做多埋伏: 等待回调到周线/日线支撑位 (例: 价格90500,埋伏88900)\n"
prompt += "- 做空埋伏: 等待反弹到周线/日线压力位 (例: 价格90500,埋伏93000)\n"
prompt += "- 突破埋伏: 等待突破关键位后回踩 (例: 突破91000后回踩90800)\n\n"
prompt += "埋伏策略:\n"
prompt += "- 基于: 日线/周线的关键支撑压力位\n"
prompt += "- 触发: 价格到达埋伏位 + 短周期(1h/4h)出现反转信号\n"
prompt += "- 止损: 埋伏位下方/上方1-2个ATR\n"
prompt += "- 止盈: 下一个日线/周线关键位\n\n"
prompt += "**5⃣ 观望情况** - recommendations_by_timeframe中标注\n"
prompt += "- ❌ 某周期趋势不明确或震荡\n"
prompt += "- ❌ 量化评分接近0 (无明确方向)\n"
prompt += "- ❌ 多个周期趋势严重分歧\n"
prompt += "- ❌ 成交量萎缩,市场缺乏动能\n"
prompt += "- ❌ 价格在关键位之间震荡\n\n"
prompt += "#### 🎯 关键分析要点\n"
prompt += "1. **平等对待各周期** - 周线、日线、小时级别都重要,根据持仓时间选择\n"
prompt += "2. **利用量化评分** - 每个周期都有量化综合得分,优先参考这个数值\n"
prompt += "3. **分级别独立分析** - 短期、中期、长期可以有不同甚至相反的建议\n"
prompt += "4. **趋势共振**: 同级别内多周期一致时,信号最强\n"
prompt += "5. **分歧利用**: 短期看多+长期看空 = 日内做多但不持仓过夜\n"
prompt += "6. **必须填写所有级别建议** - recommendations_by_timeframe三个字段都要填\n\n"
return prompt
def _call_llm(self, prompt: str) -> str:
"""Call LLM API"""
if self.provider == 'claude':
return self._call_claude(prompt)
elif self.provider == 'openai':
return self._call_openai(prompt)
else:
raise ValueError(f"Unsupported provider: {self.provider}")
def _call_claude(self, prompt: str) -> str:
"""Call Claude API"""
try:
response = self.client.messages.create(
model=self.model,
max_tokens=1500,
temperature=0.7,
messages=[
{"role": "user", "content": prompt}
]
)
return response.content[0].text
except Exception as e:
logger.error(f"Claude API error: {e}")
raise
def _call_openai(self, prompt: str) -> str:
"""Call OpenAI API"""
try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{
"role": "system",
"content": "You are a professional cryptocurrency trading analyst. Provide trading advice in JSON format."
},
{"role": "user", "content": prompt}
],
max_tokens=1500,
temperature=0.7,
)
return response.choices[0].message.content
except Exception as e:
logger.error(f"OpenAI API error: {e}")
raise
def _parse_response(
self,
response_text: str,
market_context: Dict[str, Any]
) -> Dict[str, Any]:
"""Parse LLM response into structured decision"""
# Try to extract JSON from response
json_match = re.search(r'\{[\s\S]*\}', response_text)
if not json_match:
logger.warning("No JSON found in LLM response, using fallback parsing")
return self._fallback_parse(response_text, market_context)
try:
llm_decision = json.loads(json_match.group())
logger.debug(f"Parsed LLM JSON: {llm_decision}")
except json.JSONDecodeError as e:
logger.warning(f"Failed to parse JSON: {e}, using fallback")
logger.debug(f"JSON match was: {json_match.group()[:500]}")
return self._fallback_parse(response_text, market_context)
# Helper function to safely convert to float
def safe_float(value, default=0.0):
"""Safely convert value to float, handling None and invalid values"""
if value is None:
return default
try:
return float(value)
except (ValueError, TypeError):
return default
# Parse opportunities structure (support both old and new format)
opportunities = llm_decision.get('opportunities', {})
# Try new format first
short_term = opportunities.get('short_term_5m_15m_1h', {})
medium_term = opportunities.get('medium_term_4h_1d', {})
long_term = opportunities.get('long_term_1d_1w', {})
ambush = opportunities.get('ambush', {})
# Fallback to old format for backward compatibility
if not short_term and not medium_term and not long_term:
intraday = opportunities.get('intraday', {})
swing = opportunities.get('swing', {})
# Map old format to new format
short_term = intraday
medium_term = swing
long_term = {}
# Determine primary levels (priority: short > medium > long)
entry = market_context.get('current_price', 0)
stop_loss = 0
take_profit = 0
if short_term.get('exists'):
entry = safe_float(short_term.get('entry_price'), market_context.get('current_price', 0))
stop_loss = safe_float(short_term.get('stop_loss'), 0)
take_profit = safe_float(short_term.get('take_profit'), 0)
elif medium_term.get('exists'):
entry = safe_float(medium_term.get('entry_price'), market_context.get('current_price', 0))
stop_loss = safe_float(medium_term.get('stop_loss'), 0)
take_profit = safe_float(medium_term.get('take_profit'), 0)
elif long_term.get('exists'):
entry = safe_float(long_term.get('entry_price'), market_context.get('current_price', 0))
stop_loss = safe_float(long_term.get('stop_loss'), 0)
take_profit = safe_float(long_term.get('take_profit'), 0)
# Get recommendations by timeframe
recommendations = llm_decision.get('recommendations_by_timeframe', {})
# Validate and structure decision
decision = {
'timestamp': datetime.now().isoformat(),
'signal_type': llm_decision.get('signal', 'HOLD').upper(),
'confidence': safe_float(llm_decision.get('confidence'), 0.5),
'trade_type': 'MULTI_TIMEFRAME', # New format uses multiple timeframes
'reasoning': llm_decision.get('reasoning', ''),
# New opportunities breakdown (multi-timeframe)
'opportunities': {
'short_term_5m_15m_1h': {
'exists': short_term.get('exists', False),
'direction': short_term.get('direction'),
'entry_price': safe_float(short_term.get('entry_price'), 0),
'stop_loss': safe_float(short_term.get('stop_loss'), 0),
'take_profit': safe_float(short_term.get('take_profit'), 0),
'reasoning': short_term.get('reasoning', '')
},
'medium_term_4h_1d': {
'exists': medium_term.get('exists', False),
'direction': medium_term.get('direction'),
'entry_price': safe_float(medium_term.get('entry_price'), 0),
'stop_loss': safe_float(medium_term.get('stop_loss'), 0),
'take_profit': safe_float(medium_term.get('take_profit'), 0),
'reasoning': medium_term.get('reasoning', '')
},
'long_term_1d_1w': {
'exists': long_term.get('exists', False),
'direction': long_term.get('direction'),
'entry_price': safe_float(long_term.get('entry_price'), 0),
'stop_loss': safe_float(long_term.get('stop_loss'), 0),
'take_profit': safe_float(long_term.get('take_profit'), 0),
'reasoning': long_term.get('reasoning', '')
},
'ambush': {
'exists': ambush.get('exists', False),
'price_level': safe_float(ambush.get('price_level'), 0),
'reasoning': ambush.get('reasoning', '')
},
# Keep old format for backward compatibility
'intraday': {
'exists': short_term.get('exists', False),
'direction': short_term.get('direction'),
'entry_price': safe_float(short_term.get('entry_price'), 0),
'stop_loss': safe_float(short_term.get('stop_loss'), 0),
'take_profit': safe_float(short_term.get('take_profit'), 0),
'reasoning': short_term.get('reasoning', '')
},
'swing': {
'exists': medium_term.get('exists', False) or long_term.get('exists', False),
'direction': medium_term.get('direction') or long_term.get('direction'),
'entry_price': safe_float(medium_term.get('entry_price') or long_term.get('entry_price'), 0),
'stop_loss': safe_float(medium_term.get('stop_loss') or long_term.get('stop_loss'), 0),
'take_profit': safe_float(medium_term.get('take_profit') or long_term.get('take_profit'), 0),
'reasoning': medium_term.get('reasoning', '') or long_term.get('reasoning', '')
},
},
# Recommendations by timeframe
'recommendations_by_timeframe': {
'short_term': recommendations.get('short_term', ''),
'medium_term': recommendations.get('medium_term', ''),
'long_term': recommendations.get('long_term', '')
},
# Primary levels (for backward compatibility)
'levels': {
'current_price': market_context.get('current_price', 0),
'entry': entry,
'stop_loss': stop_loss,
'take_profit_1': take_profit,
'take_profit_2': take_profit,
'take_profit_3': take_profit,
},
'risk_level': llm_decision.get('risk_level', 'MEDIUM'),
'key_factors': llm_decision.get('key_factors', []),
'raw_response': response_text,
}
# Calculate risk-reward ratio
entry = decision['levels']['entry']
stop_loss = decision['levels']['stop_loss']
tp1 = decision['levels']['take_profit_1']
if entry and stop_loss and tp1 and entry != stop_loss:
risk = abs(entry - stop_loss)
reward = abs(tp1 - entry)
decision['risk_reward_ratio'] = round(reward / risk, 2) if risk > 0 else 0
else:
decision['risk_reward_ratio'] = 0
return decision
def _fallback_parse(
self,
response_text: str,
market_context: Dict[str, Any]
) -> Dict[str, Any]:
"""Fallback parsing when JSON extraction fails"""
# Simple keyword-based signal extraction
text_lower = response_text.lower()
if 'buy' in text_lower or '买入' in response_text or '做多' in response_text:
signal_type = 'BUY'
confidence = 0.6
elif 'sell' in text_lower or '卖出' in response_text or '做空' in response_text:
signal_type = 'SELL'
confidence = 0.6
else:
signal_type = 'HOLD'
confidence = 0.5
return {
'timestamp': datetime.now().isoformat(),
'signal_type': signal_type,
'confidence': confidence,
'reasoning': response_text[:500], # First 500 chars
'levels': {
'current_price': market_context.get('current_price', 0),
'entry': 0,
'stop_loss': 0,
'take_profit_1': 0,
'take_profit_2': 0,
'take_profit_3': 0,
},
'risk_level': 'MEDIUM',
'time_horizon': 'MEDIUM',
'key_factors': [],
'raw_response': response_text,
'warning': 'Fallback parsing used - levels not available',
}
def _disabled_response(self) -> Dict[str, Any]:
"""Return response when LLM is disabled"""
return {
'timestamp': datetime.now().isoformat(),
'signal_type': 'HOLD',
'confidence': 0,
'reasoning': 'LLM decision maker is disabled (no API key)',
'enabled': False,
}
def _error_response(self, error_msg: str) -> Dict[str, Any]:
"""Return error response"""
return {
'timestamp': datetime.now().isoformat(),
'signal_type': 'HOLD',
'confidence': 0,
'reasoning': f'Error generating decision: {error_msg}',
'error': error_msg,
}

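A minimal usage sketch for the decision maker (requires ANTHROPIC_API_KEY or OPENAI_API_KEY to be set; the context dict here is a stripped-down stand-in for what LLMContextBuilder produces):

from signals.llm_decision import LLMDecisionMaker

maker = LLMDecisionMaker(provider='openai')  # or 'claude'
context = {
    'current_price': 90500.0,
    'market_state': {},
    'key_prices': {},
    'momentum': {},
    'signal_consensus': 0.6,
}
decision = maker.generate_decision(context)
if decision.get('enabled', True):
    print(decision['signal_type'], decision.get('confidence'))
else:
    print('LLM disabled:', decision['reasoning'])  # no API key found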
255
signals/llm_gate.py Normal file
View File

@ -0,0 +1,255 @@
"""
LLM Gate - minimal gating layer, driven primarily by frequency control
Core principles:
1. Frequency limits - at most 12 calls/day, at least 15 minutes apart (the core control)
2. Data is basically usable - at least 100 candles, core indicators computed
3. Basic signal quality - composite score ≥ 15, filtering out only fully neutral signals
"""
import logging
import os
import json
from typing import Dict, Any, Tuple, Optional
from datetime import datetime, timedelta
from pathlib import Path
logger = logging.getLogger(__name__)
class LLMGate:
"""
Minimal LLM gating system - frequency control first, quantitative pre-screening second
Design principles:
- Frequency limits are the core control and prevent excessive calls
- The quantitative analysis acts as a coarse pre-screen that drops fully neutral signals
- Beyond that, give the LLM every chance to run a deep analysis
"""
def __init__(
self,
# Data requirements
min_candles: int = 100,  # Minimum number of candles
# Signal quality (minimal - composite score only)
min_composite_score: float = 15.0,  # Minimum composite score (filters fully neutral signals)
# Frequency limits (the core control!)
max_calls_per_day: int = 12,  # Maximum LLM calls per day
min_call_interval_minutes: int = 15,  # Minimum interval between calls (minutes)
# State persistence
state_file: str = '/app/data/llm_gate_state.json',
):
"""
Initialize the minimal LLM gate
Args:
    min_candles: minimum number of candles required
    min_composite_score: minimum composite score (the only quality check)
    max_calls_per_day: maximum LLM calls per day
    min_call_interval_minutes: minimum interval between calls
    state_file: path of the persisted state file
"""
# Data requirements
self.min_candles = min_candles
# Signal quality (minimal)
self.min_composite_score = min_composite_score
# Frequency limits
self.max_calls_per_day = max_calls_per_day
self.min_call_interval_minutes = min_call_interval_minutes
# State management
self.state_file = state_file
self.state = self._load_state()
logger.info(
f"🚦 LLM Gate initialized (minimal mode): "
f"max {max_calls_per_day} calls/day, "
f"interval ≥ {min_call_interval_minutes} min, "
f"composite score ≥ {min_composite_score} (the only quality check)"
)
def should_call_llm(
self,
quant_signal: Dict[str, Any],
analysis: Dict[str, Any]
) -> Tuple[bool, str]:
"""
Decide whether the LLM should be called (simplified checks; the frequency limits do most of the work)
Check order (fail fast):
1. Frequency limits (the core!)
2. Basic data availability
3. Basic signal quality (quantitative pre-screen)
Returns:
    (should_call, reason)
"""
# Check 1: frequency limits (the core control!)
freq_check, freq_reason = self._check_frequency_limit()
if not freq_check:
logger.info(f"🚫 LLM Gate: frequency limit - {freq_reason}")
return False, freq_reason
# Check 2: basic data availability (simplified)
data_check, data_reason = self._check_data_sufficiency(analysis)
if not data_check:
logger.info(f"🚫 LLM Gate: insufficient data - {data_reason}")
return False, data_reason
# Check 3: basic signal quality (quantitative pre-screen with a very low bar)
quality_check, quality_reason = self._check_signal_quality(quant_signal, analysis)
if not quality_check:
logger.info(f"🚫 LLM Gate: signal quality too low - {quality_reason}")
return False, quality_reason
# ✅ All checks passed - let the LLM run its deep analysis
logger.info(
f"✅ LLM Gate: PASSED! "
f"{quality_reason}, "
f"{self.state['today_calls']}/{self.max_calls_per_day} calls used today"
)
# Record this call
self._record_call()
return True, f"Quantitative pre-screen passed: {quality_reason}"
def _check_frequency_limit(self) -> Tuple[bool, str]:
"""检查频率限制"""
now = datetime.now()
today_str = now.strftime('%Y-%m-%d')
# 重置每日计数
if self.state.get('last_date') != today_str:
self.state['last_date'] = today_str
self.state['today_calls'] = 0
self._save_state()
# Check 1: 每日调用次数
if self.state['today_calls'] >= self.max_calls_per_day:
return False, f"今日已调用{self.state['today_calls']}次,达到上限{self.max_calls_per_day}"
# Check 2: 调用间隔
last_call_time = self.state.get('last_call_time')
if last_call_time:
last_call = datetime.fromisoformat(last_call_time)
elapsed = (now - last_call).total_seconds() / 60 # 转为分钟
if elapsed < self.min_call_interval_minutes:
return False, f"距离上次调用仅{elapsed:.1f}分钟,需≥{self.min_call_interval_minutes}分钟"
return True, "频率检查通过"
def _check_data_sufficiency(self, analysis: Dict[str, Any]) -> Tuple[bool, str]:
"""检查数据充足性 (提高到200根K线)"""
metadata = analysis.get('metadata', {})
candle_count = metadata.get('candle_count', 0)
if candle_count < self.min_candles:
return False, f"K线数量不足: {candle_count}/{self.min_candles}"
# 确保所有必要的指标都已计算
required_keys = ['trend_analysis', 'momentum', 'support_resistance', 'orderflow']
for key in required_keys:
if key not in analysis:
return False, f"缺少关键指标: {key}"
return True, f"数据充足: {candle_count}根K线"
def _check_signal_quality(
self,
quant_signal: Dict[str, Any],
analysis: Dict[str, Any]
) -> Tuple[bool, str]:
"""
Check signal quality (minimal version - composite score only)
If the quantitative analysis produced a clearly non-neutral signal, let the LLM do the deep analysis
"""
# The only check: composite score magnitude (a very low bar that filters out fully neutral signals)
composite_score = abs(quant_signal.get('composite_score', 0))
if composite_score < self.min_composite_score:
return False, f"Composite score too low: {composite_score:.1f} < {self.min_composite_score}"
# ✅ Passed - every other check was removed on purpose
signal_type = quant_signal.get('signal_type', 'HOLD')
return True, f"Signal type: {signal_type}, composite score: {composite_score:.1f}"
def _load_state(self) -> Dict[str, Any]:
"""加载状态文件"""
# 确保目录存在
state_path = Path(self.state_file)
state_path.parent.mkdir(parents=True, exist_ok=True)
if state_path.exists():
try:
with open(self.state_file, 'r') as f:
return json.load(f)
except Exception as e:
logger.warning(f"加载状态文件失败: {e}")
# 默认状态
return {
'last_date': '',
'today_calls': 0,
'last_call_time': None,
'total_calls': 0,
}
def _save_state(self):
"""保存状态文件"""
try:
with open(self.state_file, 'w') as f:
json.dump(self.state, f, indent=2)
except Exception as e:
logger.error(f"保存状态文件失败: {e}")
def _record_call(self):
"""记录本次调用"""
now = datetime.now()
self.state['today_calls'] += 1
self.state['total_calls'] = self.state.get('total_calls', 0) + 1
self.state['last_call_time'] = now.isoformat()
self._save_state()
logger.info(
f"📝 记录LLM调用: 今日第{self.state['today_calls']}次, "
f"累计第{self.state['total_calls']}"
)
def get_stats(self) -> Dict[str, Any]:
"""获取统计信息"""
now = datetime.now()
today_str = now.strftime('%Y-%m-%d')
# 重置每日计数
if self.state.get('last_date') != today_str:
today_calls = 0
else:
today_calls = self.state['today_calls']
# 计算距离上次调用的时间
last_call_time = self.state.get('last_call_time')
if last_call_time:
last_call = datetime.fromisoformat(last_call_time)
minutes_since_last = (now - last_call).total_seconds() / 60
else:
minutes_since_last = None
return {
'today_calls': today_calls,
'max_calls_per_day': self.max_calls_per_day,
'remaining_calls_today': max(0, self.max_calls_per_day - today_calls),
'total_calls': self.state.get('total_calls', 0),
'last_call_time': last_call_time,
'minutes_since_last_call': minutes_since_last,
'can_call_now': minutes_since_last is None or minutes_since_last >= self.min_call_interval_minutes,
}

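A minimal usage sketch for the gate (the state file is pointed at a local path instead of the container default, and the inputs are toy values; real calls pass the quant signal and full analysis dicts):

from signals.llm_gate import LLMGate

gate = LLMGate(state_file='./llm_gate_state.json')
quant_signal = {'signal_type': 'BUY', 'composite_score': 42.0}
analysis = {
    'metadata': {'candle_count': 300},
    'trend_analysis': {}, 'momentum': {},
    'support_resistance': {}, 'orderflow': {},
}
ok, reason = gate.should_call_llm(quant_signal, analysis)
print(ok, reason)                                  # True if the quota and pre-screen allow a call
print(gate.get_stats()['remaining_calls_today'])   # quota left for today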
547
signals/quantitative.py Normal file
View File

@ -0,0 +1,547 @@
"""
Quantitative Signal Generator - Rule-based trading signals
"""
import logging
from typing import Dict, Any, List, Optional
from datetime import datetime
logger = logging.getLogger(__name__)
class QuantitativeSignalGenerator:
"""Generate trading signals based on quantitative analysis"""
@staticmethod
def generate_signal(analysis: Dict[str, Any]) -> Dict[str, Any]:
"""
Generate trading signal from market analysis
Args:
analysis: Complete market analysis from MarketAnalysisEngine
Returns:
Signal dict with direction, strength, entry/exit levels
"""
if 'error' in analysis:
return QuantitativeSignalGenerator._no_signal("Insufficient data")
# Extract components
trend = analysis.get('trend_analysis', {})
momentum = analysis.get('momentum', {})
sr_levels = analysis.get('support_resistance', {})
breakout = analysis.get('breakout', {})
orderflow = analysis.get('orderflow', {})
current_price = analysis.get('current_price', 0)
# Calculate individual signal scores
trend_score = QuantitativeSignalGenerator._calculate_trend_score(trend)
momentum_score = QuantitativeSignalGenerator._calculate_momentum_score(momentum)
orderflow_score = QuantitativeSignalGenerator._calculate_orderflow_score(orderflow)
breakout_score = QuantitativeSignalGenerator._calculate_breakout_score(breakout)
# Composite signal score (-100 to +100)
# Positive = bullish, Negative = bearish
composite_score = (
trend_score * 0.35 +
momentum_score * 0.25 +
orderflow_score * 0.25 +
breakout_score * 0.15
)
# Determine signal direction and strength
signal_type = QuantitativeSignalGenerator._determine_signal_type(composite_score)
signal_strength = abs(composite_score) / 100 # 0-1 scale
# Calculate entry/exit levels
entry_level = QuantitativeSignalGenerator._calculate_entry_level(
current_price, signal_type, sr_levels
)
stop_loss = QuantitativeSignalGenerator._calculate_stop_loss(
current_price, signal_type, analysis.get('indicators', {}), sr_levels
)
take_profit_levels = QuantitativeSignalGenerator._calculate_take_profit(
current_price, signal_type, sr_levels, stop_loss
)
# Calculate consensus score (0-1) - how well the individual indicators agree
consensus_score = QuantitativeSignalGenerator._calculate_consensus_score(
trend_score, momentum_score, orderflow_score, breakout_score
)
# Build signal
signal = {
'timestamp': datetime.now().isoformat(),
'signal_type': signal_type, # 'BUY', 'SELL', 'HOLD'
'signal_strength': round(signal_strength, 2),
'composite_score': round(composite_score, 1),
'confidence': QuantitativeSignalGenerator._calculate_confidence(
trend, momentum, orderflow
),
'consensus_score': round(consensus_score, 2), # consensus score (agreement across sub-scores)
'scores': {
'trend': round(trend_score, 1),
'momentum': round(momentum_score, 1),
'orderflow': round(orderflow_score, 1),
'breakout': round(breakout_score, 1),
},
'levels': {
'current_price': current_price,
'entry': entry_level,
'stop_loss': stop_loss,
'take_profit_1': take_profit_levels[0],
'take_profit_2': take_profit_levels[1],
'take_profit_3': take_profit_levels[2],
},
'risk_reward_ratio': QuantitativeSignalGenerator._calculate_rr_ratio(
entry_level, stop_loss, take_profit_levels[0]
),
'reasoning': QuantitativeSignalGenerator._generate_reasoning(
signal_type, trend, momentum, orderflow, breakout
),
}
logger.info(
f"Generated signal: {signal_type} (strength: {signal_strength:.2f}, "
f"composite: {composite_score:.1f})"
)
return signal
@staticmethod
def _calculate_trend_score(trend: Dict[str, Any]) -> float:
"""Calculate trend score (-100 to +100)"""
direction = trend.get('direction', '震荡')
strength = trend.get('strength', 'weak')
adx = trend.get('adx', 0)
ema_alignment = trend.get('ema_alignment', 'neutral')
score = 0
# Direction
if direction == '上涨':
score += 50
elif direction == '下跌':
score -= 50
# Strength multiplier
if strength == 'strong':
score *= 1.5
elif strength == 'moderate':
score *= 1.2
elif strength == 'weak':
score *= 0.7
# ADX confirmation
if adx > 25:
score *= 1.2
elif adx < 15:
score *= 0.6
# EMA alignment
if ema_alignment == 'bullish' and score > 0:
score *= 1.1
elif ema_alignment == 'bearish' and score < 0:
score *= 1.1
return max(-100, min(100, score))
@staticmethod
def _calculate_momentum_score(momentum: Dict[str, Any]) -> float:
"""Calculate momentum score (-100 to +100)"""
rsi = momentum.get('rsi', 50)
macd_signal = momentum.get('macd_signal', '')
rsi_trend = momentum.get('rsi_trend', '中性')
score = 0
# RSI score
if rsi > 70:
score -= 30 # Overbought - bearish
elif rsi > 60:
score += 20 # Strong but not overbought
elif rsi > 50:
score += 10
elif rsi > 40:
score -= 10
elif rsi > 30:
score -= 20
else:
score += 30 # Oversold - bullish
# MACD signal
if '金叉' in macd_signal:
if '扩大' in macd_signal:
score += 40
else:
score += 20
elif '死叉' in macd_signal:
if '扩大' in macd_signal:
score -= 40
else:
score -= 20
# RSI trend
if rsi_trend == '上升中':
score += 15
elif rsi_trend == '下降中':
score -= 15
return max(-100, min(100, score))
@staticmethod
def _calculate_orderflow_score(orderflow: Optional[Dict[str, Any]]) -> float:
"""Calculate order flow score (-100 to +100)"""
if not orderflow:
return 0
imbalance = orderflow.get('imbalance', {})
strength = orderflow.get('strength', {})
large_orders = orderflow.get('large_orders', {})
score = 0
# Imbalance
imbalance_pct = imbalance.get('imbalance_pct', 0)
score += imbalance_pct # -100 to +100
# Strength confirmation
strength_val = strength.get('strength_score', 0)
score = (score + strength_val) / 2 # Average
# Large order bias
large_bid_count = large_orders.get('large_bids_count', 0)
large_ask_count = large_orders.get('large_asks_count', 0)
if large_bid_count > large_ask_count * 1.5:
score += 15
elif large_ask_count > large_bid_count * 1.5:
score -= 15
return max(-100, min(100, score))
@staticmethod
def _calculate_breakout_score(breakout: Dict[str, Any]) -> float:
"""Calculate breakout score (-100 to +100)"""
if breakout.get('has_breakout'):
breakout_type = breakout.get('type', '')
if breakout_type == 'resistance_breakout':
return 80 # Strong bullish
elif breakout_type == 'support_breakdown':
return -80 # Strong bearish
# Approaching a key level (not yet broken) - checked even when no breakout is confirmed
if 'approaching' in breakout:
approaching = breakout.get('approaching')
if approaching == 'resistance':
return 20 # Cautiously bullish
elif approaching == 'support':
return -20 # Cautiously bearish
return 0
@staticmethod
def _determine_signal_type(composite_score: float) -> str:
"""Determine signal type from composite score"""
if composite_score > 40:
return 'BUY'
elif composite_score < -40:
return 'SELL'
else:
return 'HOLD'
@staticmethod
def _calculate_confidence(
trend: Dict[str, Any],
momentum: Dict[str, Any],
orderflow: Optional[Dict[str, Any]]
) -> float:
"""
Calculate signal confidence (0-1)
High confidence when multiple indicators align
"""
alignments = 0
total_checks = 0
# Trend-momentum alignment
trend_dir = trend.get('direction', '震荡')
rsi = momentum.get('rsi', 50)
total_checks += 1
if (trend_dir == '上涨' and 50 < rsi < 70) or \
(trend_dir == '下跌' and 30 < rsi < 50):
alignments += 1
# MACD-trend alignment
macd_signal = momentum.get('macd_signal', '')
total_checks += 1
if (trend_dir == '上涨' and '金叉' in macd_signal) or \
(trend_dir == '下跌' and '死叉' in macd_signal):
alignments += 1
# Order flow alignment (if available)
if orderflow:
imbalance = orderflow.get('imbalance', {})
pressure = imbalance.get('pressure', 'neutral')
total_checks += 1
if (trend_dir == '上涨' and 'buy' in pressure) or \
(trend_dir == '下跌' and 'sell' in pressure):
alignments += 1
# ADX strength confirmation
adx = trend.get('adx', 0)
total_checks += 1
if adx > 20:
alignments += 1
confidence = alignments / total_checks if total_checks > 0 else 0.5
return round(confidence, 2)
@staticmethod
def _calculate_entry_level(
current_price: float,
signal_type: str,
sr_levels: Dict[str, Any]
) -> float:
"""Calculate optimal entry level"""
if signal_type == 'BUY':
# Try to enter at support or current price
support = sr_levels.get('nearest_support')
if support and current_price - support < current_price * 0.005: # Within 0.5%
return support
return current_price
elif signal_type == 'SELL':
# Try to enter at resistance or current price
resistance = sr_levels.get('nearest_resistance')
if resistance and resistance - current_price < current_price * 0.005:
return resistance
return current_price
else: # HOLD
return current_price
@staticmethod
def _calculate_stop_loss(
current_price: float,
signal_type: str,
indicators: Dict[str, Any],
sr_levels: Dict[str, Any]
) -> float:
"""Calculate stop loss level using ATR and S/R"""
atr = indicators.get('atr', current_price * 0.01) # Default 1% ATR
if signal_type == 'BUY':
# Stop loss below support or 1.5 * ATR
support = sr_levels.get('nearest_support')
atr_stop = current_price - (atr * 1.5)
if support and support < current_price:
# Use the lower of support-buffer or ATR stop
support_stop = support * 0.998 # 0.2% below support
return min(support_stop, atr_stop)
return atr_stop
elif signal_type == 'SELL':
# Stop loss above resistance or 1.5 * ATR
resistance = sr_levels.get('nearest_resistance')
atr_stop = current_price + (atr * 1.5)
if resistance and resistance > current_price:
# Use the higher of resistance+buffer or ATR stop
resistance_stop = resistance * 1.002 # 0.2% above resistance
return max(resistance_stop, atr_stop)
return atr_stop
else: # HOLD
return current_price
@staticmethod
def _calculate_take_profit(
current_price: float,
signal_type: str,
sr_levels: Dict[str, Any],
stop_loss: float
) -> List[float]:
"""
Calculate 3 take profit levels
Returns:
[TP1, TP2, TP3]
"""
risk = abs(current_price - stop_loss)
if signal_type == 'BUY':
# Use resistance levels or risk-reward ratios
resistance = sr_levels.get('nearest_resistance')
tp1 = resistance if resistance and resistance > current_price else current_price + (risk * 1.5)
tp2 = current_price + (risk * 2.5)
tp3 = current_price + (risk * 4.0)
elif signal_type == 'SELL':
# Use support levels or risk-reward ratios
support = sr_levels.get('nearest_support')
tp1 = support if support and support < current_price else current_price - (risk * 1.5)
tp2 = current_price - (risk * 2.5)
tp3 = current_price - (risk * 4.0)
else: # HOLD
return [current_price, current_price, current_price]
return [round(tp, 2) for tp in [tp1, tp2, tp3]]
@staticmethod
def _calculate_rr_ratio(entry: float, stop_loss: float, take_profit: float) -> float:
"""Calculate risk-reward ratio"""
risk = abs(entry - stop_loss)
reward = abs(take_profit - entry)
if risk == 0:
return 0
return round(reward / risk, 2)
@staticmethod
def _generate_reasoning(
signal_type: str,
trend: Dict[str, Any],
momentum: Dict[str, Any],
orderflow: Optional[Dict[str, Any]],
breakout: Dict[str, Any]
) -> str:
"""Generate human-readable reasoning for the signal"""
reasons = []
# Trend
trend_dir = trend.get('direction', '震荡')
trend_strength = trend.get('strength', 'weak')
reasons.append(f"趋势{trend_dir} ({trend_strength})")
# Momentum
rsi = momentum.get('rsi', 50)
macd_signal = momentum.get('macd_signal', '')
reasons.append(f"RSI={rsi:.0f}")
if macd_signal:
reasons.append(f"MACD {macd_signal}")
# Order flow
if orderflow:
imbalance = orderflow.get('imbalance', {})
status = imbalance.get('status', '')
if status:
reasons.append(f"订单流: {status}")
# Breakout
if breakout.get('has_breakout'):
breakout_type = breakout.get('type', '')
if breakout_type == 'resistance_breakout':
reasons.append("突破压力位")
elif breakout_type == 'support_breakdown':
reasons.append("跌破支撑位")
return "; ".join(reasons)
@staticmethod
def _calculate_consensus_score(
trend_score: float,
momentum_score: float,
orderflow_score: float,
breakout_score: float
) -> float:
"""
Compute the consensus score across indicators (0-1)
The consensus score reflects how well the indicator directions agree:
- 1.0 = all indicators fully aligned (all strongly bullish or strongly bearish)
- 0.5 = mixed directions
- 0.0 = indicators completely contradictory
Exposed as a signal-quality diagnostic (note: the minimal LLM gate currently checks only the composite score)
"""
# Normalize each score to a direction: +1 (bullish), 0 (neutral), -1 (bearish)
def normalize_direction(score: float, threshold: float = 10.0) -> float:
"""Convert a raw score into a direction value"""
if score > threshold:
return min(score / 50, 1.0) # cap at 1.0
elif score < -threshold:
return max(score / 50, -1.0) # floor at -1.0
else:
return 0.0 # neutral
# Direction of each indicator
trend_dir = normalize_direction(trend_score)
momentum_dir = normalize_direction(momentum_score)
orderflow_dir = normalize_direction(orderflow_score)
breakout_dir = normalize_direction(breakout_score, threshold=5.0) # breakouts use a lower threshold
# Measure how consistent the directions are
# Method: compare each direction against the dominant direction
# Weighted average direction (trend and order flow carry more weight)
weighted_avg_dir = (
trend_dir * 0.40 +
momentum_dir * 0.25 +
orderflow_dir * 0.25 +
breakout_dir * 0.10
)
# A weighted average near 0 means no clear direction, so consensus is low
if abs(weighted_avg_dir) < 0.2:
return 0.0
# Check each indicator against the dominant direction
alignments = []
# Trend alignment
if abs(trend_dir) > 0.3: # trend has a direction
alignment = 1.0 if (trend_dir * weighted_avg_dir) > 0 else 0.0
alignments.append(alignment * 0.40) # trend weight 40%
# Momentum alignment
if abs(momentum_dir) > 0.3:
alignment = 1.0 if (momentum_dir * weighted_avg_dir) > 0 else 0.0
alignments.append(alignment * 0.30) # momentum weight 30%
# Order flow alignment
if abs(orderflow_dir) > 0.3:
alignment = 1.0 if (orderflow_dir * weighted_avg_dir) > 0 else 0.0
alignments.append(alignment * 0.25) # order flow weight 25%
# Breakout alignment
if abs(breakout_dir) > 0.3:
alignment = 1.0 if (breakout_dir * weighted_avg_dir) > 0 else 0.0
alignments.append(alignment * 0.05) # breakout weight 5%
# No indicator had a clear direction
if not alignments:
return 0.0
# Total consensus
consensus = sum(alignments)
# Bonus when the dominant direction is strong (>0.6)
if abs(weighted_avg_dir) > 0.6:
consensus = min(consensus * 1.2, 1.0)
return round(max(0.0, min(1.0, consensus)), 2)
@staticmethod
def _no_signal(reason: str) -> Dict[str, Any]:
"""Return a HOLD signal with reason"""
return {
'timestamp': datetime.now().isoformat(),
'signal_type': 'HOLD',
'signal_strength': 0,
'composite_score': 0,
'confidence': 0,
'consensus_score': 0, # include the consensus score for a consistent shape
'reasoning': reason,
}

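A minimal usage sketch for the generator (the analysis dict below is a hand-built stand-in for MarketAnalysisEngine output; the Chinese literals match the values the scoring functions test for):

from signals.quantitative import QuantitativeSignalGenerator

analysis = {
    'current_price': 90500.0,
    'trend_analysis': {'direction': '上涨', 'strength': 'strong', 'adx': 30, 'ema_alignment': 'bullish'},
    'momentum': {'rsi': 62.0, 'macd_signal': '金叉扩大', 'rsi_trend': '上升中'},
    'support_resistance': {'nearest_support': 89900.0, 'nearest_resistance': 91800.0},
    'breakout': {},
    'indicators': {'atr': 450.0},
}
signal = QuantitativeSignalGenerator.generate_signal(analysis)
print(signal['signal_type'], signal['composite_score'])  # BUY 53.4 with these inputs
print(signal['levels'])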
34
start_system.sh Executable file
View File

@ -0,0 +1,34 @@
#!/bin/bash
# Start the full trading system (data ingestion + scheduled signal generation)
echo "🚀 Starting Tradus AI trading system..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Start all services
docker compose --profile scheduler up -d
echo ""
echo "⏳ Waiting for services to become ready..."
sleep 5
# Check service status
echo ""
echo "📊 Service status:"
docker compose ps
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ System started!"
echo ""
echo "📝 Components:"
echo "  • Redis: data store"
echo "  • Ingestion: real-time Binance data collection"
echo "  • Scheduler: generates a trading signal every 5 minutes"
echo ""
echo "📱 DingTalk notifications: BUY/SELL signals are pushed automatically"
echo ""
echo "📖 Common commands:"
echo "  Tail logs:              docker compose logs -f scheduler"
echo "  Generate signal now:    ./run_signal.sh"
echo "  View the latest signal: cat output/latest_signal.json | python -m json.tool"
echo "  Stop the system:        ./stop_system.sh"

14
stop_system.sh Executable file
View File

@ -0,0 +1,14 @@
#!/bin/bash
# Stop the trading system
echo "🛑 Stopping Tradus AI trading system..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
docker compose --profile scheduler down
echo ""
echo "✅ System stopped"
echo ""
echo "💡 Tips:"
echo "  Restart:      ./start_system.sh"
echo "  Full cleanup: docker compose --profile scheduler down -v"

48
view_data.sh Executable file
View File

@ -0,0 +1,48 @@
#!/bin/bash
# View live ingested data
echo "════════════════════════════════════════════════════════════════"
echo "                      📡 Live Data Monitor                        "
echo "════════════════════════════════════════════════════════════════"
echo ""
# Current price
echo "📊 Current BTC price:"
docker compose exec ingestion python3 -c "
import redis, orjson
r = redis.Redis(host='redis', port=6379, decode_responses=False)
messages = r.xrevrange('binance:raw:kline:5m', count=1)
if messages:
msg_id, fields = messages[0]
data = orjson.loads(fields[b'data'])
k = data['k']
print(f\" \${float(k['c']):>12,.2f} (latest)\")
print(f\" \${float(k['h']):>12,.2f} (5m high)\")
print(f\" \${float(k['l']):>12,.2f} (5m low)\")
print(f\" Volume: {float(k['v']):.2f} BTC\")
else:
print('  Loading data...')
" 2>/dev/null
echo ""
echo "─────────────────────────────────────────────────────────────────"
echo ""
# Stream status
echo "📈 Stream status:"
for stream in "binance:raw:kline:5m" "binance:raw:kline:15m" "binance:raw:kline:1h" "binance:raw:trade" "binance:raw:depth:20"; do
count=$(docker exec tradus-redis redis-cli XLEN $stream)
name=$(echo $stream | cut -d: -f3-)
printf " %-15s: %'6d messages\n" "$name" "$count"
done
echo ""
echo "─────────────────────────────────────────────────────────────────"
echo ""
# Service status
echo "🚀 Service status:"
docker compose ps | grep -E "(tradus-redis|tradus-ingestion)" | awk '{print " "$1": "$6}'
echo ""
echo "════════════════════════════════════════════════════════════════"

10
view_logs.sh Executable file
View File

@ -0,0 +1,10 @@
#!/bin/bash
# View system logs
SERVICE=${1:-scheduler} # defaults to scheduler
echo "📋 Tailing $SERVICE logs (press Ctrl+C to exit)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
docker compose logs -f --tail 50 $SERVICE

45
view_signal.sh Executable file
View File

@ -0,0 +1,45 @@
#!/bin/bash
# View the latest trading signal
set -e
echo "📊 Latest trading signal"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
if [ ! -f "output/latest_signal.json" ]; then
echo "❌ Signal file not found"
echo "💡 Run ./run_signal.sh to generate one"
exit 1
fi
# Pretty-print with jq if it is installed; the paths match the JSON written by run_signal
if command -v jq &> /dev/null; then
cat output/latest_signal.json | jq '
{
"signal": .aggregated_signal.final_signal,
"confidence": ((.aggregated_signal.final_confidence * 100 | tostring) + "%"),
"consensus": .aggregated_signal.consensus,
"current_price": ("$" + (.aggregated_signal.levels.current_price | tostring)),
"generated_at": .aggregated_signal.timestamp,
"quantitative_signal": {
"signal": .quantitative_signal.signal_type,
"composite_score": .quantitative_signal.composite_score,
"reasoning": .quantitative_signal.reasoning
},
"llm_signal": (
if .llm_signal then {
"signal": .llm_signal.signal_type,
"confidence": ((.llm_signal.confidence * 100 | tostring) + "%"),
"reasoning": .llm_signal.reasoning
} else "disabled" end
)
}'
else
# Fall back to Python for formatting
cat output/latest_signal.json | python -m json.tool
fi
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "💡 查看完整信号: cat output/latest_signal.json | python -m json.tool"