# examples/reinforcement_learning_trading.py
"""
强化学习交易智能体示例
使用深度Q网络(DQN)和策略梯度方法进行自动交易
"""

import sys
import numpy as np
import pandas as pd
from pathlib import Path
from datetime import datetime, timedelta
from typing import Dict, List, Tuple, Optional
import warnings
warnings.filterwarnings('ignore')

# 强化学习相关库
try:
    import tensorflow as tf
    from tensorflow.keras.models import Sequential, Model
    from tensorflow.keras.layers import Dense, Dropout, Input
    from tensorflow.keras.optimizers import Adam
    from collections import deque
    import random
    TENSORFLOW_AVAILABLE = True
except ImportError:
    TENSORFLOW_AVAILABLE = False
    print("⚠️ TensorFlow未安装，强化学习模型将无法使用")

# 添加项目路径
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from src.services.market_data_service import MarketDataService

class TradingEnvironment:
    """Single-asset trading environment for RL agents.

    Exposes a gym-like API: reset() -> state, step(action) ->
    (state, reward, done, info). Actions: 0 = hold, 1 = buy, 2 = sell.
    The state is an 8-vector: 5 z-scored technical indicators plus
    3 account features.
    """
    
    def __init__(self, data: pd.DataFrame, initial_balance: float = 10000, 
                 transaction_cost: float = 0.001, max_position: int = 100):
        """
        Args:
            data: price history with at least a 'close' column.
            initial_balance: starting cash balance.
            transaction_cost: proportional fee charged on every trade.
            max_position: maximum number of shares that can be held.
        """
        # Work on a copy so the indicator columns added below land on our
        # own frame, not the caller's (the original code mutated `data`
        # in place).
        self.data = data.copy()
        self.initial_balance = initial_balance
        self.transaction_cost = transaction_cost
        self.max_position = max_position
        
        # Episode state
        self.current_step = 0
        self.balance = initial_balance
        self.position = 0  # number of shares currently held
        self.total_trades = 0
        self.trade_history = []
        
        # Compute indicator features once up front.
        self._prepare_data()
        
    def _prepare_data(self):
        """Compute and z-score-normalize the technical-indicator features."""
        self.data['returns'] = self.data['close'].pct_change()
        self.data['sma_5'] = self.data['close'].rolling(5).mean()
        self.data['sma_20'] = self.data['close'].rolling(20).mean()
        self.data['rsi'] = self._calculate_rsi(self.data['close'])
        self.data['macd'] = self._calculate_macd(self.data['close'])
        
        # Z-score normalization. Guard against a zero or NaN std (constant
        # price series): dividing by it would turn every row into NaN and
        # the dropna() below would empty the whole dataset.
        features = ['returns', 'sma_5', 'sma_20', 'rsi', 'macd']
        for feature in features:
            std = self.data[feature].std()
            if not std > 0:  # catches both 0.0 and NaN
                std = 1.0
            self.data[f'{feature}_norm'] = (self.data[feature] - self.data[feature].mean()) / std
        
        # Drop the warm-up rows where the rolling indicators are undefined.
        self.data = self.data.dropna()
        
    def _calculate_rsi(self, prices: pd.Series, period: int = 14) -> pd.Series:
        """Relative Strength Index over `period` bars.

        When avg_loss is 0, rs becomes inf and the formula converges to
        100, which is the conventional RSI limit for an all-gain window.
        """
        delta = prices.diff()
        gain = delta.where(delta > 0, 0)
        loss = -delta.where(delta < 0, 0)
        avg_gain = gain.rolling(period).mean()
        avg_loss = loss.rolling(period).mean()
        rs = avg_gain / avg_loss
        return 100 - (100 / (1 + rs))
    
    def _calculate_macd(self, prices: pd.Series) -> pd.Series:
        """MACD line: EMA(12) - EMA(26)."""
        ema12 = prices.ewm(span=12).mean()
        ema26 = prices.ewm(span=26).mean()
        return ema12 - ema26
    
    def reset(self) -> np.ndarray:
        """Reset the episode and return the initial state."""
        self.current_step = 0
        self.balance = self.initial_balance
        self.position = 0
        self.total_trades = 0
        self.trade_history = []
        return self.get_state()
    
    def get_state(self) -> np.ndarray:
        """Return the 8-dim state vector for the current step.

        Returns the zero vector once the step index runs off the end of
        the data (terminal padding).
        """
        if self.current_step >= len(self.data):
            return np.zeros(8)
        
        row = self.data.iloc[self.current_step]
        
        # Normalized market features
        price_features = [
            row['returns_norm'],
            row['sma_5_norm'],
            row['sma_20_norm'],
            row['rsi_norm'],
            row['macd_norm']
        ]
        
        # Account features
        account_features = [
            self.balance / self.initial_balance - 1,  # fractional balance change
            self.position / self.max_position,  # position utilization
            len(self.trade_history) / 100  # scaled trade count
        ]
        
        return np.array(price_features + account_features, dtype=np.float32)
    
    def step(self, action: int) -> Tuple[np.ndarray, float, bool, Dict]:
        """Apply `action` (0=hold, 1=buy, 2=sell) and advance one bar.

        Returns:
            (next_state, reward, done, info) where info carries balance,
            position, total_trades and portfolio_value.
        """
        if self.current_step >= len(self.data) - 1:
            # Already at the last bar: nothing left to trade against.
            return self.get_state(), 0, True, {}
        
        current_price = self.data.iloc[self.current_step]['close']
        next_price = self.data.iloc[self.current_step + 1]['close']
        
        # Trade and compute the reward for this transition.
        reward = self._execute_action(action, current_price, next_price)
        
        # Advance to the next bar.
        self.current_step += 1
        
        # Episode ends when we reach the final bar.
        done = self.current_step >= len(self.data) - 1
        
        next_state = self.get_state()
        
        info = {
            'balance': self.balance,
            'position': self.position,
            'total_trades': self.total_trades,
            'portfolio_value': self.get_portfolio_value()
        }
        
        return next_state, reward, done, info
    
    def _execute_action(self, action: int, current_price: float, next_price: float) -> float:
        """Execute the trade and return the reward.
        
        Actions:
        0: Hold
        1: Buy as many shares as cash and max_position allow
        2: Sell the entire position
        
        The reward is the fractional change in portfolio value, with the
        post-trade position marked to `next_price`. (The original code
        valued both sides at `current_price`, leaving `next_price` unused,
        so rewards only ever reflected transaction costs and never price
        moves.)
        """
        # Value before the trade, marked at the current bar.
        old_portfolio_value = self.balance + self.position * current_price
        
        if action == 1:  # Buy
            # Largest quantity affordable after fees, capped by max_position.
            max_buy = min(
                int(self.balance / (current_price * (1 + self.transaction_cost))),
                self.max_position - self.position
            )
            
            if max_buy > 0:
                cost = max_buy * current_price * (1 + self.transaction_cost)
                self.balance -= cost
                self.position += max_buy
                self.total_trades += 1
                
                self.trade_history.append({
                    'step': self.current_step,
                    'action': 'BUY',
                    'quantity': max_buy,
                    'price': current_price,
                    'cost': cost
                })
        
        elif action == 2:  # Sell (entire position, if any)
            if self.position > 0:
                revenue = self.position * current_price * (1 - self.transaction_cost)
                self.balance += revenue
                sold_quantity = self.position
                self.position = 0
                self.total_trades += 1
                
                self.trade_history.append({
                    'step': self.current_step,
                    'action': 'SELL',
                    'quantity': sold_quantity,
                    'price': current_price,
                    'revenue': revenue
                })
        
        # Value after the trade, marked to the next bar's price so holding
        # exposure through a price move is actually rewarded/penalized.
        new_portfolio_value = self.balance + self.position * next_price
        reward = (new_portfolio_value - old_portfolio_value) / old_portfolio_value
        
        # Small penalty for a highly concentrated position.
        if abs(self.position / self.max_position) > 0.8:
            reward -= 0.01
        
        return reward
    
    def get_portfolio_value(self) -> float:
        """Cash plus position marked at the current bar's close."""
        if self.current_step >= len(self.data):
            return self.balance
        
        current_price = self.data.iloc[self.current_step]['close']
        return self.balance + self.position * current_price

class DQNAgent:
    """Deep Q-Network agent with an experience-replay buffer and a
    separate target network for stable TD targets."""
    
    def __init__(self, state_size: int, action_size: int, learning_rate: float = 0.001):
        """
        Args:
            state_size: dimensionality of the observation vector.
            action_size: number of discrete actions.
            learning_rate: Adam learning rate for the Q-network.
        """
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        
        # Hyperparameters
        self.epsilon = 1.0  # epsilon-greedy exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.memory = deque(maxlen=2000)  # replay buffer of transitions
        self.batch_size = 32
        self.gamma = 0.95  # discount factor
        # Sync the target network every N training replays. Without this,
        # nothing in this file ever called update_target_network() after
        # construction, so TD targets came from frozen random weights.
        self.target_update_freq = 10
        self._replay_calls = 0
        
        # Online and target Q-networks
        self.q_network = self._build_model()
        self.target_network = self._build_model()
        self.update_target_network()
        
    def _build_model(self) -> "Model":
        """Build and compile the MLP Q-network (linear output = Q-values)."""
        model = Sequential([
            Dense(128, input_dim=self.state_size, activation='relu'),
            Dropout(0.2),
            Dense(64, activation='relu'),
            Dropout(0.2),
            Dense(32, activation='relu'),
            Dense(self.action_size, activation='linear')
        ])
        
        model.compile(optimizer=Adam(learning_rate=self.learning_rate), loss='mse')
        return model
    
    def update_target_network(self):
        """Copy the online network's weights into the target network."""
        self.target_network.set_weights(self.q_network.get_weights())
    
    def remember(self, state: np.ndarray, action: int, reward: float, 
                next_state: np.ndarray, done: bool):
        """Store one transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))
    
    def act(self, state: np.ndarray) -> int:
        """Epsilon-greedy action selection."""
        if np.random.random() <= self.epsilon:
            return random.randrange(self.action_size)
        
        q_values = self.q_network.predict(state.reshape(1, -1), verbose=0)
        # Cast so callers get a plain Python int rather than np.int64.
        return int(np.argmax(q_values[0]))
    
    def replay(self):
        """Sample a minibatch and take one gradient step on the Q-network.

        No-op until the buffer holds at least one full batch.
        """
        if len(self.memory) < self.batch_size:
            return
        
        batch = random.sample(self.memory, self.batch_size)
        states = np.array([experience[0] for experience in batch])
        actions = np.array([experience[1] for experience in batch])
        rewards = np.array([experience[2] for experience in batch])
        next_states = np.array([experience[3] for experience in batch])
        # Float cast so (1 - dones) cleanly zeroes the bootstrap term on
        # terminal transitions.
        dones = np.array([experience[4] for experience in batch], dtype=np.float32)
        
        # TD targets from the (periodically frozen) target network.
        target_q_values = self.target_network.predict(next_states, verbose=0)
        max_target_q_values = np.max(target_q_values, axis=1)
        
        targets = rewards + (self.gamma * max_target_q_values * (1 - dones))
        
        # Only the taken action's Q-value is moved toward its target;
        # the others keep their current predictions (zero gradient).
        current_q_values = self.q_network.predict(states, verbose=0)
        for i in range(self.batch_size):
            current_q_values[i][actions[i]] = targets[i]
        
        self.q_network.fit(states, current_q_values, epochs=1, verbose=0)
        
        # Periodic target-network sync (fix: previously never happened).
        self._replay_calls += 1
        if self._replay_calls % self.target_update_freq == 0:
            self.update_target_network()
        
        # Anneal exploration.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

class PolicyGradientAgent:
    """REINFORCE-style policy-gradient agent."""
    
    def __init__(self, state_size: int, action_size: int, learning_rate: float = 0.001):
        """
        Args:
            state_size: dimensionality of the observation vector.
            action_size: number of discrete actions.
            learning_rate: Adam learning rate for the policy network.
        """
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        
        # Per-episode trajectory storage; cleared after each learn() call.
        self.states = []
        self.actions = []
        self.rewards = []
        
        # Policy network (softmax over actions)
        self.policy_network = self._build_policy_network()
        
    def _build_policy_network(self) -> "Model":
        """Build and compile the MLP policy network."""
        model = Sequential([
            Dense(128, input_dim=self.state_size, activation='relu'),
            Dropout(0.2),
            Dense(64, activation='relu'),
            Dropout(0.2),
            Dense(self.action_size, activation='softmax')
        ])
        
        model.compile(optimizer=Adam(learning_rate=self.learning_rate), loss='categorical_crossentropy')
        return model
    
    def act(self, state: np.ndarray) -> int:
        """Sample an action from the current policy distribution."""
        probabilities = self.policy_network.predict(state.reshape(1, -1), verbose=0)[0]
        # Renormalize in float64: the network returns float32 values whose
        # sum can drift slightly from 1.0, which makes np.random.choice
        # raise "probabilities do not sum to 1".
        probabilities = np.asarray(probabilities, dtype=np.float64)
        probabilities = probabilities / probabilities.sum()
        return int(np.random.choice(self.action_size, p=probabilities))
    
    def remember(self, state: np.ndarray, action: int, reward: float):
        """Record one step of the current trajectory."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
    
    def learn(self):
        """One REINFORCE update from the stored trajectory, then clear it."""
        if len(self.states) == 0:
            return
        
        # Discounted return at each step of the trajectory.
        discounted_rewards = self._discount_rewards(self.rewards)
        
        # Standardize returns for variance reduction; the epsilon guards
        # against a zero std on degenerate (constant-reward) trajectories.
        discounted_rewards = (discounted_rewards - np.mean(discounted_rewards)) / (np.std(discounted_rewards) + 1e-8)
        
        # Cross-entropy against the taken actions, weighted by the returns,
        # implements the policy-gradient loss.
        states = np.array(self.states)
        actions_onehot = tf.keras.utils.to_categorical(self.actions, self.action_size)
        
        sample_weights = discounted_rewards
        
        self.policy_network.fit(states, actions_onehot, sample_weight=sample_weights, epochs=1, verbose=0)
        
        # Clear the trajectory buffers for the next episode.
        self.states = []
        self.actions = []
        self.rewards = []
    
    def _discount_rewards(self, rewards: List[float], gamma: float = 0.99) -> np.ndarray:
        """Return the discounted cumulative reward (return) at each step."""
        discounted = np.zeros_like(rewards, dtype=np.float32)
        running_add = 0
        
        # Walk backwards so each entry accumulates its discounted future.
        for i in reversed(range(len(rewards))):
            running_add = running_add * gamma + rewards[i]
            discounted[i] = running_add
        
        return discounted

class TradingBacktester:
    """Runs training/backtest episodes against a TradingEnvironment and
    summarizes the per-episode results."""
    
    @staticmethod
    def run_backtest(agent, env: "TradingEnvironment", episodes: int = 100) -> Dict:
        """Run `episodes` episodes, training the agent along the way.

        Supports both agent styles in this module: DQN-style agents
        (a `memory` replay buffer, 5-argument `remember`, `replay`) and
        policy-gradient agents (`states`/`actions`/`rewards` lists,
        3-argument `remember`, `learn`).
        """
        episode_rewards = []
        episode_trades = []
        episode_returns = []
        
        print(f"🔄 开始回测 {episodes} 轮...")
        
        for episode in range(episodes):
            state = env.reset()
            total_reward = 0
            initial_value = env.get_portfolio_value()
            
            while True:
                action = agent.act(state)
                next_state, reward, done, info = env.step(action)
                
                # Store the transition. Dispatch on the agent's storage
                # attribute rather than on `remember` itself: BOTH agent
                # types define `remember` (with different arities), so the
                # original `hasattr(agent, 'remember')` check called the
                # 3-argument policy-gradient version with 5 arguments and
                # raised TypeError, and its elif branch was unreachable.
                if hasattr(agent, 'memory'):
                    agent.remember(state, action, reward, next_state, done)
                elif hasattr(agent, 'states'):
                    agent.remember(state, action, reward)
                
                state = next_state
                total_reward += reward
                
                if done:
                    break
            
            # One learning update per episode.
            if hasattr(agent, 'replay'):
                agent.replay()
            elif hasattr(agent, 'learn'):
                agent.learn()
            
            # Record per-episode results.
            final_value = env.get_portfolio_value()
            episode_return = (final_value - initial_value) / initial_value
            
            episode_rewards.append(total_reward)
            episode_trades.append(env.total_trades)
            episode_returns.append(episode_return)
            
            if (episode + 1) % 20 == 0:
                avg_reward = np.mean(episode_rewards[-20:])
                avg_return = np.mean(episode_returns[-20:])
                print(f"   Episode {episode + 1}: Avg Reward = {avg_reward:.4f}, Avg Return = {avg_return:.2%}")
        
        return {
            'episode_rewards': episode_rewards,
            'episode_trades': episode_trades,
            'episode_returns': episode_returns,
            # The three fields below reflect only the FINAL episode:
            # env.reset() clears them at the start of every episode.
            'final_portfolio_value': env.get_portfolio_value(),
            'total_trades': env.total_trades,
            'trade_history': env.trade_history
        }
    
    @staticmethod
    def analyze_results(results: Dict) -> Dict:
        """Summarize per-episode returns into headline statistics.

        NOTE(review): 'total_return' is the LAST episode's return (not a
        compounded total) and 'max_drawdown' is the worst single-episode
        return (not a peak-to-trough drawdown). Values are kept as-is
        because callers print them under these labels.
        """
        returns = np.array(results['episode_returns'])
        
        analysis = {
            'total_return': returns[-1],
            'average_return': np.mean(returns),
            'volatility': np.std(returns),
            'sharpe_ratio': np.mean(returns) / np.std(returns) if np.std(returns) > 0 else 0,
            'max_drawdown': np.min(returns),
            'win_rate': len(returns[returns > 0]) / len(returns),
            'total_trades': results['total_trades'],
            'average_trades_per_episode': np.mean(results['episode_trades'])
        }
        
        return analysis

def main():
    """Entry point: fetch data, train/backtest RL trading agents, and
    print a comparison against buy-and-hold."""
    
    if not TENSORFLOW_AVAILABLE:
        print("❌ TensorFlow未安装，无法运行强化学习示例")
        print("   请运行: pip install tensorflow")
        return
    
    print("🤖 强化学习交易智能体系统")
    print("=" * 50)
    
    # Initialize the market-data service
    market_service = MarketDataService()
    
    # Fetch historical daily prices
    symbol = 'AAPL'
    print(f"📊 获取 {symbol} 历史数据...")
    
    start_date = datetime.now() - timedelta(days=500)
    end_date = datetime.now()
    
    data = market_service.get_stock_price(
        symbol,
        start_date=start_date,
        end_date=end_date,
        interval='1d'
    )
    
    if data.empty:
        print("❌ 未能获取数据")
        return
    
    print(f"✅ 获取到 {len(data)} 条数据记录")
    
    try:
        # Build the trading environment
        print(f"\n🏢 创建交易环境...")
        env = TradingEnvironment(data, initial_balance=10000, transaction_cost=0.001)
        
        state_size = len(env.get_state())
        action_size = 3  # Hold, Buy, Sell
        
        print(f"   状态空间大小: {state_size}")
        print(f"   动作空间大小: {action_size}")
        
        # Create the agents
        agents = {
            'DQN': DQNAgent(state_size, action_size),
            'PolicyGradient': PolicyGradientAgent(state_size, action_size)
        }
        
        print(f"\n🤖 创建了 {len(agents)} 个智能体")
        
        # Train and backtest each agent.
        # NOTE(review): both agents share this single env instance;
        # env.reset() at the start of every episode makes that safe, but the
        # trade history stored per agent reflects only its final episode.
        results = {}
        
        for agent_name, agent in agents.items():
            print(f"\n🎯 训练 {agent_name} 智能体...")
            
            # Train / backtest
            agent_results = TradingBacktester.run_backtest(agent, env, episodes=100)
            results[agent_name] = agent_results
            
            # Summarize this agent's results
            analysis = TradingBacktester.analyze_results(agent_results)
            
            print(f"   {agent_name} 训练完成:")
            print(f"     总收益: {analysis['total_return']:.2%}")
            print(f"     夏普比率: {analysis['sharpe_ratio']:.3f}")
            print(f"     胜率: {analysis['win_rate']:.2%}")
            print(f"     总交易次数: {analysis['total_trades']}")
        
        # Benchmark strategy: buy and hold
        print(f"\n📊 基准策略 (买入持有):")
        buy_hold_return = (data['close'].iloc[-1] - data['close'].iloc[0]) / data['close'].iloc[0]
        print(f"   买入持有收益: {buy_hold_return:.2%}")
        
        # Strategy comparison table
        print(f"\n📈 策略对比表:")
        print("-" * 60)
        print(f"{'策略':<15} {'总收益':<10} {'夏普比率':<10} {'胜率':<10} {'交易次数':<10}")
        print("-" * 60)
        
        for agent_name, agent_results in results.items():
            analysis = TradingBacktester.analyze_results(agent_results)
            print(f"{agent_name:<15} {analysis['total_return']:<10.2%} "
                  f"{analysis['sharpe_ratio']:<10.3f} {analysis['win_rate']:<10.2%} "
                  f"{analysis['total_trades']:<10}")
        
        print(f"{'买入持有':<15} {buy_hold_return:<10.2%} {'N/A':<10} {'N/A':<10} {'2':<10}")
        
        # Pick the best strategy by its 'total_return' statistic
        best_agent = max(results.items(), 
                        key=lambda x: TradingBacktester.analyze_results(x[1])['total_return'])
        best_name, best_results = best_agent
        best_analysis = TradingBacktester.analyze_results(best_results)
        
        print(f"\n🏆 最佳策略: {best_name}")
        print(f"   总收益: {best_analysis['total_return']:.2%}")
        print(f"   夏普比率: {best_analysis['sharpe_ratio']:.3f}")
        print(f"   最大回撤: {best_analysis['max_drawdown']:.2%}")
        
        # Trade-history breakdown for the best agent (final episode only,
        # since env.reset() clears trade_history every episode)
        print(f"\n📋 交易历史分析 ({best_name}):")
        trade_history = best_results['trade_history']
        
        if trade_history:
            buy_trades = [t for t in trade_history if t['action'] == 'BUY']
            sell_trades = [t for t in trade_history if t['action'] == 'SELL']
            
            print(f"   买入交易: {len(buy_trades)} 次")
            print(f"   卖出交易: {len(sell_trades)} 次")
            
            if buy_trades:
                avg_buy_price = np.mean([t['price'] for t in buy_trades])
                print(f"   平均买入价格: ${avg_buy_price:.2f}")
            
            if sell_trades:
                avg_sell_price = np.mean([t['price'] for t in sell_trades])
                print(f"   平均卖出价格: ${avg_sell_price:.2f}")
                
                if buy_trades:
                    avg_profit_per_trade = avg_sell_price - avg_buy_price
                    print(f"   平均每笔交易利润: ${avg_profit_per_trade:.2f}")
        
        # Live trading suggestion from each trained agent.
        print(f"\n💡 实时交易建议:")
        # NOTE(review): after a full backtest current_step sits at the end of
        # the data, so get_state() may return the all-zero state vector here.
        current_state = env.get_state()
        
        for agent_name, agent in agents.items():
            action = agent.act(current_state)
            action_names = ['持有', '买入', '卖出']
            print(f"   {agent_name}: {action_names[action]}")
        
        print(f"\n⚠️  风险提示:")
        print(f"   强化学习交易存在高风险，仅供学习研究")
        print(f"   实际交易前请充分测试和验证策略")
        print(f"   建议结合其他分析方法和风险管理")
        
    except Exception as e:
        print(f"❌ 强化学习交易过程中发生错误: {e}")
        import traceback
        traceback.print_exc()

# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
