import torch
import tushare as ts
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import TimeSeriesSplit
import backtrader as bt
import matplotlib.pyplot as plt

import pandas as pd
import tushare as ts
from sklearn.preprocessing import StandardScaler

from sklearn.svm import SVR, SVC


def plot_rewards(rewards, save_path='./model/training_rewards.png', window_size=50):
    """Plot per-episode training rewards with a moving-average overlay and save to disk.

    Parameters
    ----------
    rewards : sequence of float
        Total reward per episode, in episode order.
    save_path : str
        Output PNG path for the figure.
    window_size : int
        Desired moving-average window; clamped to ``len(rewards)``.
    """
    import matplotlib.pyplot as plt  # local import keeps the function self-contained

    plt.figure(figsize=(12, 6))

    # Raw (unsmoothed) reward curve
    plt.plot(rewards, alpha=0.3, label='Raw Reward', color='gray')

    # Clamp the smoothing window to the available history
    valid_window = min(window_size, len(rewards))
    if valid_window > 1:
        # Moving average via convolution; 'valid' mode avoids edge artifacts
        smooth_rewards = np.convolve(
            rewards,
            np.ones(valid_window) / valid_window,
            mode='valid'
        )

        # x coordinates matching the length of the 'valid' convolution output
        x_smooth = range(valid_window - 1, len(rewards))

        plt.plot(
            x_smooth,
            smooth_rewards,
            label=f'{valid_window}ep Moving Avg',
            color='royalblue',
            linewidth=2
        )

    if len(rewards) > 0:
        plt.title(f'Training Progress ({len(rewards)} episodes)', fontsize=14)
        plt.xlabel('Episode', fontsize=12)
        plt.ylabel('Total Reward', fontsize=12)
        plt.grid(True, alpha=0.3)
        plt.legend()

        # Expand the y-limits 5% AWAY from the data on both sides.
        # BUGFIX: the previous lower-bound condition was inverted — a negative
        # minimum was scaled by 0.95 (toward zero), pushing y_min ABOVE the data
        # and clipping it; a non-negative minimum was scaled by 1.05, same effect.
        r_min, r_max = min(rewards), max(rewards)
        y_min = r_min * 1.05 if r_min < 0 else r_min * 0.95
        y_max = r_max * 1.05 if r_max > 0 else r_max * 0.95
        if y_min < y_max:  # skip degenerate/inverted limits (e.g. all rewards equal or zero)
            plt.ylim(y_min, y_max)
    else:
        plt.title("No Training Data", fontsize=14)

    plt.tight_layout()
    plt.savefig(save_path, dpi=150)
    print(f"奖励曲线已保存至 {save_path}")
    plt.close()


class RLStrategy(bt.Strategy):
    """Backtrader strategy driven by an external RL environment/agent pair.

    Each bar is forwarded to the RL environment (``env``); the agent (``agent``)
    selects a discrete action that is mapped to buy/sell orders.  In training
    mode, transitions are buffered and the agent is updated in mini-batches;
    otherwise detailed trade logs are printed.
    """
    params = (
        ('env', None),         # RL environment: provides data, get_state(), step(), balance, position
        ('agent', None),       # RL agent: provides act(), train_batch(), update_epsilon(), device
        ('train_mode', True),  # training-mode switch
        ('verbose', False),    # independent console-logging switch
        ('batch_size', 32),    # mini-batch size for buffered training
    )

    def __init__(self):
        self.episode_reward = 0   # cumulative reward for the current episode
        self.trade_count = 0      # executed-trade counter (used by the summary log)
        self.episode_buffer = []  # transition buffer: (state, action, reward, next_state, done)
        self._init_logging()
        self.warmup_steps = 30    # matches the initial 30-day observation period
        # Training-mode optimisation: force-mute per-step logging
        if self.p.train_mode:
            self.p.verbose = False
            # warm-up counter


    def next(self):
        """Per-bar hook: query the agent, execute the trade, step the environment."""
        # Take no action during the warm-up period
        if len(self) <= self.warmup_steps:
            return

        env = self.p.env
        current_step = env.current_step
        # print('current step    ',current_step,'    self data:',len(self.data))
        # if current_step >= env.max_step:
        #     return

        try:
            # Fetch the inputs needed for this decision
            current_price = env.data['close'].iloc[current_step]
            state = env.get_state()

            # Action selection (exploration handled inside the agent)
            action = self.p.agent.act(state)

            # Execute the trade; snapshot balance/position first so the log
            # can report the deltas (tensors — hence .clone())
            pre_balance = env.balance.clone()
            pre_position = env.position.clone()
            self._execute_trade(action, current_price)

            # Environment interaction
            next_state, reward, done, _ = env.step(action)
            self.episode_reward += reward

            # Training-mode data collection
            if self.p.train_mode:
                # Store the full transition tuple
                self.episode_buffer.append((
                    state,
                    action,
                    reward,
                    next_state,
                    done
                ))

                # Dynamic batching: train as soon as the buffer is full
                if len(self.episode_buffer) >= self.p.batch_size:
                    self._train_from_buffer()
                    self.episode_buffer = []  # clear the buffer
                    # print('current step    ', current_step, '    self data:', len(self))
            else:

                self._log_trading_details(pre_balance, pre_position,
                                          env.balance, env.position,
                                          action, current_price, reward)

            # End-of-episode handling
            if done:
                if self.p.train_mode:
                    # Train on any leftover transitions
                    if len(self.episode_buffer) > 0:
                        self._train_from_buffer()
                        self.episode_buffer = []
                    # Optional end-of-episode callback
                    self.p.agent.update_epsilon()  # if the agent uses epsilon decay
                else:
                    self._log_episode_summary(current_price)
                self.episode_reward = 0

        except Exception as e:
            # NOTE(review): in training mode errors are swallowed silently —
            # confirm this is intentional and not hiding training bugs.
            if not self.p.train_mode:
                print(f'Step {current_step} Error: {str(e)}')

    def _train_from_buffer(self):
        """Train the agent on the transitions buffered for the current episode."""
        # Unzip the buffer into parallel sequences
        states, actions, rewards, next_states, dones = zip(*self.episode_buffer)

        # Convert to tensors on the agent's device
        states = torch.FloatTensor(np.array(states)).to(self.p.agent.device)
        actions = torch.LongTensor(actions).to(self.p.agent.device)
        rewards = torch.FloatTensor(rewards).to(self.p.agent.device)
        next_states = torch.FloatTensor(np.array(next_states)).to(self.p.agent.device)
        dones = torch.FloatTensor(dones).to(self.p.agent.device)

        # Delegate the actual update to the agent
        self.p.agent.train_batch(states, actions, rewards, next_states, dones)

    def _execute_trade(self, action, current_price):
        """Map a discrete action to a backtrader order (with liquidity checks).

        Actions 0-2 sell 100%/50%/30% of the position; 3-5 buy 30%/50%/100%
        of the affordable quantity; any other action (e.g. 6) holds.
        """
        if action in [0, 1, 2]:  # sell
            if self.p.env.position > 0:
                # Compute the actually sellable quantity
                available = self.p.env.position.item()
                sell_ratios = [1.0, 0.5, 0.3]
                sell_qty = min(available * sell_ratios[action], available)
                self.sell(size=sell_qty)
                self.trade_count += 1
        elif action in [3, 4, 5]:  # buy
            # Affordable quantity after a 1000 cash cushion and 0.02% fee
            max_buy = (self.p.env.balance.item() - 1000) / (current_price * 1.0002)
            if max_buy > 0:
                buy_ratios = [0.3, 0.5, 1.0]
                buy_qty = int(max_buy * buy_ratios[action - 3])
                if buy_qty > 0:
                    self.buy(size=buy_qty)
                    self.trade_count += 1

    def _init_logging(self):
        """Initialise logging components (only active outside training mode)."""
        if not self.p.train_mode:
            from tabulate import tabulate
            self.tabulate = tabulate
            self.log_headers = [
                "时间", "价格", "动作", "数量", "仓位变化",
                "费用", "总资产", "现金", "仓位比例", "累计奖励"
            ]
    def _log_trading_details(self, pre_bal, pre_pos, new_bal, new_pos,
                             action, price, reward):
        """Print a one-row table describing the trade just executed."""
        if self.p.verbose and not self.p.train_mode:

            action_types = ["全卖", "卖50%", "卖30%", "买30%", "买50%", "全买", "持有"]
            log_entry = [
                self.data.datetime.date(0).isoformat(),
                f"{price:.2f}",
                action_types[action],
                f"{abs(pre_pos.item() - new_pos.item()):>12.2f}",
                f"{pre_pos.item():.2f} → {new_pos.item():.2f}",
                f"{(pre_bal - new_bal).abs().item():>12,.2f}",
                f"{(new_bal + new_pos * price).item():>15,.2f}",
                f"{new_bal.item():>15,.2f}",
                f"{(new_pos * price / (new_bal + new_pos * price)).item():>8.1%}",
                f"{self.episode_reward:>8.2f}"
            ]

            # NOTE(review): this appends a "状态价值" header but log_entry never
            # gains a matching column, so headers (11) and row (10) misalign.
            if not hasattr(self, 'value_header_added'):
                self.log_headers.append("状态价值")
                self.value_header_added = True

            print(f"\n{'=' * 34} 交易执行 {'=' * 34}")
            print(self.tabulate([log_entry],
                                headers=self.log_headers,
                                tablefmt="presto",
                                numalign="right"))

    def _log_episode_summary(self, final_price):
        """Print an end-of-episode summary (non-training mode only)."""
        if self.p.verbose:
            portfolio_value = self.p.env.balance.item() + \
                              self.p.env.position.item() * final_price
            summary = [
                ["总交易次数", f"{self.trade_count:>10}"],
                ["最终资产价值", f"{portfolio_value:>15,.2f}"],
                # ["初始资产价值", f"{self.p.env.init_balance:>15,.2f}"],
                # Annualised over (bars - 30 warm-up days) trading days, 252/yr
                ["年化收益率", f"{(portfolio_value / self.p.env.init_balance) ** (252 / (len(self)-30)) - 1:>10.1%}"]
            ]
            print(f"\n{'=' * 34} 周期汇总 {'=' * 34}")
            print(self.tabulate(summary,
                                headers=["项目", "数值"],
                                tablefmt="pretty",  # better suited to two columns
                                numalign="right"))
            self.trade_count = 0

def back_trade(feature_df):
    """Backtest a precomputed trading signal with backtrader.

    Parameters
    ----------
    feature_df : pandas.DataFrame
        OHLCV data indexed by datetime with an extra ``signal`` column
        (1 = open long, 0 = go flat).  The caller's frame is NOT modified.

    Returns
    -------
    list
        Strategy instances returned by ``cerebro.run()`` so callers can
        inspect the attached analyzers (backward compatible: the previous
        version implicitly returned None).
    """
    # ================== Backtrader backtest ==================
    # Custom data feed (key to avoiding the AttributeError on 'signal')
    class SignalData(bt.feeds.PandasData):
        lines = ('signal',)  # extra signal line
        params = (
            ('datetime', None),  # use the DataFrame index as the time axis
            ('open', 'open'),
            ('high', 'high'),
            ('low', 'low'),
            ('close', 'close'),
            ('volume', 'volume'),
            ('signal', 'signal'),
        )

    class HalfCashSizer(bt.Sizer):
        """Sizer: buys commit 90% of available cash; sells close the whole position."""

        def _getsizing(self, comminfo, cash, data, isbuy):
            if isbuy:
                cash_to_use = cash * 0.9  # keep a 10% cash cushion
                price = data.close[0]
                if price == 0:
                    return 0  # avoid division by zero
                # Max affordable size after commission
                return comminfo.getsize(price, cash_to_use)
            # Closing: liquidate the full current position
            return self.broker.getposition(data).size

    class MLStrategy(bt.Strategy):
        """Long-only strategy that follows the precomputed 'signal' line."""
        params = (
            ('printlog', True),
            ('stake', 1000),  # kept for compatibility, unused
            ('risk_pct', 0),  # kept for compatibility, unused
        )

        def __init__(self):
            self.signal = self.data.lines.signal
            self.order = None  # pending-order guard
            # Install the custom sizer
            self.sizer = HalfCashSizer()

        def next(self):
            # Skip while an order is still pending
            if self.order:
                return

            # Long entry signal
            if self.signal[0] == 1 and not self.position:
                self.order = self.buy()  # size computed by HalfCashSizer

            # Exit signal
            elif self.signal[0] == 0 and self.position:
                self.order = self.close()

        def notify_order(self, order):
            if order.status in [order.Completed]:
                if order.isbuy():
                    executed_cash = order.executed.price * order.executed.size
                    self.log(
                        f'BUY EXECUTED, Price: {order.executed.price:.2f}, Size: {order.executed.size}, Cost: {executed_cash:.2f}')
                elif order.issell():
                    self.log(f'SELL EXECUTED, Price: {order.executed.price:.2f}')
                self.order = None

        def log(self, txt, dt=None, doprint=False):
            if self.params.printlog or doprint:
                dt = dt or self.datas[0].datetime.date(0)
                print(f'{dt.isoformat()}, {txt}')

    # ================== Engine configuration ==================
    cerebro = bt.Cerebro()

    # Data preprocessing.  Work on a copy: the original implementation wrote
    # back into the caller's DataFrame as a side effect.
    feature_df = feature_df.copy()
    numeric_cols = ['open', 'high', 'low', 'close', 'volume', 'signal']
    feature_df[numeric_cols] = feature_df[numeric_cols].apply(pd.to_numeric, errors='coerce')
    feature_df = feature_df.dropna(subset=numeric_cols)
    feature_df['signal'] = feature_df['signal'].astype(int)

    # Load the data feed
    data = SignalData(dataname=feature_df)
    cerebro.adddata(data)

    # Strategy and broker parameters
    cerebro.addstrategy(MLStrategy)
    cerebro.broker.setcash(1000000)
    cerebro.broker.setcommission(commission=0.001)

    # Analyzers
    cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='sharpe')
    cerebro.addanalyzer(bt.analyzers.DrawDown, _name='drawdown')

    # ================== Run the backtest ==================
    print('初始资金: %.2f' % cerebro.broker.getvalue())
    results = cerebro.run()
    print('最终资金: %.2f' % cerebro.broker.getvalue())

    # Report analyzer output
    strat = results[0]
    print('夏普比率:', strat.analyzers.sharpe.get_analysis())
    print('最大回撤:', strat.analyzers.drawdown.get_analysis()['max']['drawdown'])

    # Visualisation
    cerebro.plot(style='candlestick')

    return results

