# ================== Data loading ==================
import os

import numpy as np
import pandas as pd
import 数据预处理
import 读取数据
import backtrader as bt

from 多因子.框架.回测 import RLStrategy, plot_rewards
from 多因子.框架.策略 import TradingEnv, A2CAgent

# ================== Header setup ==================
import matplotlib

from 多因子.框架.预测 import predict


# Load training data
def load_and_preprocess_data(code, start, end, ex_data=None):
    """Fetch raw OHLCV data for *code*, enrich it with technical
    indicators, and drop every row that still contains a NaN.

    Args:
        code: security identifier forwarded to 读取数据.load_data.
        start: start of the requested date range.
        end: end of the requested date range.
        ex_data: optional extra data forwarded to the loader.

    Returns:
        A cleaned DataFrame with indicator columns and no missing values.
    """
    raw = 读取数据.load_data(code, start, end, ex_data)
    enriched = 数据预处理.add_technical_indicators(raw)
    return enriched.dropna(how='any')

# ================== Backtest configuration ==================
def create_cerebro(data, env, agent, train_mode=True):
    """Build a Backtrader Cerebro instance wired up with the RL strategy.

    Args:
        data: DataFrame indexed by trade date with OHLCV columns.
        env: trading environment; its init_balance seeds the broker cash.
        agent: RL agent driving the strategy's decisions.
        train_mode: whether the strategy should update the agent.

    Returns:
        A configured bt.Cerebro instance ready for run().
    """
    # Backtrader needs an explicit datetime column, so promote the index
    # to a regular column before handing the frame to the feed.
    frame = data.reset_index()[['trade_date', 'open', 'high', 'low', 'close', 'volume']]
    feed = bt.feeds.PandasData(
        dataname=frame,
        datetime=0,       # column 0 holds the datetime
        open=1,
        high=2,
        low=3,
        close=4,
        volume=5,
        openinterest=-1,  # the frame carries no open-interest column
    )

    cerebro = bt.Cerebro()
    cerebro.adddata(feed)
    cerebro.addstrategy(
        RLStrategy, env=env, agent=agent, train_mode=train_mode, batch_size=64
    )
    cerebro.broker.setcash(env.init_balance)
    cerebro.broker.setcommission(commission=0.001)  # 0.1% per trade
    return cerebro

# ================== Training pipeline ==================
if __name__ == '__main__':
    # Securities and date range used for training.
    stock_codes = [
        '601137.SH'
    ]

    start_date = '20120101'
    end_date = '20250326'

    # NOTE(review): the test-set configuration below is not used anywhere
    # in this block — presumably intended for the prediction/backtest
    # step (多因子.框架.预测.predict); confirm before removing.
    test_stock_codes = '000063.SZ'
    test_start_date = '20241001'
    test_end_date = '20250327'

    dfs = []
    for code in stock_codes:
        try:
            df = load_and_preprocess_data(code, start_date, end_date)
            dfs.append(df)
            print(f"成功加载 {code}，时间范围: {df.index.min().date()} 至 {df.index.max().date()}")
        except Exception as e:
            # Best-effort loading: skip securities that fail and continue.
            print(f"加载 {code} 失败: {str(e)}")
            continue

    # Fail fast with a clear message: pd.concat raises an opaque
    # ValueError when handed an empty list.
    if not dfs:
        raise SystemExit("错误：没有成功加载任何股票数据")

    # Concatenate per-security frames, preserving original time order.
    combined_data = pd.concat(dfs)

    # Training parameters.
    num_episodes = 100
    best_reward = -np.inf

    # ================== Model training ==================
    import 模型训练

    # Feature columns must match between training and inference.
    features = ['price_ma5_pct', 'price_ma10_pct', 'price_ma20_pct', 'price_ma30_pct',
                'volume_ma5_pct', 'volume_ma10_pct', 'volume_ma20_pct', 'volume_ma30_pct']

    # lstm_model, scaler = 模型训练.LSTM(combined_data, features)

    svm_model = 模型训练.SVMModel(features)
    metrics = svm_model.train(combined_data)

    # Initialize environment and agent.
    env = TradingEnv(combined_data, svm_model, features)
    agent = A2CAgent(state_dim=len(env.get_state()), action_dim=7)

    episode_rewards = []
    for episode in range(num_episodes):
        # Reset the environment (time-series order is preserved).
        env.reset()

        # Fresh Cerebro instance per episode, then run the backtest.
        cerebro = create_cerebro(combined_data, env, agent, train_mode=True)
        cerebro.run()

        # Record this episode's cumulative reward.
        current_reward = env.total_reward
        episode_rewards.append(current_reward)

        # Save a checkpoint every episode (same side effect as before),
        # but fix the best-reward bookkeeping: the original `if True:`
        # overwrote best_reward unconditionally, which made the "best"
        # tracking meaningless.
        os.makedirs('./model/ac', exist_ok=True)  # ensure target directory exists
        agent.save(f'./model/ac/best_ac_agent_ep{episode}.pth')
        if current_reward > best_reward:
            best_reward = current_reward

        # Progress report.
        print(f"Episode {episode + 1}/{num_episodes} | Total Reward: {env.total_reward:.2f}")

    # Visualize the reward curve.
    if episode_rewards:
        plot_rewards(episode_rewards)
    else:
        print("警告：没有可用的奖励数据")

