from flask import Flask, jsonify, render_template
from flask_cors import CORS
import numpy as np
import matplotlib.pyplot as plt
import os
import dashscope
from dashscope import Generation
from data_handler import get_multi_asset_data
from trading_env import TradingEnv
from dqn_agent import DQNAgent
from config import WINDOW_SIZE, ASSET_CODES, TARGET_UPDATE

# Matplotlib configuration so Chinese labels render correctly in the chart
plt.rcParams["font.family"] = ["SimHei", "Microsoft YaHei", "SimSun"]
plt.rcParams["axes.unicode_minus"] = False

# Configure the Tongyi Qianwen (DashScope) API key.
# SECURITY: this key was previously hardcoded in source and must be treated
# as leaked — rotate it and supply the new key via the DASHSCOPE_API_KEY
# environment variable. The old literal remains only as a backward-compatible
# fallback.
dashscope.api_key = os.getenv("DASHSCOPE_API_KEY", "sk-69e43851d4be47d9a32beb5f48be9327")

app = Flask(__name__)
CORS(app)

# Ensure the output directory for generated charts exists
os.makedirs('static', exist_ok=True)

# Module-level cache of backtest artifacts. Populated once by run_strategy()
# (called at import time below) and served read-only by the API routes.
dqn_history = None   # per-day {"total_asset": ...} records for the DQN strategy
dqn_metrics = None   # metrics dict from calculate_metrics() for the DQN strategy
bah_history = None   # per-day records for the buy-and-hold baseline
bah_metrics = None   # metrics dict for the buy-and-hold baseline
train_loss = None    # mean training loss per episode
llm_analysis = None  # LLM analysis result as an HTML fragment


def calculate_metrics(asset_history):
    """Compute backtest metrics: annualized return, Sharpe ratio, max drawdown.

    Args:
        asset_history: chronological list of dicts, each holding the day's
            portfolio value under the key "total_asset".

    Returns:
        dict with Chinese-labelled keys (consumed as-is by the front end):
        annualized return in %, Sharpe ratio, max drawdown in %.
        Returns all-zero metrics for a history with fewer than two points,
        and a zero Sharpe ratio when volatility is zero (both previously
        produced divide-by-zero / NaN results).
    """
    total_assets = np.array([h["total_asset"] for h in asset_history], dtype=float)

    # Degenerate history: no daily returns can be computed.
    if len(total_assets) < 2:
        return {"年化收益率": 0.0, "夏普比率": 0.0, "最大回撤": 0.0}

    returns = np.diff(total_assets) / total_assets[:-1]  # daily returns

    # Annualized return (assumes 252 trading days per year)
    annual_return = (total_assets[-1] / total_assets[0]) ** (252 / len(total_assets)) - 1

    # Sharpe ratio (annual risk-free rate assumed to be 3%).
    # Guard against zero volatility to avoid a divide-by-zero.
    risk_free_rate = 0.03
    volatility = np.std(returns)
    if volatility == 0:
        sharpe_ratio = 0.0
    else:
        sharpe_ratio = (np.mean(returns) - risk_free_rate / 252) / volatility * np.sqrt(252)

    # Max drawdown: worst peak-to-trough decline of the equity curve
    cumulative_max = np.maximum.accumulate(total_assets)
    drawdown = (total_assets - cumulative_max) / cumulative_max
    max_drawdown = np.min(drawdown)

    return {
        "年化收益率": round(annual_return * 100, 2),
        "夏普比率": round(sharpe_ratio, 2),
        "最大回撤": round(max_drawdown * 100, 2)
    }


def backtest_buy_and_hold(data):
    """Backtest the buy-and-hold baseline: split the starting cash equally
    across all assets at the first tradable bar, then hold to the end.

    Args:
        data: DataFrame with one ``close_<asset>`` column per asset in
            ASSET_CODES, ordered by trading day.

    Returns:
        list of {"total_asset": float} dicts, one per simulated day, covering
        the same step range [WINDOW_SIZE, len(data) - 1) as the DQN env.
    """
    initial_cash = 100000
    asset_keys = list(ASSET_CODES.keys())
    n_assets = len(asset_keys)
    equal_weight = 1 / n_assets  # equal cash allocation per asset

    # BUG FIX: each position's share count must be derived from that asset's
    # own entry price. Previously the first asset's price was used for every
    # asset, which broke the equal-weight allocation.
    entry_prices = np.array(
        [data.iloc[WINDOW_SIZE][f"close_{asset_type}"] for asset_type in asset_keys])
    positions = (initial_cash * equal_weight) // entry_prices

    cash = initial_cash - np.sum(positions * entry_prices)

    history = []
    for step in range(WINDOW_SIZE, len(data) - 1):
        current_prices = np.array(
            [data.iloc[step][f"close_{asset_type}"] for asset_type in asset_keys])
        position_value = np.sum(positions * current_prices)
        history.append({"total_asset": cash + position_value})

    return history


def analyze_with_llm(dqn_metrics, bah_metrics):
    """Ask the Tongyi Qianwen LLM to compare the two strategies.

    Args:
        dqn_metrics: metrics dict from calculate_metrics() for the DQN strategy.
        bah_metrics: metrics dict for the buy-and-hold baseline.

    Returns:
        An HTML fragment for the front end. On any failure — network/SDK
        exception or non-200 status — returns an HTML error message instead
        of raising, so run_strategy() can still complete.
    """
    prompt = f"""请严格按以下格式分析两个交易策略的回测结果，不使用任何Markdown加粗符号（**）：
1. 综合表现对比：[分1-2句说明两者核心指标差异]
2. DQN策略特点：
- [特点1，围绕收益/风险]
- [特点2，围绕风控/适配性]
- [特点3，围绕模型潜力]
3. 可能问题与改进方向：
- [改进方向1，如特征工程/奖励函数]
- [改进方向2，如训练方式/交易成本]
- [改进方向3，如环境建模/策略集成]
4. 结论：[1句总结策略适配性]

DQN策略指标:
- 年化收益率: {dqn_metrics['年化收益率']}%
- 夏普比率: {dqn_metrics['夏普比率']}
- 最大回撤: {dqn_metrics['最大回撤']}%

买入并持有策略指标:
- 年化收益率: {bah_metrics['年化收益率']}%
- 夏普比率: {bah_metrics['夏普比率']}
- 最大回撤: {bah_metrics['最大回撤']}%
"""
    # ROBUSTNESS FIX: a network/SDK error here used to propagate and abort
    # the whole training/backtest pipeline; degrade to an error message.
    try:
        response = Generation.call(
            model="qwen-plus",
            prompt=prompt,
            result_format="text"
        )
    except Exception as exc:
        return f"<p>分析失败: {exc}</p>"

    if response.status_code == 200:
        # Convert the plain-text answer into an HTML structure the front end
        # renders directly.
        processed_text = response.output.text
        # Section headings -> <h4> tags
        processed_text = processed_text.replace("1. 综合表现对比：", "<h4>1. 综合表现对比</h4><p>")
        processed_text = processed_text.replace("2. DQN策略特点：", "</p><h4>2. DQN策略特点</h4><ul>")
        processed_text = processed_text.replace("3. 可能问题与改进方向：", "</ul><h4>3. 可能问题与改进方向</h4><ul>")
        processed_text = processed_text.replace("4. 结论：", "</ul><h4>4. 结论</h4><p>")
        # List item markers -> <li> (the front end tolerates unclosed <li>)
        processed_text = processed_text.replace("- ", "<li>")
        # Append the closing tag for the final paragraph
        return processed_text + "</p>"
    else:
        return f"<p>分析失败: {response.message}</p>"


def run_strategy():
    """Train the DQN agent, backtest both strategies, run the LLM analysis,
    and render the comparison chart to static/quant_results.png.

    Fills the module-level result globals that the API routes serve.
    """
    global dqn_history, dqn_metrics, bah_history, bah_metrics, train_loss, llm_analysis

    # 1. Fetch and preprocess the market data
    print("获取数据中...")
    data = get_multi_asset_data()
    print(f"数据获取完成，共{len(data)}个交易日")

    # 2. Initialize environment and agent
    env = TradingEnv(data)
    state_dim = env._get_state().shape[0]  # state vector length
    action_dim = 3 * env.n_assets  # action dimension (3 actions × n assets)
    agent = DQNAgent(state_dim, action_dim)

    # 3. Model training
    print("开始训练...")
    total_steps = 0
    train_loss = []
    while True:
        state = env.reset()
        done = False
        episode_loss = []
        while not done:
            # Choose an action (epsilon-greedy inside the agent — TODO confirm)
            action = agent.select_action(state)
            # Execute it. NOTE(review): the same scaled action is replicated
            # for every asset here; confirm this per-asset mapping is intended.
            next_state, reward, done, info = env.step([action // env.n_assets] * env.n_assets)
            # Store the transition in the replay buffer
            agent.store_transition(state, action, reward, next_state, done)
            # Learn from a sampled batch (returns None until learning starts)
            loss = agent.learn()
            if loss is not None:
                episode_loss.append(loss)
            # Advance to the next state
            state = next_state
            total_steps += 1

            # Periodically sync the target network with the online network
            if total_steps % TARGET_UPDATE == 0:
                agent.update_target_net()

        if episode_loss:
            train_loss.append(np.mean(episode_loss))
        print(f"训练步数：{total_steps}，当前总资产：{info['total_asset']:.2f}")
        if total_steps > 30000:  # stop after enough environment steps
            break

    # 4. Backtest evaluation
    print("开始回测...")
    # DQN strategy: reuse the trajectory recorded by the env during the
    # final training episode
    dqn_history = env.history
    dqn_metrics = calculate_metrics(dqn_history)
    # Buy-and-hold baseline backtest
    bah_history = backtest_buy_and_hold(data)
    bah_metrics = calculate_metrics(bah_history)

    # LLM-based analysis of the comparison
    print("正在进行大模型结果分析...")
    llm_analysis = analyze_with_llm(dqn_metrics, bah_metrics)
    print("大模型分析完成")

    # 5. Result visualization
    plt.figure(figsize=(12, 6))
    # Equity-curve comparison panel
    plt.subplot(1, 2, 1)
    dqn_assets = [h["total_asset"] for h in dqn_history]
    bah_assets = [h["total_asset"] for h in bah_history]
    plt.plot(dqn_assets, label="DQN策略")
    plt.plot(bah_assets, label="买入并持有")
    plt.xlabel("交易日")
    plt.ylabel("总资产（元）")
    plt.title("资金曲线对比")
    plt.legend()

    # Training-loss panel
    plt.subplot(1, 2, 2)
    plt.plot(train_loss)
    plt.xlabel("训练轮次")
    plt.ylabel("损失值")
    plt.title("DQN模型训练损失")
    plt.tight_layout()

    plt.savefig('static/quant_results.png')
    plt.close()  # close the figure to release resources

    # 6. Print the metric comparison
    print("\n=== 回测指标对比 ===")
    print("DQN策略：", dqn_metrics)
    print("买入并持有：", bah_metrics)


# Initialize strategy data at import time.
# NOTE(review): this trains the model before the server ever starts and will
# execute twice under Flask's debug reloader — consider moving it behind the
# __main__ guard or a lazy initializer; confirm deployment expectations.
print("正在运行量化策略生成数据...")
run_strategy()
print("可视化完成")


# 前后端交互接口
@app.route("/")
def index():
    """Serve the single-page front end."""
    template_name = "index.html"
    return render_template(template_name)


@app.route("/api/metrics")
def get_metrics():
    """Return the backtest metric dicts for both strategies as JSON."""
    payload = {
        "dqn": dqn_metrics,
        "buy_and_hold": bah_metrics,
    }
    return jsonify(payload)


@app.route("/api/fund-curve")
def get_fund_curve():
    """Return both equity curves plus the number of plotted days."""
    dqn_curve = []
    for record in dqn_history:
        dqn_curve.append(record["total_asset"])
    bah_curve = []
    for record in bah_history:
        bah_curve.append(record["total_asset"])
    return jsonify({
        "dqn": dqn_curve,
        "buy_and_hold": bah_curve,
        "days": len(dqn_curve),
    })


@app.route("/api/train-loss")
def get_train_loss():
    """Return the per-episode training-loss series."""
    response_body = {"loss": train_loss, "epochs": len(train_loss)}
    return jsonify(response_body)


@app.route("/api/visualization")
def get_visualization():
    """Return the URL of the pre-rendered results chart."""
    image_path = "/static/quant_results.png"
    return jsonify({"image_url": image_path})


# 新增：获取大模型分析结果的接口
@app.route("/api/llm-analysis")
def get_llm_analysis():
    return jsonify({
        "analysis": llm_analysis
    })


if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger and auto-reloader;
    # never expose this in production — confirm this is development-only.
    app.run(debug=True)