import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import random
from collections import deque
import matplotlib.pyplot as plt

import mplfinance as mpf
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec

import matplotlib.pyplot as plt
import pandas as pd


def plot_stock_closing_prices(stock_data_list, stock_names=None,
                              title='股票收盘价走势', xlabel='日期', ylabel='收盘价',
                              figsize=(12, 6), grid=True, legend_loc='upper left',
                              save_path=None, show_plot=True):
    """Draw the closing-price curves of several stocks on a single axes.

    Args:
        stock_data_list (list[pd.DataFrame]): OHLCV frames, one per stock;
            each must contain a 'close' column.
        stock_names (list[str], optional): Legend labels, one per stock;
            auto-generated when omitted.
        title (str, optional): Chart title.
        xlabel (str, optional): X-axis label.
        ylabel (str, optional): Y-axis label.
        figsize (tuple, optional): Figure size, default (12, 6).
        grid (bool, optional): Whether to draw a grid, default True.
        legend_loc (str, optional): Legend placement, default 'upper left'.
        save_path (str, optional): When given, the figure is saved there.
        show_plot (bool, optional): Whether to call plt.show(), default True.

    Raises:
        ValueError: On empty input, a non-DataFrame element, a missing
            'close' column, or a mismatched names list.
    """
    # Validate: a non-empty list whose elements are DataFrames with 'close'.
    if not isinstance(stock_data_list, list) or len(stock_data_list) == 0:
        raise ValueError("stock_data_list 必须是非空的DataFrame列表")

    for frame in stock_data_list:
        if not isinstance(frame, pd.DataFrame):
            raise ValueError("列表中的每个元素必须是pandas DataFrame")
        if 'close' not in frame.columns:
            raise ValueError("DataFrame必须包含'close'列")

    # Fall back to generated legend labels when none were supplied.
    if stock_names is None:
        stock_names = [f'股票 {i + 1}' for i in range(len(stock_data_list))]
    elif len(stock_names) != len(stock_data_list):
        raise ValueError("stock_names的长度必须与stock_data_list相同")

    plt.figure(figsize=figsize)

    # One curve per stock, plotted against the frame's own index.
    for frame, label in zip(stock_data_list, stock_names):
        plt.plot(frame.index, frame['close'], label=label)

    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.grid(grid)
    plt.legend(loc=legend_loc)

    # Slant the date tick labels so they do not overlap.
    plt.gcf().autofmt_xdate()

    # Optionally persist the figure before (optionally) showing it.
    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        print(f"图像已保存至: {save_path}")

    if show_plot:
        plt.show()

    plt.close()


# Deep Q-learning agent that trades several stocks simultaneously.
# The network exposes one 3-way head per stock (0: sell, 1: hold, 2: buy).
class DQNAgent:
    def __init__(self, state_size, n_stocks):
        """
        Args:
            state_size (int): length of the flattened observation vector.
            n_stocks (int): number of stocks the agent controls.
        """
        self.state_size = state_size
        self.n_stocks = n_stocks
        self.action_size = 3  # per-stock actions: 0 sell, 1 hold, 2 buy
        self.memory = deque(maxlen=2000)  # replay buffer
        self.gamma = 0.95  # discount rate
        self.epsilon = 1.0  # exploration rate, annealed in replay()
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001
        self.model = self._build_model()
        self.target_model = self._build_model()
        # Create the optimizer ONCE so Adam's moment estimates persist
        # across replay() calls; the original re-created it every batch,
        # which silently reset the adaptive learning-rate state.
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
        self.update_target_model()

    def _build_model(self):
        """Return an MLP mapping a state to n_stocks * 3 action values."""
        return nn.Sequential(
            nn.Linear(self.state_size, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, self.n_stocks * self.action_size),  # n_stocks heads of 3
        )

    def update_target_model(self):
        """Copy the online network's weights into the target network."""
        self.target_model.load_state_dict(self.model.state_dict())

    def remember(self, state, actions, reward, next_state, done):
        """Store one transition; `actions` is the per-stock action list."""
        self.memory.append((state, actions, reward, next_state, done))

    def act(self, state):
        """Return one action per stock, epsilon-greedy over the Q heads."""
        if np.random.rand() <= self.epsilon:
            return [random.randrange(self.action_size) for _ in range(self.n_stocks)]

        state = torch.FloatTensor(state)
        act_values = self.model(state).view(self.n_stocks, self.action_size)
        return [torch.argmax(act_values[i]).item() for i in range(self.n_stocks)]

    def replay(self, batch_size):
        """Sample a minibatch and take one gradient step on the TD error."""
        if len(self.memory) < batch_size:
            return

        minibatch = random.sample(self.memory, batch_size)

        # Stack through numpy first: building a tensor from a list of
        # arrays is far slower than tensor-from-ndarray.
        states = torch.as_tensor(np.array([t[0] for t in minibatch]), dtype=torch.float32)
        actions = torch.as_tensor(np.array([t[1] for t in minibatch]), dtype=torch.int64)  # [batch, n_stocks]
        rewards = torch.as_tensor(np.array([t[2] for t in minibatch]), dtype=torch.float32)
        next_states = torch.as_tensor(np.array([t[3] for t in minibatch]), dtype=torch.float32)
        dones = torch.as_tensor(np.array([t[4] for t in minibatch]), dtype=torch.float32)

        # Q(s, a) for the actions actually taken: [batch, n_stocks].
        current_q = self.model(states).view(batch_size, self.n_stocks, self.action_size)
        current_q = current_q.gather(2, actions.unsqueeze(2)).squeeze(2)

        # max_a' Q_target(s', a') per stock, detached from the graph.
        next_q = self.target_model(next_states).view(batch_size, self.n_stocks, self.action_size)
        next_q = next_q.max(2)[0].detach()

        # The shared scalar reward/done is broadcast over every stock head.
        target = rewards.unsqueeze(1) + (1 - dones.unsqueeze(1)) * self.gamma * next_q

        loss = nn.MSELoss()(current_q, target)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Anneal exploration toward epsilon_min.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay


# Multi-stock trading environment: each step the agent submits one action
# per stock (0: sell everything, 1: hold, 2: buy with an equal cash share).
class StockTradingEnv:
    def __init__(self, data, initial_balance=10000, transaction_cost=0.001):
        """
        Args:
            data (dict[str, pd.DataFrame]): per-stock OHLCV frames keyed by
                stock code; each frame must have a 'close' column.
            initial_balance (float): starting cash.
            transaction_cost (float): proportional fee applied to both
                buys and sells.
        """
        self.data = data
        self.stock_codes = list(data.keys())
        self.n_stocks = len(self.stock_codes)
        self.initial_balance = initial_balance
        self.transaction_cost = transaction_cost
        self.reset()

    def reset(self):
        """Reset cash, holdings and step counter; return the initial state."""
        self.balance = self.initial_balance
        self.portfolio = {code: 0 for code in self.stock_codes}
        self.current_step = 0
        self.done = False
        self.portfolio_value = [self.initial_balance]
        return self._get_state()

    def _current_prices(self):
        """Closing price of every stock at the current step (0 once a
        series is exhausted). Shared by _get_state() and step()."""
        prices = []
        for code in self.stock_codes:
            stock_data = self.data[code]
            if self.current_step < len(stock_data):
                prices.append(stock_data.iloc[self.current_step]['close'])
            else:
                prices.append(0)
        return prices

    def _get_state(self):
        """Build the observation: per stock (price, holding, cash), then
        per stock (ma5, ma20, rsi)."""
        state = []

        for code, price in zip(self.stock_codes, self._current_prices()):
            state.extend([
                price,
                self.portfolio[code],
                self.balance
            ])

        # Technical indicators over a trailing window of up to 21 closes.
        for code in self.stock_codes:
            stock_data = self.data[code]
            if self.current_step >= len(stock_data):
                ma5, ma20, rsi = 0, 0, 0
            else:
                window = min(20, self.current_step)
                closes = stock_data.iloc[max(0, self.current_step - window):self.current_step + 1]['close']
                # Moving averages are 0 until enough history has accrued.
                ma5 = closes[-min(5, len(closes)):].mean() if len(closes) >= 5 else 0
                ma20 = closes.mean() if len(closes) >= 20 else 0

                # Simple RSI from the mean of up-moves vs down-moves;
                # 100 when there are no down-moves, 50 with no history.
                if len(closes) > 1:
                    deltas = np.diff(closes)
                    up = deltas[deltas > 0].mean() if len(deltas[deltas > 0]) > 0 else 0
                    down = -deltas[deltas < 0].mean() if len(deltas[deltas < 0]) > 0 else 0
                    rsi = 100 - (100 / (1 + up / down)) if down != 0 else 100
                else:
                    rsi = 50

            state.extend([ma5, ma20, rsi])

        return np.array(state)

    def step(self, actions):
        """Execute one trading day.

        Args:
            actions (sequence[int]): one action per stock
                (0 sell, 1 hold, 2 buy).

        Returns:
            tuple: (next_state, reward, done, info); info carries the
            post-trade 'portfolio_value'.
        """
        if self.done:
            return self._get_state(), 0, True, {}

        current_prices = self._current_prices()

        # Sells first, so the freed cash is available for this step's buys.
        for i, code in enumerate(self.stock_codes):
            if actions[i] == 0 and self.portfolio[code] > 0:
                sell_value = self.portfolio[code] * current_prices[i] * (1 - self.transaction_cost)
                self.balance += sell_value
                self.portfolio[code] = 0

        # Split the available cash evenly across all buy requests.
        buy_candidates = [i for i, a in enumerate(actions) if a == 2]
        if buy_candidates:
            amount_per_stock = self.balance / len(buy_candidates)

            for i in buy_candidates:
                code = self.stock_codes[i]
                if current_prices[i] > 0:
                    max_buy = amount_per_stock / current_prices[i]
                    buy_amount = max_buy * (1 - self.transaction_cost)
                    cost = buy_amount * current_prices[i]
                    self.balance -= cost
                    self.portfolio[code] += buy_amount

        # Mark the whole portfolio to market at today's prices.
        portfolio_value = self.balance
        for i, code in enumerate(self.stock_codes):
            if self.portfolio[code] > 0 and current_prices[i] > 0:
                portfolio_value += self.portfolio[code] * current_prices[i]

        reward = portfolio_value - self.portfolio_value[-1]
        self.portfolio_value.append(portfolio_value)

        self.current_step += 1
        if all(self.current_step >= len(self.data[code]) for code in self.stock_codes):
            self.done = True

        # Zero out rewards below 2% of the previous value so the agent
        # focuses on meaningful moves. Guard against a zero previous value
        # (the original divided unconditionally).
        trade_threshold = 0.02
        prev_value = self.portfolio_value[-2]
        if prev_value != 0 and abs(reward / prev_value) < trade_threshold:
            reward = 0

        return self._get_state(), reward, self.done, {'portfolio_value': portfolio_value}


# Synthetic market-data generator used by the demo.
def generate_stock_data(code, start_date='2020-01-01', n_days=150, trend=0.0005, volatility=0.02, start_price=50):
    """Simulate one stock's daily OHLCV history as a geometric random walk.

    Args:
        code (str): stock code stored in the 'code' column.
        start_date (str): first calendar day of the series.
        n_days (int): number of daily rows.
        trend (float): mean daily return.
        volatility (float): daily return standard deviation.
        start_price (float): price the walk starts from.

    Returns:
        pd.DataFrame: columns date/open/high/low/close/volume/code.
    """
    # Geometric random walk for the base price path.
    daily_returns = np.random.normal(trend, volatility, n_days)
    price = start_price * np.cumprod(1 + daily_returns)

    dates = pd.date_range(start=start_date, periods=n_days)
    df = pd.DataFrame(index=dates)
    df['open'] = price
    df['high'] = price * (1 + np.random.uniform(0, 0.015, n_days))
    df['low'] = price * (1 - np.random.uniform(0, 0.015, n_days))
    df['close'] = price
    df['volume'] = np.random.randint(10000, 500000, n_days)
    df['code'] = code

    # Calendar effects: small close bumps on Fridays and early in a month.
    for i in range(len(df)):
        if df.index[i].weekday() == 4:
            df.at[df.index[i], 'close'] *= 1 + np.random.uniform(0, 0.01)
        if df.index[i].day <= 5:
            df.at[df.index[i], 'close'] *= 1 + np.random.uniform(0, 0.005)

    # Re-derive high/low so the adjusted close stays inside the daily range.
    df['high'] = df[['open', 'high', 'low', 'close']].max(axis=1)
    df['low'] = df[['open', 'high', 'low', 'close']].min(axis=1)

    # Rare +-10% shock days (~1% of the sample). Applied after the
    # high/low fix, so a shocked close may fall outside that range.
    shock_days = np.random.choice(n_days, size=int(n_days * 0.01), replace=False)
    df.loc[df.index[shock_days], 'close'] *= np.random.choice([0.9, 1.1], size=len(shock_days))
    # (Removed stray debug `print(df)` that dumped every frame to stdout.)
    return df.reset_index().rename(columns={'index': 'date'})


def generate_multiple_stocks():
    """Build the five synthetic stocks used by the demo.

    Returns:
        list[pd.DataFrame]: STABLE, GROWTH, DECLINE, VOLATILE and CYCLICAL.
    """
    # Four plain profiles generated in a fixed order (keeps the RNG
    # sequence identical run to run for a given seed).
    profiles = [
        ('STABLE', 0.0008, 0.015),
        ('GROWTH', 0.001, 0.04),
        ('DECLINE', -0.0005, 0.018),
        ('VOLATILE', 0.0001, 0.03),
    ]
    stocks = [generate_stock_data(name, trend=t, volatility=v) for name, t, v in profiles]

    # CYCLICAL gets an extra bump at the end of every third month.
    cyclical = generate_stock_data('CYCLICAL', trend=0.0002, volatility=0.025)
    for idx in range(len(cyclical)):
        row_date = cyclical.iloc[idx]['date']
        if row_date.month % 3 == 0 and row_date.day > 25:
            cyclical.at[idx, 'close'] *= 1 + np.random.uniform(0, 0.02)
    stocks.append(cyclical)
    return stocks


def prepare_stock_data(stock_data_list):
    """Index the raw stock frames by code, date-sorted and NaN-free.

    Args:
        stock_data_list (list[pd.DataFrame]): frames that carry 'code'
            and 'date' columns.

    Returns:
        dict[str, pd.DataFrame]: one cleaned frame per stock code.
    """
    prepared = {}
    for raw in stock_data_list:
        frame = raw.copy()  # never mutate the caller's frame
        frame['date'] = pd.to_datetime(frame['date'])
        frame = frame.sort_values('date').dropna()
        prepared[raw['code'].iloc[0]] = frame
    return prepared


# Training loop for the multi-stock DQN agent.
def train_agent(env, agent, episodes=1000, batch_size=32, window_size=10):
    """Train `agent` on `env` and plot per-episode portfolio values.

    Args:
        env (StockTradingEnv): trading environment.
        agent (DQNAgent): agent to train (modified in place).
        episodes (int): number of training episodes.
        batch_size (int): replay minibatch size.
        window_size (int): leading offset applied to the step counter.

    Returns:
        DQNAgent: the same agent object, after training.
    """
    portfolio_values = []

    for e in range(episodes):
        state = env.reset()
        state = np.reshape(state, [1, agent.state_size])
        total_reward = 0
        # Seed `info` so the post-loop append cannot raise NameError when
        # the step loop body never runs (window_size >= series length - 1).
        info = {'portfolio_value': env.portfolio_value[-1]}

        # NOTE(review): the range starts at window_size, but env.reset()
        # puts the environment at day 0 — the offset only shortens the
        # episode rather than skipping a warm-up window; confirm intended.
        for _ in range(window_size, len(env.data[env.stock_codes[0]]) - 1):
            actions = agent.act(state[0])

            next_state, reward, done, info = env.step(actions)
            next_state = np.reshape(next_state, [1, agent.state_size])

            agent.remember(state[0], actions, reward, next_state[0], done)

            total_reward += reward
            state = next_state

            if done:
                print(
                    f"episode: {e}/{episodes}, portfolio value: {info['portfolio_value']:.2f}, reward: {total_reward:.2f}")
                break

            if len(agent.memory) > batch_size:
                agent.replay(batch_size)

        portfolio_values.append(info['portfolio_value'])

        # Periodically sync the target network for stable Q-targets.
        if e % 10 == 0:
            agent.update_target_model()

    plt.plot(portfolio_values)
    plt.title('Portfolio Value Over Episodes')
    plt.xlabel('Episode')
    plt.ylabel('Portfolio Value')
    plt.show()

    return agent


# Entry point: generate data, visualize it, train the agent, evaluate it.
def main():
    """Run the full demo: data generation, plotting, training and testing."""
    # Generate synthetic price histories for five stocks.
    stocks_data = generate_multiple_stocks()

    # Plot all closing-price curves on one chart.
    plot_stock_closing_prices(
        stock_data_list=stocks_data,
        stock_names=["A","B","C","D","E"],  # one label per generated stock
        title='Price Comparison',  # fixed typo: was 'Price Comparsion'
        xlabel='Date',
        ylabel='Close',
        figsize=(12, 6),
        grid=True,
        legend_loc='upper left',
        save_path='stock_closing_prices.png',
        show_plot=True
    )

    # Clean and index the data by stock code.
    processed_data = prepare_stock_data(stocks_data)

    # Build the environment and agent; state size comes from the env itself.
    env = StockTradingEnv(processed_data)
    state_size = len(env._get_state())

    agent = DQNAgent(state_size, env.n_stocks)

    # Train the agent.
    print("开始训练...")
    trained_agent = train_agent(env, agent, episodes=100)

    # Roll the trained agent through a fresh environment and record
    # its actions and portfolio value day by day.
    print("\n开始测试...")
    test_env = StockTradingEnv(processed_data)
    state = test_env.reset()
    state = np.reshape(state, [1, state_size])
    done = False
    portfolio_values = []
    actions_history = {code: [] for code in test_env.stock_codes}

    while not done:
        actions = trained_agent.act(state[0])

        for i, code in enumerate(test_env.stock_codes):
            actions_history[code].append(actions[i])

        next_state, reward, done, info = test_env.step(actions)
        next_state = np.reshape(next_state, [1, state_size])
        state = next_state
        portfolio_values.append(info['portfolio_value'])

        if done:
            print(f"最终投资组合价值: {info['portfolio_value']:.2f}")
            print(f"初始资金: {test_env.initial_balance:.2f}")
            print(f"收益率: {(info['portfolio_value'] / test_env.initial_balance - 1) * 100:.2f}%")
            break

    # Portfolio value over the test run.
    plt.figure(figsize=(12, 6))
    plt.plot(portfolio_values)
    plt.title('Portfolio Value During Testing')
    plt.xlabel('Day')
    plt.ylabel('Portfolio Value')
    plt.grid(True)
    plt.show()

    # Per-stock action traces, one subplot per stock.
    plt.figure(figsize=(12, 8))
    for i, code in enumerate(test_env.stock_codes):
        plt.subplot(len(test_env.stock_codes), 1, i + 1)
        plt.plot(actions_history[code], label=f'{code} Actions')
        plt.title(f'{code} - Trading Actions Over Time')
        plt.ylabel('Action')
        plt.legend()
    plt.tight_layout()
    plt.show()

    # Final holdings report: cash plus each position marked to market.
    print("\n最终持仓情况:")
    total_value = test_env.balance
    for code in test_env.stock_codes:
        stock_data = test_env.data[code]
        if test_env.current_step < len(stock_data):
            price = stock_data.iloc[test_env.current_step]['close']
        else:
            price = 0
        value = test_env.portfolio[code] * price
        total_value += value
        print(f"{code}: 持仓量 {test_env.portfolio[code]:.2f}, 当前价格 {price:.2f}, 价值 {value:.2f}")
    print(f"现金余额: {test_env.balance:.2f}")
    print(f"总投资组合价值: {total_value:.2f}")


# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()