import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
from collections import deque

# Global compute device: first CUDA GPU if available, otherwise CPU.
# Referenced by DRQN_GRU.init_hidden and DQNAgent for all tensor placement.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# Recurrent Q-network: a GRU encodes the observation sequence, then a
# two-layer MLP head maps the final GRU output to per-action Q-values.
class DRQN_GRU(nn.Module):
    def __init__(self, state_size, action_size, sequence_length=10, gru_hidden_size=128):
        """Build the recurrent Q-network.

        Args:
            state_size: dimensionality of one observation vector.
            action_size: number of discrete actions (output width).
            sequence_length: nominal input sequence length (stored only).
            gru_hidden_size: width of the GRU hidden state.
        """
        super().__init__()
        self.sequence_length = sequence_length
        self.gru_hidden_size = gru_hidden_size
        self.action_size = action_size
        self.state_size = state_size

        # Recurrent encoder over the state sequence (batch-first layout).
        self.gru = nn.GRU(input_size=state_size, hidden_size=gru_hidden_size, batch_first=True)
        self.dropout = nn.Dropout(0.1)
        # MLP head producing one Q-value per action.
        self.fc1 = nn.Linear(gru_hidden_size, 128)
        self.fc2 = nn.Linear(128, action_size)

    def forward(self, x, hidden=None):
        """Return (q_values, hidden) for a batch of state sequences.

        Args:
            x: tensor of shape [batch, seq_len, state_size].
            hidden: optional GRU state [1, batch, gru_hidden_size]; zeros if None.

        Returns:
            q_values: [batch, action_size] for the final time step only.
            hidden: updated GRU state.
        """
        seq_features, hidden = self.gru(x, hidden)
        last_step = seq_features[:, -1, :]  # keep only the final time step
        activated = self.dropout(torch.relu(self.fc1(last_step)))
        return self.fc2(activated), hidden

    def init_hidden(self, batch_size):
        """Fresh all-zero GRU hidden state, placed on the module-level `device`."""
        return torch.zeros(1, batch_size, self.gru_hidden_size).to(device)


# Prioritized experience replay over fixed-length transition sequences
# (proportional variant: P(i) ~ priority_i ** alpha).
class PrioritizedSequenceReplayBuffer:
    def __init__(self, capacity, sequence_length=10, alpha=0.6, beta=0.4, beta_increment=0.001):
        """Create an empty buffer.

        Args:
            capacity: maximum number of stored sequences (oldest evicted first).
            sequence_length: length of each stored sequence (stored only).
            alpha: priority exponent; 0 gives uniform sampling.
            beta: initial importance-sampling exponent, annealed toward 1.
            beta_increment: per-sample() increment applied to beta.
        """
        self.buffer = deque(maxlen=capacity)
        self.priorities = deque(maxlen=capacity)
        self.sequence_length = sequence_length
        self.alpha = alpha  # priority exponent
        self.beta = beta  # importance-sampling exponent
        self.beta_increment = beta_increment
        self.epsilon = 1e-5  # keeps every priority strictly positive

    def push(self, sequence, priority):
        """Store a sequence with the given priority.

        Epsilon is added here so a zero priority can never be stored; raw
        zeros would make the probability normalization in sample() divide
        by zero (NaNs) once all priorities are zero.
        """
        self.buffer.append(sequence)
        self.priorities.append(float(priority) + self.epsilon)

    def sample(self, batch_size):
        """Draw `batch_size` sequences with probability ~ priority**alpha.

        Returns:
            samples: list of stored sequences (sampled with replacement).
            weights: importance-sampling weights, normalized so max == 1.
            indices: buffer indices of the samples (for update_priorities).
        """
        priorities = np.asarray(self.priorities, dtype=np.float64)
        probs = priorities ** self.alpha
        probs /= probs.sum()
        indices = np.random.choice(len(self.buffer), batch_size, p=probs)
        samples = [self.buffer[idx] for idx in indices]
        # IS weights correct the bias introduced by non-uniform sampling.
        weights = (len(self.buffer) * probs[indices]) ** (-self.beta)
        weights /= weights.max()  # normalize so the largest weight is 1
        # Anneal beta toward 1 (full bias correction late in training).
        self.beta = min(1.0, self.beta + self.beta_increment)
        return samples, weights, indices

    def update_priorities(self, indices, priorities):
        """Overwrite the priorities at `indices` (typically with |TD error|)."""
        for idx, priority in zip(indices, priorities):
            self.priorities[idx] = priority + self.epsilon

    def __len__(self):
        return len(self.buffer)

    def clear(self):
        """Drop all stored sequences and priorities."""
        self.buffer.clear()
        self.priorities.clear()


# DRQN-GRU agent: epsilon-greedy acting, prioritized sequence replay,
# and Double-DQN targets with a periodically synced target network.
class DQNAgent:
    def __init__(self, state_size, action_size, sequence_length=40, gamma=0.99, epsilon=1,
                 epsilon_min=0.01, epsilon_decay=0.999, learning_rate=0.001,
                 batch_size=32, memory_size=80000):
        """Build the online/target networks, optimizer, and replay buffer.

        Args:
            state_size: dimensionality of one observation vector.
            action_size: number of discrete actions.
            sequence_length: length of state sequences fed to the GRU.
            gamma: discount factor.
            epsilon, epsilon_min, epsilon_decay: epsilon-greedy schedule.
            learning_rate: Adam learning rate for the online network.
            batch_size: sequences per training step.
            memory_size: replay buffer capacity (in sequences).
        """
        self.state_size = state_size
        self.action_size = action_size
        self.sequence_length = sequence_length
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_decay
        self.batch_size = batch_size
        self.memory = PrioritizedSequenceReplayBuffer(memory_size, sequence_length)

        # Online and target networks start with identical weights.
        self.q_network = DRQN_GRU(state_size, action_size, sequence_length).to(device)
        self.target_network = DRQN_GRU(state_size, action_size, sequence_length).to(device)
        self.target_network.load_state_dict(self.q_network.state_dict())
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=learning_rate)

        # Recurrent hidden state carried across take_action() calls.
        self.hidden = None

    def reset_epsilon(self):
        """Restore full exploration (epsilon = 1.0)."""
        self.epsilon = 1.0

    def reset_hidden(self):
        """Reset the acting hidden state (call at episode start)."""
        self.hidden = self.q_network.init_hidden(1)

    def take_action(self, state_sequence):
        """Epsilon-greedy action for one state sequence.

        Args:
            state_sequence: array-like [sequence_length, state_size].

        Returns:
            int action index.

        The forward pass always runs — even when a random action is taken —
        so the recurrent hidden state stays in sync with the episode.
        """
        state_tensor = torch.FloatTensor(state_sequence).unsqueeze(0).to(device)
        with torch.no_grad():
            if self.hidden is None:
                self.hidden = self.q_network.init_hidden(1)
            q_values, self.hidden = self.q_network(state_tensor, self.hidden)
            if random.random() < self.epsilon:
                action = random.randrange(self.action_size)
            else:
                action = q_values.argmax().item()
        return action

    def store_experience(self, sequence):
        """Insert a sequence with the current max priority (or 1.0 if empty),
        so new experience is sampled at least once before being down-weighted."""
        if len(self.memory.priorities) > 0:
            max_priority = max(self.memory.priorities)
        else:
            max_priority = 1.0
        self.memory.push(sequence, max_priority)

    def train(self):
        """One gradient step on a prioritized batch of sequences.

        Returns early (no-op) until the buffer holds a full batch. Uses
        Double DQN targets: the ONLINE network selects the greedy next
        action and the TARGET network evaluates it, which avoids the
        overestimation bias of evaluating with the same network that
        selects. Priorities are refreshed with the new |TD errors|.
        """
        if len(self.memory) < self.batch_size:
            return
        samples, weights, indices = self.memory.sample(self.batch_size)
        states, actions, rewards, next_states, dones = [], [], [], [], []
        for seq in samples:
            state_seq, action_seq, reward_seq, next_state_seq, done_seq = zip(*seq)
            states.append(state_seq)
            actions.append(action_seq)
            rewards.append(reward_seq)
            next_states.append(next_state_seq)
            dones.append(done_seq)
        states = torch.FloatTensor(np.array(states)).to(device)  # [batch, seq_len, state_size]
        actions = torch.LongTensor(np.array(actions)).to(device)  # [batch, seq_len]
        rewards = torch.FloatTensor(np.array(rewards)).to(device)
        next_states = torch.FloatTensor(np.array(next_states)).to(device)
        dones = torch.FloatTensor(np.array(dones)).to(device)

        # Fresh zero hidden states for every batch.
        hidden = self.q_network.init_hidden(self.batch_size)
        target_hidden = self.target_network.init_hidden(self.batch_size)

        # Q(s, a) for the action actually taken at the LAST step of each sequence.
        q_values, _ = self.q_network(states, hidden)
        q_values = q_values.gather(1, actions[:, -1].unsqueeze(1)).squeeze(1)

        with torch.no_grad():
            # Double DQN: online network chooses the next action...
            next_q_online, _ = self.q_network(next_states, self.q_network.init_hidden(self.batch_size))
            next_actions = next_q_online.argmax(1)
            # ...target network evaluates that choice.
            next_q_target, _ = self.target_network(next_states, target_hidden)
            next_q = next_q_target.gather(1, next_actions.unsqueeze(1)).squeeze(1)
            target_q_values = rewards[:, -1] + (1 - dones[:, -1]) * self.gamma * next_q

        # New |TD errors| become the replay priorities for this batch.
        td_errors = (q_values - target_q_values).abs().detach().cpu().numpy()
        self.memory.update_priorities(indices, td_errors)

        # Importance-sampling-weighted MSE loss.
        weights = torch.FloatTensor(weights).to(device)
        loss = (weights * (q_values - target_q_values) ** 2).mean()

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Anneal exploration after each training step.
        self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)

    def update_target_network(self):
        """Hard-copy online weights into the target network."""
        self.target_network.load_state_dict(self.q_network.state_dict())

    def model_save(self, path):
        """Best-effort save of both networks' weights under `path`."""
        try:
            torch.save(self.q_network.state_dict(), f'{path}/drqn_eval.pkl')
            torch.save(self.target_network.state_dict(), f'{path}/drqn_target.pkl')
            print("successfully save")
        except Exception as exc:
            # Best-effort: report the reason instead of swallowing it silently.
            print(f'save fail: {exc}')

    def best_model_save(self, path):
        """Best-effort save of the current best networks under `path`."""
        try:
            torch.save(self.q_network.state_dict(), f'{path}/best_drqn_eval.pkl')
            torch.save(self.target_network.state_dict(), f'{path}/best_drqn_target.pkl')
            print("successfully save")
        except Exception as exc:
            print(f'save fail: {exc}')

    def model_read(self, path):
        """Best-effort load of the best saved networks from `path`."""
        try:
            self.q_network.load_state_dict(torch.load(f'{path}/best_drqn_eval.pkl'))
            self.target_network.load_state_dict(torch.load(f'{path}/best_drqn_target.pkl'))
            print("successfully read")
        except Exception as exc:
            print(f'read fail: {exc}')
