import random
from collections import deque

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class DQN(nn.Module):
    """Three-layer MLP mapping a state vector to one Q-value per action."""

    def __init__(self, state_size, action_size):
        super().__init__()
        hidden = 64  # width of both hidden layers
        self.fc1 = nn.Linear(state_size, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, action_size)

    def forward(self, x):
        """Return raw Q-values; the output layer has no activation."""
        h1 = F.relu(self.fc1(x))
        h2 = F.relu(self.fc2(h1))
        return self.fc3(h2)

class ProcessMiningDQNAgent:
    def __init__(self, state_size, action_size):
        """Set up replay memory, the exploration schedule, and the Q-network.

        Args:
            state_size: dimensionality of the state vector.
            action_size: number of discrete actions.
        """
        self.state_size = state_size
        self.action_size = action_size
        # Epsilon-greedy exploration: start fully random, multiply by the decay
        # factor after each learning step, never below the floor.
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        # Discount factor for bootstrapped future rewards.
        self.gamma = 0.95
        # Replay buffer capped at 10k transitions; oldest entries are evicted.
        self.memory = deque(maxlen=10000)
        # Online Q-network and its Adam optimizer.
        self.learning_rate = 0.001
        self.model = DQN(state_size, action_size)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
        
    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))  # 存储经验元组
        
    def act(self, state, available_actions):
        if np.random.rand() <= self.epsilon:  # 以epsilon概率随机探索
            return random.choice(available_actions)  # 从可用动作中随机选择
    
        state = torch.FloatTensor(state)  # 将状态转换为张量
        act_values = self.model(state)  # 通过网络获取各动作的Q值
        
        # 只考虑可用动作
        available_indices = [i for i in range(self.action_size) if i in available_actions]
        best_action = available_indices[0]  # 初始化最佳动作为第一个可用动作
        max_q = act_values[best_action].item()  # 初始化最大Q值
        
        # 遍历所有可用动作，找出Q值最大的动作
        for i in available_indices[1:]:
            if act_values[i].item() > max_q:
                max_q = act_values[i].item()
                best_action = i
            
        return best_action  # 返回最佳动作
    
    def save_model(self, path: str) -> None:
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'epsilon': self.epsilon
        }, path)

    def load_model(self, path: str) -> None:
        checkpoint = torch.load(path)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.epsilon = checkpoint['epsilon']
    
    def replay(self, batch_size):
        if len(self.memory) < batch_size:  # 如果记忆不足一个批次，直接返回
            return
        minibatch = random.sample(self.memory, batch_size)  # 随机采样一个批次的经验
        
        # 将经验转换为张量
        states = torch.FloatTensor(np.array([t[0] for t in minibatch]))
        actions = torch.LongTensor(np.array([t[1] for t in minibatch]))
        rewards = torch.FloatTensor(np.array([t[2] for t in minibatch]))
        next_states = torch.FloatTensor(np.array([t[3] for t in minibatch]))
        dones = torch.FloatTensor(np.array([t[4] for t in minibatch]))
        
        # 计算当前Q值和目标Q值
        current_q = self.model(states).gather(1, actions.unsqueeze(1))
        next_q = self.model(next_states).max(1)[0].detach()
        target = rewards + (1 - dones) * self.gamma * next_q
        
        # 计算损失并更新网络
        loss = F.mse_loss(current_q.squeeze(), target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        
        # 衰减探索率
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
    
    def train_from_event_log(self, event_log, batch_size=32, episodes=1000):
        """Train the agent by replaying traces sampled from a process event log.

        Args:
            event_log: pandas DataFrame with at least 'case_id', 'activity',
                and 'timestamp' columns.
            batch_size: minibatch size for experience replay.
            episodes: number of training episodes (one sampled case each).
        """
        for e in range(episodes):
            # Sample one case (trace) and order its events chronologically.
            case_ids = event_log['case_id'].unique()
            selected_case = np.random.choice(case_ids)
            case_events = event_log[event_log['case_id'] == selected_case].sort_values('timestamp')

            state = self._initialize_state()
            total_reward = 0
            last_transition = None  # (state, action, next_state) of the final step

            # Walk consecutive event pairs in the trace.
            for i in range(len(case_events) - 1):
                current_activity = case_events.iloc[i]['activity']
                next_activity = case_events.iloc[i + 1]['activity']

                available_actions = self._get_available_actions(current_activity)
                action = self.act(state, available_actions)

                reward = self._calculate_reward(current_activity, next_activity)
                next_state = self._update_state(state, next_activity)

                # Mark the last pair of the trace as terminal so replay() stops
                # bootstrapping there (previously no transition ever carried done=True).
                done = (i == len(case_events) - 2)
                self.remember(state, action, reward, next_state, done)
                last_transition = (state, action, next_state)
                state = next_state
                total_reward += reward

                self.replay(batch_size)

            # Credit the trace-level reward on the final real transition.
            # BUGFIX: the original stored (state, None, reward, None, True), and a
            # None action/next_state crashes replay() when that tuple is sampled
            # (it cannot be batched into the action/next_state tensors).
            final_reward = self._calculate_final_reward(case_events)
            if last_transition is not None:
                s, a, ns = last_transition
                self.remember(s, a, final_reward, ns, True)

            # Periodic progress report.
            if e % 100 == 0:
                print(f"Episode: {e}, Total Reward: {total_reward + final_reward}, Epsilon: {self.epsilon}")