import numpy as np
import pandas as pd
from collections import defaultdict
import random

class ProcessMiningRLAgent:
    """Tabular Q-learning agent that learns activity-transition policies
    from process-mining event logs.

    A state is a ``(previous_activity, current_activity, step_count)`` tuple
    and an action is an activity. The Q-table maps each state key to a
    vector of Q-values indexed by activity position.
    """

    def __init__(self, activities, alpha=0.1, gamma=0.9, epsilon=0.1):
        """Initialize the reinforcement-learning agent.

        Args:
            activities: Iterable of all possible activities. Stored as a
                list so it can be indexed when extracting the greedy policy.
            alpha: Learning rate in (0, 1].
            gamma: Discount factor in [0, 1].
            epsilon: Exploration probability in [0, 1].
        """
        self.activities = list(activities)
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon = epsilon
        # Every unseen state lazily gets a zero-initialized Q-value vector.
        self.q_table = defaultdict(lambda: np.zeros(len(self.activities)))
        # Activity -> column index into each state's Q-value vector.
        self.activity_to_idx = {act: idx for idx, act in enumerate(self.activities)}

    def state_to_key(self, state):
        """Convert a state (iterable of hashables) into a hashable Q-table key."""
        return tuple(state)

    def choose_action(self, state, available_actions):
        """Pick an action epsilon-greedily from ``available_actions``.

        Args:
            state: Current state.
            available_actions: Non-empty list of currently legal activities.

        Returns:
            The chosen activity.
        """
        if random.uniform(0, 1) < self.epsilon:
            # Explore: uniform random choice among the legal actions.
            return random.choice(available_actions)
        # Exploit: take the legal action with the highest Q-value
        # (ties resolve to the earliest-listed action, as before).
        q_values = self.q_table[self.state_to_key(state)]
        return max(available_actions,
                   key=lambda act: q_values[self.activity_to_idx[act]])

    def learn(self, state, action, reward, next_state, done):
        """Apply one Q-learning update for an observed transition.

        Args:
            state: State the action was taken in.
            action: Activity taken; must be one of ``self.activities``.
            reward: Immediate reward received.
            next_state: Resulting state (ignored when ``done`` is True).
            done: Whether the episode ended with this transition.
        """
        state_key = self.state_to_key(state)
        action_idx = self.activity_to_idx[action]
        current_q = self.q_table[state_key][action_idx]

        # Terminal transitions bootstrap from 0; otherwise from the best
        # Q-value of the next state. next_state is only touched when needed,
        # so a terminal call never dereferences it.
        if done:
            max_next_q = 0.0
        else:
            max_next_q = np.max(self.q_table[self.state_to_key(next_state)])

        # Standard Q-learning update: Q += alpha * (TD target - Q).
        self.q_table[state_key][action_idx] = current_q + self.alpha * (
            reward + self.gamma * max_next_q - current_q
        )

    def update_from_event_log(self, event_log, num_episodes=1000):
        """Replay traces from an event log and update the Q-table.

        Each episode replays one randomly chosen case in timestamp order,
        treating the recorded next activity as the action taken
        (behavioral cloning of the trace).

        Args:
            event_log: DataFrame with at least ``case_id``, ``activity``
                and ``timestamp`` columns.
            num_episodes: Number of replay episodes.
        """
        # Loop-invariant: the set of case ids never changes between episodes.
        case_ids = event_log['case_id'].unique()

        for episode in range(num_episodes):
            selected_case = np.random.choice(case_ids)
            case_events = (event_log[event_log['case_id'] == selected_case]
                           .sort_values('timestamp'))

            # A case needs at least two events to form one transition.
            if len(case_events) < 2:
                continue

            activity_seq = case_events['activity'].tolist()
            state = self._initialize_state()
            total_reward = 0
            last = len(activity_seq) - 2  # index of the final transition

            for i in range(len(activity_seq) - 1):
                current_activity = activity_seq[i]
                next_activity = activity_seq[i + 1]

                # Replay the logged transition as the chosen action.
                action = next_activity
                reward = self._calculate_reward(current_activity, next_activity)

                done = i == last
                if done:
                    # Fold the end-of-case reward into the final transition
                    # instead of issuing a separate update with action=None
                    # (which previously crashed on activity_to_idx[None]).
                    reward += self._calculate_final_reward(case_events)

                next_state = self._update_state(state, next_activity)
                self.learn(state, action, reward, next_state, done)

                state = next_state
                total_reward += reward

            if episode % 100 == 0:
                print(f"Episode {episode}, Total Reward: {total_reward}")

    def _initialize_state(self):
        """Return the initial state: ``('start', 'start', 0)``."""
        # Simplified state representation:
        # (previous activity, current activity, step count).
        return ('start', 'start', 0)

    def _update_state(self, state, next_activity):
        """Shift the activity window forward and bump the step counter."""
        _previous, current_activity, count = state
        return (current_activity, next_activity, count + 1)

    def _get_available_actions(self, current_activity):
        """Return the legal follow-up activities (simplified: all of them).

        A real deployment would derive these from a process model.
        """
        return self.activities

    def _calculate_reward(self, current_activity, next_activity):
        """Immediate reward for a transition (simplified).

        A constant -1 per step penalizes long traces, encouraging
        shorter paths. Real applications should encode business goals.
        """
        return -1

    def _calculate_final_reward(self, case_events):
        """Reward for completing a case (simplified to a flat bonus).

        Real applications should score the case outcome.
        """
        return 100

    def get_optimal_policy(self):
        """Return the greedy policy: best-known activity for each seen state."""
        return {
            state: self.activities[int(np.argmax(q_values))]
            for state, q_values in self.q_table.items()
        }