import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import seaborn as sns
from collections import defaultdict, Counter
import random
from datetime import datetime, timedelta
from typing import Dict, List, Tuple, Set, Optional, Any
import pickle
import os


class ProcessMiningEnvironment:
    """
    Process-mining environment built from a business-process event log.

    Focused on a purchase-to-pay style process; exposes a multi-dimensional
    state and a reward that combines transition time, cost and compliance.
    """
    def __init__(self, event_log: pd.DataFrame,
                 end_activities: List[str],
                 max_steps: int = 30,
                 compliance_rules: Optional[Dict[str, Any]] = None,
                 time_weight: float = 0.5,
                 cost_weight: float = 0.3,
                 compliance_weight: float = 0.2):
        """
        Initialize the process environment.

        Args:
            event_log: DataFrame of process events; must contain the
                columns 'case_id', 'activity' and 'timestamp'.
            end_activities: names of terminating activities.
            max_steps: maximum number of steps per episode.
            compliance_rules: optional compliance-rule dict; see
                ``is_compliant`` for the supported keys.
            time_weight: weight of time in the reward function.
            cost_weight: weight of cost in the reward function.
            compliance_weight: weight of compliance in the reward function.

        Raises:
            ValueError: if none of ``end_activities`` occurs in the log.
        """
        # Data preprocessing: sort once so every per-case, time-ordered
        # computation below can rely on the ordering.
        self.log = event_log.copy()
        self.log.sort_values(by=['case_id', 'timestamp'], inplace=True)
        self.log['duration'] = self._calculate_activity_durations()

        # Basic attributes: stable activity <-> id mappings.
        self.activities = sorted(list(self.log['activity'].unique()))
        self.activity_to_id = {act: i for i, act in enumerate(self.activities)}
        self.id_to_activity = {i: act for act, i in self.activity_to_id.items()}
        self.num_activities = len(self.activities)

        # Environment configuration.
        self.end_activities = end_activities
        self.end_activity_ids = [self.activity_to_id[act] for act in end_activities if act in self.activity_to_id]
        if not self.end_activity_ids:
            raise ValueError(f"No valid end activities found among {end_activities}")
        self.max_steps = max_steps

        # Process model mined from the log.
        self.transitions = self._build_transition_model()
        self.transition_times = self._calculate_transition_times()
        self.transition_costs = self._calculate_transition_costs()
        self.start_activities = self._find_start_activities()

        # Empirical start-activity distribution, precomputed once so that
        # reset() does not have to re-scan the log on every episode.
        start_counts = Counter(self.log.groupby('case_id')['activity'].first())
        total_starts = sum(start_counts.values())
        self._start_probs = [start_counts.get(act, 0) / total_starts
                             for act in self.activities]

        # Compliance rules (empty dict means "everything is compliant").
        self.compliance_rules = compliance_rules or {}

        # Reward weights.
        self.time_weight = time_weight
        self.cost_weight = cost_weight
        self.compliance_weight = compliance_weight

        # Episode state; current_activity_id == -1 means "needs reset".
        self.current_step = 0
        self.current_activity_id = -1
        self.current_case_attrs = {}
        self.current_path = []
        self._episode_return = 0.0  # cumulative reward of the running episode

        # Metrics for statistics and analysis.
        self.episode_stats = {
            'path_lengths': [],
            'total_rewards': [],
            'completion_rate': 0,
            'avg_time': 0,
            'avg_cost': 0,
            'compliance_rate': 0,
            'episodes': 0,  # finished episodes (completed or timed out)
        }

    def _calculate_activity_durations(self) -> pd.Series:
        """
        Return, for each event, the time until the next event of the same case.

        The last event of a case has no successor, so it falls back to a
        default duration of 60 seconds.  Relies on ``self.log`` already being
        sorted by ('case_id', 'timestamp') in ``__init__``.
        """
        # Vectorized per-case difference: next timestamp minus current one.
        # The original row-by-row implementation additionally referenced a
        # not-yet-existing 'duration' column (KeyError at construction time);
        # both problems are fixed here.
        next_ts = self.log.groupby('case_id')['timestamp'].shift(-1)
        durations = next_ts - self.log['timestamp']
        # Last event of each case is NaT after the shift: apply the default.
        return durations.fillna(pd.Timedelta(seconds=60))

    def _build_transition_model(self) -> Dict[int, Dict[int, float]]:
        """Mine the transition model (directly-follows probabilities) from the log."""
        transitions = defaultdict(lambda: defaultdict(int))

        # Count activity transitions within each case.
        for case_id, group in self.log.groupby('case_id'):
            activities = group['activity'].tolist()
            for i in range(len(activities) - 1):
                from_act = self.activity_to_id[activities[i]]
                to_act = self.activity_to_id[activities[i+1]]
                transitions[from_act][to_act] += 1

        # Normalize the raw counts into probabilities.
        for from_act, to_acts in transitions.items():
            total = sum(to_acts.values())
            for to_act in to_acts:
                transitions[from_act][to_act] = to_acts[to_act] / total

        return transitions

    def _calculate_transition_times(self) -> Dict[Tuple[int, int], float]:
        """Compute the mean transition time (in hours) between activity pairs."""
        transition_times = defaultdict(list)

        for case_id, group in self.log.groupby('case_id'):
            events = group.sort_values('timestamp')
            activities = events['activity'].tolist()
            timestamps = events['timestamp'].tolist()

            for i in range(len(activities) - 1):
                from_act = self.activity_to_id[activities[i]]
                to_act = self.activity_to_id[activities[i+1]]
                time_diff = (timestamps[i+1] - timestamps[i]).total_seconds() / 3600  # hours
                transition_times[(from_act, to_act)].append(time_diff)

        # Average observed times; default to 24h (one day) when unobserved.
        avg_times = {}
        for trans, times in transition_times.items():
            avg_times[trans] = sum(times) / len(times) if times else 24

        return avg_times

    def _calculate_transition_costs(self) -> Dict[Tuple[int, int], float]:
        """
        Compute an average cost per transition.

        In a real deployment this would come from an ERP system or cost-center
        data; here it is simulated as a linear function of transition time
        with a random variability factor.
        """
        transition_costs = {}

        for trans, time in self.transition_times.items():
            from_act, to_act = trans

            # Complex approval-style activities are assumed to cost more.
            complexity_factor = 1.0
            if any(keyword in self.id_to_activity[to_act].lower() for keyword in ['approval', 'review', 'check']):
                complexity_factor = 2.0

            # Base cost: assume 50 cost units of labor per hour.
            base_cost = time * 50

            # Random factor to simulate real-world variability.
            random_factor = np.random.uniform(0.8, 1.2)

            transition_costs[trans] = base_cost * complexity_factor * random_factor

        return transition_costs

    def _find_start_activities(self) -> List[int]:
        """Identify the common start activities of the process."""
        start_counts = Counter(self.log.groupby('case_id')['activity'].first())

        # Convert start counts into a probability distribution.
        total_cases = len(self.log['case_id'].unique())
        start_probs = {self.activity_to_id[act]: count/total_cases
                       for act, count in start_counts.items()}

        # Keep start activities that open at least 5% of the cases.
        min_prob = 0.05
        start_acts = [act_id for act_id, prob in start_probs.items()
                      if prob >= min_prob]

        # Fall back to the single most common start activity if none qualify.
        if not start_acts and start_counts:
            most_common_act = start_counts.most_common(1)[0][0]
            start_acts = [self.activity_to_id[most_common_act]]

        return start_acts

    def reset(self, start_activity_id: Optional[int] = None) -> int:
        """
        Reset the environment to an initial state.

        Args:
            start_activity_id: optional explicit start activity id; when
                omitted (or out of range) a start activity is sampled from
                the empirical start distribution of the log.

        Returns:
            The id of the starting activity.
        """
        self.current_step = 0

        if start_activity_id is not None and 0 <= start_activity_id < self.num_activities:
            self.current_activity_id = start_activity_id
        else:
            # Sample from the precomputed empirical start distribution.
            self.current_activity_id = int(np.random.choice(self.num_activities,
                                                            p=self._start_probs))

        # Reset path history, case attributes and the episode-return tracker.
        self.current_path = [self.current_activity_id]
        self.current_case_attrs = self._generate_case_attributes()
        self._episode_return = 0.0

        return self.current_activity_id

    def _generate_case_attributes(self) -> Dict[str, Any]:
        """Generate simulated case attributes for a richer state representation."""
        return {
            'priority': np.random.choice(['low', 'medium', 'high']),
            'value': np.random.uniform(100, 10000),
            'department': np.random.choice(['IT', 'HR', 'Finance', 'Operations', 'Sales']),
            'requester_level': np.random.choice(['staff', 'manager', 'director', 'executive'])
        }

    def get_state(self) -> Dict[str, Any]:
        """Return a rich dictionary representation of the current state."""
        # One-hot encoding of the current activity.
        activity_one_hot = np.zeros(self.num_activities)
        activity_one_hot[self.current_activity_id] = 1

        state = {
            'activity_id': self.current_activity_id,
            'activity_name': self.id_to_activity[self.current_activity_id],
            'activity_one_hot': activity_one_hot,
            'step': self.current_step,
            'case_attributes': self.current_case_attrs,
            'path_history': self.current_path.copy()
        }

        return state

    def get_valid_actions(self, state_id: int) -> List[int]:
        """Return the ids of valid successor activities from ``state_id``."""
        return list(self.transitions.get(state_id, {}).keys())

    def is_compliant(self, from_activity: int, to_activity: int) -> Tuple[bool, float, str]:
        """
        Check whether a transition satisfies the compliance rules.

        Supported rule keys: 'required_sequences', 'forbidden_sequences',
        'attribute_rules'.

        Returns:
            (compliant, reward/penalty value, reason string)
        """
        from_name = self.id_to_activity[from_activity]
        to_name = self.id_to_activity[to_activity]

        # Mandatory sequences: once seq[0] occurred, seq[1] must follow
        # before the process may proceed elsewhere.
        if 'required_sequences' in self.compliance_rules:
            for seq in self.compliance_rules['required_sequences']:
                # Does this transition break a required sequence?
                if seq[0] in [self.id_to_activity[a] for a in self.current_path] and \
                   seq[1] not in [self.id_to_activity[a] for a in self.current_path] and \
                   to_name not in [seq[1]] + [self.id_to_activity[a] for a in self.current_path]:
                    return False, -50, f"必须先完成{seq[1]}才能继续"

        # Forbidden direct transitions.
        if 'forbidden_sequences' in self.compliance_rules:
            for seq in self.compliance_rules['forbidden_sequences']:
                if from_name == seq[0] and to_name == seq[1]:
                    return False, -100, f"禁止的转移: {from_name} -> {to_name}"

        # Rules conditioned on case attributes.
        if 'attribute_rules' in self.compliance_rules:
            for rule in self.compliance_rules['attribute_rules']:
                attr = rule['attribute']
                condition = rule['condition']
                required_act = rule['required_activity']

                if attr in self.current_case_attrs:
                    value = self.current_case_attrs[attr]

                    # Evaluate the rule's condition on the attribute value.
                    condition_met = False
                    if condition['op'] == '>' and value > condition['value']:
                        condition_met = True
                    elif condition['op'] == '<' and value < condition['value']:
                        condition_met = True
                    elif condition['op'] == '==' and value == condition['value']:
                        condition_met = True

                    # Condition holds but the mandated activity is missing.
                    if condition_met and required_act not in [self.id_to_activity[a] for a in self.current_path] and \
                       to_name not in [required_act]:
                        return False, -75, f"案例{attr}={value}需要活动{required_act}"

        # Every rule passed.
        return True, 10, "合规"

    def step(self, action_id: int) -> Tuple[Dict[str, Any], float, bool, Dict]:
        """
        Execute one activity transition.

        Args:
            action_id: id of the next activity.

        Returns:
            state: new state representation.
            reward: reward value for this step.
            done: whether the episode finished.
            info: additional information (reward breakdown, path, ...).

        Raises:
            Exception: if the environment has not been reset.
        """
        if self.current_activity_id == -1:
            raise Exception("环境必须先被重置才能执行步骤")

        valid_actions = self.get_valid_actions(self.current_activity_id)

        # Invalid transition: penalize and stay in place.
        if action_id not in valid_actions:
            reward = -50.0
            done = False
            info = {
                # .get guards against ids outside the activity set, which
                # previously crashed the f-string lookup.
                'error': f"无效转移: {self.id_to_activity[self.current_activity_id]} -> {self.id_to_activity.get(action_id, action_id)}",
                'valid_actions': [self.id_to_activity[a] for a in valid_actions]
            }
            return self.get_state(), reward, done, info

        # Remember the previous state for the reward computation.
        prev_activity_id = self.current_activity_id

        # Apply the transition.
        self.current_activity_id = action_id
        self.current_path.append(action_id)
        self.current_step += 1

        # Multi-dimensional reward (time + cost + compliance).
        reward, reward_info = self._calculate_reward(prev_activity_id, action_id)

        # Terminal conditions.
        done = False
        if action_id in self.end_activity_ids:
            done = True
            reward += 100.0  # completion bonus
            self.episode_stats['path_lengths'].append(len(self.current_path))
        elif self.current_step >= self.max_steps:
            done = True
            reward -= 75.0  # timeout penalty

        # Track the true episode return (the original code stored only the
        # final step's reward under 'total_rewards').
        self._episode_return += reward
        if done:
            self.episode_stats['episodes'] += 1
            # completion_rate = completed episodes / all finished episodes.
            # (The original formula counted path lengths > 0, which is
            # always true, so the rate was stuck at 1.0.)
            self.episode_stats['completion_rate'] = (
                len(self.episode_stats['path_lengths'])
                / self.episode_stats['episodes'])
            self.episode_stats['total_rewards'].append(self._episode_return)

        info = {
            'reward_breakdown': reward_info,
            'current_path': [self.id_to_activity[a] for a in self.current_path],
            'step': self.current_step,
            'is_end_activity': action_id in self.end_activity_ids
        }

        # Capture the state BEFORE clearing the current activity: the
        # original code reset current_activity_id to -1 first and then
        # called get_state(), which raised KeyError(-1) on every episode end.
        next_state = self.get_state()
        if done:
            self.current_activity_id = -1

        return next_state, reward, done, info

    def _calculate_reward(self, from_activity: int, to_activity: int) -> Tuple[float, Dict]:
        """
        Compute the multi-dimensional reward for a transition.

        Combines time, cost and compliance, plus a constant per-step penalty
        that encourages shorter paths.

        Returns:
            (total reward, detailed reward breakdown dict)
        """
        transition = (from_activity, to_activity)

        # 1. Time reward - negative; shorter transitions are better.
        time_penalty = -self.transition_times.get(transition, 24)  # default: 24h
        time_reward = time_penalty * self.time_weight

        # 2. Cost reward - negative; cheaper transitions are better.
        cost_penalty = -self.transition_costs.get(transition, 1000)  # default: 1000 units
        cost_reward = cost_penalty * self.cost_weight / 1000  # scale to match the others

        # 3. Compliance reward.
        is_compliant, compliance_reward_value, reason = self.is_compliant(from_activity, to_activity)
        compliance_reward = compliance_reward_value * self.compliance_weight

        total_reward = time_reward + cost_reward + compliance_reward

        # Per-step penalty to encourage shorter paths.
        step_penalty = -1.0
        total_reward += step_penalty

        reward_info = {
            'time_reward': time_reward,
            'cost_reward': cost_reward,
            'compliance_reward': compliance_reward,
            'step_penalty': step_penalty,
            'total_reward': total_reward,
            'compliance_reason': reason,
            'is_compliant': is_compliant
        }

        return total_reward, reward_info

    def visualize_process(self, highlight_path: List[int] = None) -> None:
        """
        Render the mined process graph, optionally highlighting a path.
        """
        G = nx.DiGraph()

        # Nodes: one per activity.
        for act_id, act_name in self.id_to_activity.items():
            G.add_node(act_id, label=act_name)

        # Edges: one per observed transition, annotated with probability,
        # average time and cost.
        for from_act, to_dict in self.transitions.items():
            for to_act, prob in to_dict.items():
                trans_time = self.transition_times.get((from_act, to_act), 0)
                trans_cost = self.transition_costs.get((from_act, to_act), 0)

                G.add_edge(from_act, to_act,
                           weight=prob,
                           time=f"{trans_time:.1f}h",
                           cost=f"${trans_cost:.0f}",
                           label=f"{prob:.2f}\n{trans_time:.1f}h\n${trans_cost:.0f}")

        # Node colors: green = start, red = end, blue = regular.
        node_colors = []
        for node in G.nodes():
            if node in self.start_activities:
                color = 'lightgreen'
            elif node in self.end_activity_ids:
                color = 'lightcoral'
            else:
                color = 'lightblue'
            node_colors.append(color)

        # Edge colors/widths; highlighted path edges are drawn red and thick.
        edge_colors = []
        edge_widths = []

        for u, v in G.edges():
            if highlight_path and u in highlight_path and v in highlight_path:
                idx_u = highlight_path.index(u)
                if idx_u < len(highlight_path) - 1 and highlight_path[idx_u + 1] == v:
                    edge_colors.append('red')
                    edge_widths.append(2.5)
                    continue

            # Default edge style; width scales with transition probability.
            weight = G.edges[u, v]['weight']
            edge_colors.append('gray')
            edge_widths.append(0.5 + weight * 2)

        plt.figure(figsize=(14, 10))
        pos = nx.spring_layout(G, k=0.3, seed=42)  # fixed seed → stable layout

        nx.draw_networkx_nodes(G, pos, node_size=700, node_color=node_colors, alpha=0.8)

        nx.draw_networkx_edges(G, pos, width=edge_widths, edge_color=edge_colors,
                               arrowsize=15, alpha=0.7)

        nx.draw_networkx_labels(G, pos, labels={n: G.nodes[n]['label'] for n in G.nodes()})

        # Detailed edge labels only for small graphs to stay readable.
        if len(G) < 15:
            edge_labels = {(u, v): G.edges[u, v]['label'] for u, v in G.edges()}
            nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=8)

        plt.title("流程图" + (" (最佳路径高亮)" if highlight_path else ""))
        plt.axis('off')
        plt.tight_layout()
        plt.show()

    def analyze_best_path(self, path: List[int]) -> Dict[str, Any]:
        """Analyze a path and return its key performance indicators."""
        if not path:
            return {"error": "空路径"}

        total_time = 0
        total_cost = 0
        non_compliant_steps = 0

        for i in range(len(path) - 1):
            from_act = path[i]
            to_act = path[i+1]
            trans = (from_act, to_act)

            # Accumulate time and cost (same defaults as the reward).
            total_time += self.transition_times.get(trans, 24)
            total_cost += self.transition_costs.get(trans, 1000)

            # Compliance check per transition.
            is_compliant, _, _ = self.is_compliant(from_act, to_act)
            if not is_compliant:
                non_compliant_steps += 1

        # Share of compliant transitions along the path.
        compliance_rate = 1.0 - (non_compliant_steps / max(1, len(path) - 1))

        analysis = {
            "path_length": len(path),
            "total_time_hours": total_time,
            "avg_time_per_step": total_time / max(1, len(path) - 1),
            "total_cost": total_cost,
            "avg_cost_per_step": total_cost / max(1, len(path) - 1),
            "compliance_rate": compliance_rate,
            "activities": [self.id_to_activity[act] for act in path]
        }

        return analysis


class QLearningAgent:
    """Q-learning agent that learns the best process path."""
    def __init__(self,
                 num_states: int,
                 num_actions: int,
                 learning_rate: float = 0.1,
                 discount_factor: float = 0.95,
                 exploration_rate: float = 1.0,
                 exploration_decay: float = 0.995,
                 min_exploration_rate: float = 0.01):
        """
        Initialize the Q-learning agent.

        Args:
            num_states: size of the state space (number of activities).
            num_actions: size of the action space (number of activities).
            learning_rate: step size of the Q-value update.
            discount_factor: discount applied to future rewards.
            exploration_rate: initial epsilon for the epsilon-greedy policy.
            exploration_decay: multiplicative epsilon decay per episode.
            min_exploration_rate: lower bound for epsilon.
        """
        # Sparse Q-table: state -> action -> value; missing entries read 0.0.
        self.q_table = defaultdict(lambda: defaultdict(float))
        self.num_states = num_states
        self.num_actions = num_actions

        # Learning parameters.
        self.lr = learning_rate
        self.gamma = discount_factor
        self.epsilon = exploration_rate
        self.epsilon_decay = exploration_decay
        self.min_epsilon = min_exploration_rate

        # Training-progress tracking.
        self.training_stats = {
            'episode_rewards': [],
            'epsilons': [],
            'path_lengths': [],
        }

    def choose_action(self, state_id: int, valid_actions: List[int]) -> int:
        """
        Pick an action with an epsilon-greedy policy.

        Args:
            state_id: current state id (activity id).
            valid_actions: ids of the valid successor activities.

        Returns:
            The chosen action id, or -1 when there is no valid action.
        """
        if not valid_actions:
            return -1

        # Explore with probability epsilon: uniform over valid actions.
        if random.random() < self.epsilon:
            return random.choice(valid_actions)

        # Exploit: pick the valid action with the highest Q-value.
        # (.get avoids materializing entries in the sparse table.)
        q_values = {a: self.q_table[state_id].get(a, 0.0) for a in valid_actions}

        max_q = max(q_values.values()) if q_values else 0.0
        best_actions = [a for a, q in q_values.items() if q >= max_q]

        # Break ties between equally good actions uniformly at random.
        return random.choice(best_actions)

    def learn(self, state_id: int, action_id: int, reward: float,
              next_state_id: int, done: bool, next_valid_actions: List[int] = None) -> None:
        """
        Apply the Q-learning update rule.

        Args:
            state_id: current state id.
            action_id: executed action id.
            reward: observed reward.
            next_state_id: resulting state id.
            done: whether a terminal state was reached.
            next_valid_actions: valid actions in the next state (optional;
                when omitted or empty the bootstrap term is 0).
        """
        current_q = self.q_table[state_id][action_id]

        # Bootstrap from the best next action, but only for non-terminal states.
        max_next_q = 0.0
        if not done and next_valid_actions:
            next_q_values = [self.q_table[next_state_id].get(a, 0.0) for a in next_valid_actions]
            if next_q_values:
                max_next_q = max(next_q_values)

        # Q(s,a) <- Q(s,a) + lr * [R + gamma * max_a' Q(s',a') - Q(s,a)]
        new_q = current_q + self.lr * (reward + self.gamma * max_next_q - current_q)
        self.q_table[state_id][action_id] = new_q

    def decay_exploration(self) -> None:
        """Lower epsilon (bounded below) to shift from exploration to exploitation."""
        self.epsilon = max(self.min_epsilon, self.epsilon * self.epsilon_decay)

    def save_model(self, filepath: str) -> None:
        """Serialize the Q-table, training stats and hyperparameters to ``filepath``."""
        # Convert the nested defaultdict to plain dicts for pickling.
        q_table_dict = {}
        for state, actions in self.q_table.items():
            q_table_dict[state] = dict(actions)

        model_data = {
            'q_table': q_table_dict,
            'training_stats': self.training_stats,
            'hyperparams': {
                'lr': self.lr,
                'gamma': self.gamma,
                'epsilon': self.epsilon,
                # epsilon_decay was previously not persisted, so a restored
                # agent silently fell back to the constructor's schedule.
                'epsilon_decay': self.epsilon_decay,
                'min_epsilon': self.min_epsilon,
            }
        }

        with open(filepath, 'wb') as f:
            pickle.dump(model_data, f)

    def load_model(self, filepath: str) -> None:
        """
        Load a Q-table saved by ``save_model``.

        Silently does nothing if ``filepath`` does not exist (preserved
        behavior).  Missing hyperparameters keep their current values, so
        files written by older versions still load.
        """
        if os.path.exists(filepath):
            with open(filepath, 'rb') as f:
                model_data = pickle.load(f)

            # Rebuild the sparse Q-table.
            q_table_dict = model_data.get('q_table', {})
            for state, actions in q_table_dict.items():
                for action, value in actions.items():
                    self.q_table[state][action] = value

            # Restore training statistics.
            self.training_stats = model_data.get('training_stats', self.training_stats)

            # Restore hyperparameters (defaults keep current values).
            hyperparams = model_data.get('hyperparams', {})
            self.lr = hyperparams.get('lr', self.lr)
            self.gamma = hyperparams.get('gamma', self.gamma)
            self.epsilon = hyperparams.get('epsilon', self.epsilon)
            self.epsilon_decay = hyperparams.get('epsilon_decay', self.epsilon_decay)
            self.min_epsilon = hyperparams.get('min_epsilon', self.min_epsilon)


class ProcessOptimizer:
    """Process optimizer: uses reinforcement learning (Q-learning) to find the best process path."""
    def __init__(self, environment: ProcessMiningEnvironment, verbose: bool = True):
        """
        Initialize the process optimizer.

        Args:
            environment: the process-mining environment to optimize over.
            verbose: whether to print detailed training information.
        """
        self.env = environment
        self.verbose = verbose

        # Agent whose state and action spaces are both the activity set.
        self.agent = QLearningAgent(
            num_states=self.env.num_activities,
            num_actions=self.env.num_activities,
            learning_rate=0.1,
            discount_factor=0.95,
            exploration_rate=1.0,
            exploration_decay=0.995,
            min_exploration_rate=0.01
        )

        # Training metrics (currently unused beyond collection).
        self.training_history = []

    def train(self, num_episodes: int = 1000,
             eval_frequency: int = 100,
             save_path: Optional[str] = None):
        """
        Train the Q-learning agent by rolling out episodes in the environment.

        Args:
            num_episodes: number of training episodes.
            eval_frequency: how often (in episodes) to print progress.
            save_path: optional path to save the trained model to.
        """
        print(f"开始训练流程优化智能体，总训练回合: {num_episodes}")

        for episode in range(num_episodes):
            # Start a fresh episode.
            state_id = self.env.reset()
            done = False
            total_reward = 0
            steps = 0

            while not done:
                # Valid successors of the current activity.
                valid_actions = self.env.get_valid_actions(state_id)

                if not valid_actions:
                    # Dead end: no observed successor — abandon the episode.
                    if self.verbose and (episode + 1) % eval_frequency == 0:
                        print(f"  回合 {episode + 1}: 在活动 {self.env.id_to_activity[state_id]} 遇到死胡同")
                    break

                # Epsilon-greedy action selection.
                action_id = self.agent.choose_action(state_id, valid_actions)

                if action_id == -1:
                    # No action available; should not happen since
                    # valid_actions is non-empty here.
                    break

                # Apply the transition in the environment.
                next_state, reward, done, info = self.env.step(action_id)
                next_state_id = next_state['activity_id']

                # Next state's valid actions are needed for the Q bootstrap.
                next_valid_actions = [] if done else self.env.get_valid_actions(next_state_id)

                # Q-learning update.
                self.agent.learn(state_id, action_id, reward, next_state_id, done, next_valid_actions)

                # Advance bookkeeping.
                state_id = next_state_id
                total_reward += reward
                steps += 1

                # Safety cap against infinite loops (mirrors env.max_steps).
                if steps >= self.env.max_steps:
                    break

            # Shift gradually from exploration to exploitation.
            self.agent.decay_exploration()

            # Record per-episode metrics.
            self.agent.training_stats['episode_rewards'].append(total_reward)
            self.agent.training_stats['epsilons'].append(self.agent.epsilon)
            self.agent.training_stats['path_lengths'].append(steps)

            # Periodic progress report.
            if self.verbose and (episode + 1) % eval_frequency == 0:
                # Averages over the most recent eval_frequency episodes.
                avg_reward = sum(self.agent.training_stats['episode_rewards'][-eval_frequency:]) / eval_frequency
                avg_steps = sum(self.agent.training_stats['path_lengths'][-eval_frequency:]) / eval_frequency

                print(f"回合 {episode + 1}/{num_episodes} | 平均奖励: {avg_reward:.1f} | 平均步数: {avg_steps:.1f} | 探索率: {self.agent.epsilon:.3f}")

                # Every 5 reports, also extract and print the current best path.
                if (episode + 1) % (eval_frequency * 5) == 0:
                    best_path = self.get_best_path()
                    if best_path:
                        path_activities = [self.env.id_to_activity[a] for a in best_path]
                        print(f"  当前最佳路径: {' -> '.join(path_activities[:5])}... (共{len(best_path)}步)")

        # Persist the model if a save path was given.
        if save_path:
            self.agent.save_model(save_path)
            print(f"模型已保存到 {save_path}")

        print("训练完成")

    def get_best_path(self, start_activity: Optional[str] = None) -> List[int]:
        """
        Greedily extract the learned best path from the Q-table.

        Args:
            start_activity: optional start-activity name; falls back to the
                most common start activity of the log.

        Returns:
            List of activity ids along the (greedy, cycle-free) best path.
        """
        if start_activity and start_activity in self.env.activity_to_id:
            start_id = self.env.activity_to_id[start_activity]
        else:
            # Default: the most frequent start activity.
            start_id = self.env.start_activities[0]

        current_id = start_id
        path = [current_id]
        visited = {current_id}  # cycle detection

        for _ in range(self.env.max_steps):
            valid_actions = self.env.get_valid_actions(current_id)

            # Drop already-visited activities to avoid loops.
            valid_actions = [a for a in valid_actions if a not in visited]

            if not valid_actions:
                if current_id in self.env.end_activity_ids:
                    break  # reached a proper end — done
                else:
                    if len(path) > 1:
                        # Backtrack one step and try an alternative branch.
                        current_id = path[-2]
                        path = path[:-1]
                        continue
                    else:
                        # Stuck at the start — give up.
                        break

            # Pure exploitation: highest-Q action, no exploration.
            q_values = {a: self.agent.q_table[current_id].get(a, 0.0) for a in valid_actions}
            if not q_values:
                break

            best_action = max(q_values, key=q_values.get)

            current_id = best_action
            path.append(current_id)
            visited.add(current_id)

            if current_id in self.env.end_activity_ids:
                break  # reached a terminal activity

        return path

    def visualize_training_progress(self) -> None:
        """Plot episode rewards, epsilon decay and path lengths over training."""
        if not self.agent.training_stats['episode_rewards']:
            print("没有训练数据可视化")
            return

        fig, axs = plt.subplots(3, 1, figsize=(12, 15))

        # Reward curve.
        rewards = self.agent.training_stats['episode_rewards']
        axs[0].plot(rewards)
        axs[0].set_title('训练回合奖励')
        axs[0].set_xlabel('回合')
        axs[0].set_ylabel('总奖励')

        # Moving average overlay (window scales with history length).
        window = min(100, len(rewards) // 10)
        if window > 0:
            moving_avg = pd.Series(rewards).rolling(window=window).mean()
            axs[0].plot(moving_avg, color='red', linewidth=2)
            axs[0].legend(['回合奖励', f'{window}回合移动平均'])

        # Epsilon decay.
        axs[1].plot(self.agent.training_stats['epsilons'])
        axs[1].set_title('探索率衰减')
        axs[1].set_xlabel('回合')
        axs[1].set_ylabel('探索率 (ε)')

        # Path lengths per episode.
        axs[2].plot(self.agent.training_stats['path_lengths'])
        axs[2].set_title('路径长度')
        axs[2].set_xlabel('回合')
        axs[2].set_ylabel('步数')

        plt.tight_layout()
        plt.show()

    def compare_paths(self, paths: List[List[int]], names: List[str] = None) -> None:
        """
        Compare several paths on step count, time, cost and compliance.

        Args:
            paths: list of paths (each a list of activity ids).
            names: optional display names, defaulting to "路径 i".
        """
        if not paths:
            return

        if not names:
            names = [f"路径 {i+1}" for i in range(len(paths))]

        # Analyze each path via the environment's KPI computation.
        analyses = []
        for path, name in zip(paths, names):
            analysis = self.env.analyze_best_path(path)
            analysis['name'] = name
            analyses.append(analysis)

        # Tabular comparison.
        compare_data = {
            '路径名称': [a['name'] for a in analyses],
            '步数': [a['path_length'] for a in analyses],
            '总时间(小时)': [f"{a['total_time_hours']:.1f}" for a in analyses],
            '平均每步时间': [f"{a['avg_time_per_step']:.1f}" for a in analyses],
            '总成本': [f"${a['total_cost']:.0f}" for a in analyses],
            '平均每步成本': [f"${a['avg_cost_per_step']:.0f}" for a in analyses],
            '合规率': [f"{a['compliance_rate']*100:.1f}%" for a in analyses],
        }

        # Print the comparison table.
        df = pd.DataFrame(compare_data)
        print("\n路径比较:")
        print(df.to_string(index=False))

        # Visual comparison: 2x2 grid of bar charts.
        fig, axs = plt.subplots(2, 2, figsize=(14, 10))

        # Step counts.
        axs[0, 0].bar(compare_data['路径名称'], [a['path_length'] for a in analyses])
        axs[0, 0].set_title('路径步数')
        axs[0, 0].set_ylabel('步数')

        # Total time.
        axs[0, 1].bar(compare_data['路径名称'], [a['total_time_hours'] for a in analyses])
        axs[0, 1].set_title('总时间(小时)')
        axs[0, 1].set_ylabel('小时')

        # Total cost.
        axs[1, 0].bar(compare_data['路径名称'], [a['total_cost'] for a in analyses])
        axs[1, 0].set_title('总成本')
        axs[1, 0].set_ylabel('成本单位')

        # Compliance rate (bounded in [0, 1]).
        axs[1, 1].bar(compare_data['路径名称'], [a['compliance_rate'] for a in analyses])
        axs[1, 1].set_title('合规率')
        axs[1, 1].set_ylabel('比率')
        axs[1, 1].set_ylim([0, 1])

        plt.tight_layout()
        plt.show()

        # Print each path's activity sequence in full.
        print("\n每条路径的详细活动序列:")
        for analysis in analyses:
            print(f"\n{analysis['name']}:")
            print(" -> ".join(analysis['activities']))


def generate_synthetic_process_log(num_cases: int = 500, 
                                 variants_ratio: Dict[str, float] = None,
                                 noise_level: float = 0.1) -> pd.DataFrame:
    """
    Generate a synthetic purchase-to-pay process event log.

    Args:
        num_cases: number of cases to generate.
        variants_ratio: proportion of each process variant; values are
            normalized automatically when they do not sum to 1.
        noise_level: probability of injecting one random variation
            (insert / repeat / delete an activity) into a case.

    Returns:
        Event log as a pandas DataFrame, one row per event.
    """
    # Default variant mix.
    if variants_ratio is None:
        variants_ratio = {
            'standard': 0.6,    # standard procurement flow
            'expedited': 0.15,  # expedited procurement flow
            'approval': 0.15,   # multi-level approval flow
            'rejected': 0.1     # rejected-and-resubmitted flow
        }
    
    # Normalize the ratios when they do not sum to 1.
    if abs(sum(variants_ratio.values()) - 1.0) > 0.01:
        print("警告: 变体比例总和不为1，将自动调整")
        total = sum(variants_ratio.values())
        variants_ratio = {k: v/total for k, v in variants_ratio.items()}
    
    # Activity sequences for each process variant.
    process_variants = {
        'standard': [
            'PR_Creation', 'PR_Review', 'PR_Approval', 'Vendor_Selection', 
            'Quotation_Comparison', 'PO_Creation', 'PO_Review', 'PO_Approval', 
            'PO_Sending', 'Goods_Receipt', 'Quality_Check', 'Invoice_Receipt', 
            'Invoice_Verification', 'Payment_Approval', 'Payment_Execution', 'Process_Complete'
        ],
        'expedited': [
            'PR_Creation', 'PR_Approval', 'PO_Creation', 'PO_Approval', 
            'PO_Sending', 'Goods_Receipt', 'Invoice_Receipt', 
            'Payment_Approval', 'Payment_Execution', 'Process_Complete'
        ],
        'approval': [
            'PR_Creation', 'PR_Review', 'PR_Approval', 'Vendor_Selection', 
            'PO_Creation', 'PO_Review', 'PO_Review', 'PO_Approval', 'PO_Approval',  # extra approval steps
            'PO_Sending', 'Goods_Receipt', 'Quality_Check', 'Invoice_Receipt', 
            'Invoice_Verification', 'Payment_Approval', 'Payment_Approval',  # extra payment approval
            'Payment_Execution', 'Process_Complete'
        ],
        'rejected': [
            'PR_Creation', 'PR_Review', 'PR_Rejection', 'PR_Creation',  # resubmission
            'PR_Review', 'PR_Approval', 'Vendor_Selection', 'PO_Creation', 
            'PO_Review', 'PO_Rejection', 'PO_Creation', 'PO_Review',  # resubmission
            'PO_Approval', 'PO_Sending', 'Goods_Receipt', 'Process_Complete'
        ]
    }
    
    # Per-activity duration (hours) and cost ranges.
    activity_params = {
        'PR_Creation': {'time': (0.5, 2), 'cost': (10, 50)},
        'PR_Review': {'time': (0.5, 3), 'cost': (20, 60)},
        'PR_Approval': {'time': (1, 8), 'cost': (30, 80)},
        'PR_Rejection': {'time': (0.5, 1), 'cost': (10, 30)},
        'Vendor_Selection': {'time': (2, 24), 'cost': (40, 200)},
        'Quotation_Comparison': {'time': (1, 8), 'cost': (30, 100)},
        'PO_Creation': {'time': (0.5, 3), 'cost': (20, 60)},
        'PO_Review': {'time': (0.5, 4), 'cost': (20, 70)},
        'PO_Approval': {'time': (1, 12), 'cost': (30, 90)},
        'PO_Rejection': {'time': (0.5, 1), 'cost': (10, 30)},
        'PO_Sending': {'time': (0.2, 1), 'cost': (5, 20)},
        'Goods_Receipt': {'time': (4, 72), 'cost': (20, 100)},
        'Quality_Check': {'time': (1, 8), 'cost': (40, 120)},
        'Invoice_Receipt': {'time': (1, 24), 'cost': (10, 30)},
        'Invoice_Verification': {'time': (0.5, 4), 'cost': (20, 60)},
        'Payment_Approval': {'time': (1, 24), 'cost': (30, 80)},
        'Payment_Execution': {'time': (0.5, 4), 'cost': (10, 40)},
        'Process_Complete': {'time': (0.1, 0.5), 'cost': (5, 15)}
    }
    
    # Draw a variant for every case.
    case_variants = np.random.choice(
        list(variants_ratio.keys()),
        size=num_cases,
        p=list(variants_ratio.values())
    )
    
    event_log = []
    case_id = 1000  # starting case id
    
    for variant in case_variants:
        # BUG FIX: copy the template. The original aliased the shared
        # variant list, so the insert/pop noise edits below permanently
        # mutated the variant definition for all subsequent cases.
        activities_path = list(process_variants[variant])
        
        # Optionally inject one random variation into this case.
        if random.random() < noise_level:
            # Insert, repeat or delete a random activity (never position 0).
            noise_type = random.choice(['insert', 'repeat', 'delete'])
            if noise_type == 'insert' and len(activities_path) > 2:
                insert_pos = random.randint(1, len(activities_path) - 2)
                insert_act = random.choice(list(activity_params.keys()))
                activities_path.insert(insert_pos, insert_act)
            elif noise_type == 'repeat' and len(activities_path) > 2:
                repeat_pos = random.randint(1, len(activities_path) - 2)
                activities_path.insert(repeat_pos, activities_path[repeat_pos])
            elif noise_type == 'delete' and len(activities_path) > 3:
                delete_pos = random.randint(1, len(activities_path) - 2)
                activities_path.pop(delete_pos)
        
        # Case-level attributes shared by all events of the case.
        case_attrs = {
            'amount': round(random.uniform(100, 10000), 2),
            'department': random.choice(['IT', 'HR', 'Finance', 'Operations', 'Sales', 'Marketing']),
            'priority': random.choice(['Low', 'Medium', 'High', 'Critical']),
            'requester': f"User_{random.randint(1000, 9999)}"
        }
        
        # Random start time within 2023, during office hours.
        start_time = datetime(2023, 1, 1) + timedelta(
            days=random.randint(0, 364),
            hours=random.randint(8, 17),
            minutes=random.randint(0, 59)
        )
        
        # Emit one event per activity, advancing the clock each step.
        current_time = start_time
        
        for activity in activities_path:
            time_range = activity_params.get(activity, {'time': (0.5, 4)})['time']
            cost_range = activity_params.get(activity, {'cost': (10, 50)})['cost']
            
            # Random delay before the activity completes.
            time_delay = random.uniform(time_range[0], time_range[1])
            current_time += timedelta(hours=time_delay)
            
            cost = random.uniform(cost_range[0], cost_range[1])
            
            event = {
                'case_id': f"C{case_id}",
                'activity': activity,
                'timestamp': current_time,
                'resource': f"User_{random.randint(100, 999)}",
                'cost': round(cost, 2),
                'amount': case_attrs['amount'],
                'department': case_attrs['department'],
                'priority': case_attrs['priority'],
                'requester': case_attrs['requester']
            }
            
            event_log.append(event)
        
        case_id += 1
    
    return pd.DataFrame(event_log)


def main():
    """Run the end-to-end process-optimization demo: generate a log,
    build the environment, train the optimizer and report the best path."""
    # 1. Generate a synthetic event log.
    print("生成合成事件日志...")
    log_df = generate_synthetic_process_log(num_cases=500)
    print(f"生成了 {len(log_df)} 条事件记录，{log_df['case_id'].nunique()} 个案例")
    
    # Show a sample of the log.
    print("\n事件日志样本:")
    print(log_df.head())
    
    # 2. Compliance rules for the environment.
    compliance_rules = {
        'required_sequences': [
            ('PR_Approval', 'PO_Creation'),  # PR must be approved before the PO
            ('Quality_Check', 'Payment_Approval')  # quality check required before payment
        ],
        'forbidden_sequences': [
            ('PR_Rejection', 'PO_Creation'),  # a rejected PR cannot directly create a PO
            ('PR_Creation', 'PO_Approval')    # cannot approve a PO without creating it
        ],
        'attribute_rules': [
            {
                'attribute': 'amount', 
                'condition': {'op': '>', 'value': 5000},
                'required_activity': 'PO_Review'  # large purchases need a review
            }
        ]
    }
    
    # 3. Build the process-mining environment.
    print("\n初始化流程挖掘环境...")
    env = ProcessMiningEnvironment(
        event_log=log_df, 
        end_activities=['Process_Complete'],
        max_steps=25,
        compliance_rules=compliance_rules,
        time_weight=0.5,
        cost_weight=0.3,
        compliance_weight=0.2
    )
    
    # 4. Create the optimizer and train the agent.
    print("\n创建流程优化器并开始训练...")
    optimizer = ProcessOptimizer(env)
    optimizer.train(num_episodes=2000, eval_frequency=100)
    
    # 5. Visualize training progress.
    print("\n可视化训练进度...")
    optimizer.visualize_training_progress()
    
    # 6. Extract and report the learned best path.
    print("\n提取最佳路径...")
    best_path = optimizer.get_best_path()
    best_path_activities = [env.id_to_activity[act_id] for act_id in best_path]
    
    # BUG FIX: the original ended in a dangling bare `print` statement
    # (a paste artifact) that evaluated the builtin without calling it.
    # Actually print the learned path.
    print("最佳路径:")
    print(" -> ".join(best_path_activities))
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import seaborn as sns
from collections import defaultdict, Counter
import random
from datetime import datetime, timedelta
from typing import Dict, List, Tuple, Set, Optional, Any
import pickle
import os


class ProcessMiningEnvironment:
    """
    Process-mining environment built from a business-process event log.

    Models a purchase-to-pay process as an MDP over activity ids: states are
    activities, actions are observed successor activities, and the reward
    combines transition time, transition cost and rule compliance.
    """
    def __init__(self, event_log: pd.DataFrame, 
                 end_activities: List[str], 
                 max_steps: int = 30,
                 compliance_rules: Dict[str, Any] = None,
                 time_weight: float = 0.5, 
                 cost_weight: float = 0.3, 
                 compliance_weight: float = 0.2):
        """
        Initialize the environment from an event log.

        Args:
            event_log: DataFrame of events with at least the columns
                ``case_id``, ``activity`` and ``timestamp``.
            end_activities: activity names that terminate a case.
            max_steps: maximum steps per episode before a timeout penalty.
            compliance_rules: optional dict of compliance rules
                (``required_sequences``, ``forbidden_sequences``,
                ``attribute_rules``).
            time_weight: weight of time in the reward.
            cost_weight: weight of cost in the reward.
            compliance_weight: weight of compliance in the reward.

        Raises:
            ValueError: if none of ``end_activities`` occurs in the log.
        """
        # Pre-processing: work on a copy sorted by case and time.
        self.log = event_log.copy()
        self.log.sort_values(by=['case_id', 'timestamp'], inplace=True)
        self.log['duration'] = self._calculate_activity_durations()
        
        # Activity vocabulary and id mappings.
        self.activities = sorted(list(self.log['activity'].unique()))
        self.activity_to_id = {act: i for i, act in enumerate(self.activities)}
        self.id_to_activity = {i: act for act, i in self.activity_to_id.items()}
        self.num_activities = len(self.activities)
        
        # Environment configuration.
        self.end_activities = end_activities
        self.end_activity_ids = [self.activity_to_id[act] for act in end_activities if act in self.activity_to_id]
        if not self.end_activity_ids:
            raise ValueError(f"No valid end activities found among {end_activities}")
        self.max_steps = max_steps
        
        # Process structure mined from the log.
        self.transitions = self._build_transition_model()
        self.transition_times = self._calculate_transition_times()
        self.transition_costs = self._calculate_transition_costs()
        self.start_activities = self._find_start_activities()
        
        # Compliance rules (may be empty).
        self.compliance_rules = compliance_rules or {}
        
        # Reward weights.
        self.time_weight = time_weight
        self.cost_weight = cost_weight
        self.compliance_weight = compliance_weight
        
        # Episode state (reset() must be called before step()).
        self.current_step = 0
        self.current_activity_id = -1
        self.current_case_attrs = {}
        self.current_path = []
        
        # Aggregate statistics across episodes.
        self.episode_stats = {
            'path_lengths': [],
            'total_rewards': [],
            'completion_rate': 0,
            'avg_time': 0,
            'avg_cost': 0,
            'compliance_rate': 0,
        }
    
    def _calculate_activity_durations(self) -> pd.Series:
        """Compute per-event durations, indexed like ``self.log``.

        The duration of an event is the time gap to the next event of the
        same case; the last event of a case (no successor) falls back to a
        60-second default.

        BUG FIX: the original read ``self.log['duration']`` here, but this
        method runs *before* that column exists, raising ``KeyError`` on
        every construction. The fallback is now computed from the gaps
        themselves.
        """
        durations = pd.Series(pd.NaT, index=self.log.index, dtype='timedelta64[ns]')
        
        # Compute gaps case by case so durations never cross case boundaries.
        for _, group in self.log.groupby('case_id'):
            events = group.sort_values('timestamp')
            # diff() gives the gap *from* the previous event; shifting by -1
            # turns it into the gap *to* the next event.
            gaps = events['timestamp'].diff().shift(-1)
            durations.loc[events.index] = gaps.values
        
        # Last event of each case has no successor: use the 60s default.
        return durations.fillna(pd.Timedelta(seconds=60))

    def _build_transition_model(self) -> Dict[int, Dict[int, float]]:
        """Build the transition model: from-activity -> {to-activity: probability}."""
        transitions = defaultdict(lambda: defaultdict(int))
        
        # Count directly-follows pairs within each case.
        for case_id, group in self.log.groupby('case_id'):
            activities = group['activity'].tolist()
            for i in range(len(activities) - 1):
                from_act = self.activity_to_id[activities[i]]
                to_act = self.activity_to_id[activities[i+1]]
                transitions[from_act][to_act] += 1
        
        # Normalize counts into probabilities per source activity.
        for from_act, to_acts in transitions.items():
            total = sum(to_acts.values())
            for to_act in to_acts:
                transitions[from_act][to_act] = to_acts[to_act] / total
                
        return transitions

    def _calculate_transition_times(self) -> Dict[Tuple[int, int], float]:
        """Compute the average transition time (hours) per observed transition."""
        transition_times = defaultdict(list)
        
        for case_id, group in self.log.groupby('case_id'):
            events = group.sort_values('timestamp')
            activities = events['activity'].tolist()
            timestamps = events['timestamp'].tolist()
            
            for i in range(len(activities) - 1):
                from_act = self.activity_to_id[activities[i]]
                to_act = self.activity_to_id[activities[i+1]]
                time_diff = (timestamps[i+1] - timestamps[i]).total_seconds() / 3600  # hours
                transition_times[(from_act, to_act)].append(time_diff)
        
        # Average per transition; 24h default guards against empty lists.
        avg_times = {}
        for trans, times in transition_times.items():
            avg_times[trans] = sum(times) / len(times) if times else 24
            
        return avg_times

    def _calculate_transition_costs(self) -> Dict[Tuple[int, int], float]:
        """Estimate the average cost per transition.

        Real deployments would pull this from an ERP / cost-center system;
        here cost is simulated as a linear function of transition time with
        a complexity multiplier and a random factor (NOT deterministic).
        """
        transition_costs = {}
        
        for trans, time in self.transition_times.items():
            from_act, to_act = trans
            
            # Approval / review / check activities are assumed costlier.
            complexity_factor = 1.0
            if any(keyword in self.id_to_activity[to_act].lower() for keyword in ['approval', 'review', 'check']):
                complexity_factor = 2.0
                
            # Base cost: assume 50 cost units per hour of labor.
            base_cost = time * 50
            
            # Random factor simulates real-world variability.
            random_factor = np.random.uniform(0.8, 1.2)
            
            transition_costs[trans] = base_cost * complexity_factor * random_factor
            
        return transition_costs

    def _find_start_activities(self) -> List[int]:
        """Identify common start activities (>= 5% of cases)."""
        start_counts = Counter(self.log.groupby('case_id')['activity'].first())
        
        # Convert start counts into a probability per activity.
        total_cases = len(self.log['case_id'].unique())
        start_probs = {self.activity_to_id[act]: count/total_cases 
                      for act, count in start_counts.items()}
        
        # Keep activities that start at least 5% of the cases.
        min_prob = 0.05
        start_acts = [act_id for act_id, prob in start_probs.items() 
                     if prob >= min_prob]
        
        # Fall back to the single most common start activity.
        if not start_acts and start_counts:
            most_common_act = start_counts.most_common(1)[0][0]
            start_acts = [self.activity_to_id[most_common_act]]
            
        return start_acts

    def reset(self, start_activity_id: int = None) -> int:
        """
        Reset the environment to an initial state.

        Args:
            start_activity_id: optional explicit start activity id; when
                omitted, a start activity is sampled according to how often
                each activity starts a case in the log.

        Returns:
            The id of the starting activity.
        """
        self.current_step = 0
        
        if start_activity_id is not None and start_activity_id in self.activity_to_id.values():
            self.current_activity_id = start_activity_id
        else:
            # Sample proportionally to observed start frequencies.
            start_counts = Counter(self.log.groupby('case_id')['activity'].first())
            total = sum(start_counts.values())
            probs = [start_counts.get(act, 0)/total for act in self.activities]
            self.current_activity_id = np.random.choice(range(self.num_activities), p=probs)
        
        # Reset path history and case attributes.
        self.current_path = [self.current_activity_id]
        self.current_case_attrs = self._generate_case_attributes()
        
        return self.current_activity_id

    def _generate_case_attributes(self) -> Dict[str, Any]:
        """Generate simulated case attributes for a richer state representation."""
        return {
            'priority': np.random.choice(['low', 'medium', 'high']),
            'value': np.random.uniform(100, 10000),
            'department': np.random.choice(['IT', 'HR', 'Finance', 'Operations', 'Sales']),
            'requester_level': np.random.choice(['staff', 'manager', 'director', 'executive'])
        }

    def get_state(self) -> Dict[str, Any]:
        """Return a rich dict representation of the current state."""
        # One-hot encoding of the current activity.
        activity_one_hot = np.zeros(self.num_activities)
        activity_one_hot[self.current_activity_id] = 1
        
        state = {
            'activity_id': self.current_activity_id,
            'activity_name': self.id_to_activity[self.current_activity_id],
            'activity_one_hot': activity_one_hot,
            'step': self.current_step,
            'case_attributes': self.current_case_attrs,
            'path_history': self.current_path.copy()
        }
        
        return state

    def get_valid_actions(self, state_id: int) -> List[int]:
        """Return the ids of activities observed to follow ``state_id``."""
        return list(self.transitions.get(state_id, {}).keys())

    def is_compliant(self, from_activity: int, to_activity: int) -> Tuple[bool, float, str]:
        """
        Check whether a transition satisfies the compliance rules.

        NOTE(review): required-sequence and attribute rules are evaluated
        against ``self.current_path`` (episode state), so calling this
        outside an episode (e.g. from analyze_best_path) uses the last
        episode's path — confirm this is intended.

        Returns:
            (is_compliant, reward/penalty value, reason string)
        """
        from_name = self.id_to_activity[from_activity]
        to_name = self.id_to_activity[to_activity]
        
        # Required sequences: seq[0] seen but seq[1] not yet, and the next
        # activity is neither seq[1] nor a repeat of something already done.
        if 'required_sequences' in self.compliance_rules:
            for seq in self.compliance_rules['required_sequences']:
                if seq[0] in [self.id_to_activity[a] for a in self.current_path] and \
                   seq[1] not in [self.id_to_activity[a] for a in self.current_path] and \
                   to_name not in [seq[1]] + [self.id_to_activity[a] for a in self.current_path]:
                    return False, -50, f"必须先完成{seq[1]}才能继续"
                    
        # Forbidden direct transitions.
        if 'forbidden_sequences' in self.compliance_rules:
            for seq in self.compliance_rules['forbidden_sequences']:
                if from_name == seq[0] and to_name == seq[1]:
                    return False, -100, f"禁止的转移: {from_name} -> {to_name}"
        
        # Attribute-conditional rules (e.g. high amounts require a review).
        if 'attribute_rules' in self.compliance_rules:
            for rule in self.compliance_rules['attribute_rules']:
                attr = rule['attribute']
                condition = rule['condition']
                required_act = rule['required_activity']
                
                if attr in self.current_case_attrs:
                    value = self.current_case_attrs[attr]
                    
                    # Evaluate the condition operator.
                    condition_met = False
                    if condition['op'] == '>' and value > condition['value']:
                        condition_met = True
                    elif condition['op'] == '<' and value < condition['value']:
                        condition_met = True
                    elif condition['op'] == '==' and value == condition['value']:
                        condition_met = True
                    
                    # Condition holds but the required activity is missing.
                    if condition_met and required_act not in [self.id_to_activity[a] for a in self.current_path] and \
                       to_name not in [required_act]:
                        return False, -75, f"案例{attr}={value}需要活动{required_act}"
        
        # All rules passed.
        return True, 10, "合规"

    def step(self, action_id: int) -> Tuple[Dict[str, Any], float, bool, Dict]:
        """
        Execute one activity transition.

        Args:
            action_id: id of the next activity.

        Returns:
            (state, reward, done, info) in the usual gym-style convention.

        Raises:
            Exception: if called before reset().
        """
        if self.current_activity_id == -1:
            raise Exception("环境必须先被重置才能执行步骤")
            
        valid_actions = self.get_valid_actions(self.current_activity_id)
        
        # Invalid transition: penalize, stay in place.
        if action_id not in valid_actions:
            reward = -50.0
            done = False
            info = {
                'error': f"无效转移: {self.id_to_activity[self.current_activity_id]} -> {self.id_to_activity[action_id]}",
                'valid_actions': [self.id_to_activity[a] for a in valid_actions]
            }
            return self.get_state(), reward, done, info
            
        # Remember the previous state for the reward computation.
        prev_activity_id = self.current_activity_id
            
        # Apply the transition.
        self.current_activity_id = action_id
        self.current_path.append(action_id)
        self.current_step += 1
            
        # Multi-dimensional reward (time, cost, compliance, step penalty).
        reward, reward_info = self._calculate_reward(prev_activity_id, action_id)
            
        # Episode termination: reached an end activity or hit the step cap.
        done = False
        if action_id in self.end_activity_ids:
            done = True
            reward += 100.0  # completion bonus
            self.episode_stats['path_lengths'].append(len(self.current_path))
            self.episode_stats['completion_rate'] = (sum(1 for p in self.episode_stats['path_lengths'] if p > 0) / 
                                                   max(1, len(self.episode_stats['path_lengths'])))
        elif self.current_step >= self.max_steps:
            done = True
            reward -= 75.0  # timeout penalty
            
        # Record the final reward of the episode.
        if done:
            self.episode_stats['total_rewards'].append(reward)
            
        info = {
            'reward_breakdown': reward_info,
            'current_path': [self.id_to_activity[a] for a in self.current_path],
            'step': self.current_step,
            'is_end_activity': action_id in self.end_activity_ids
        }
            
        # BUG FIX: snapshot the returned state BEFORE clearing the current
        # activity. The original cleared first, so get_state() looked up
        # id_to_activity[-1] and raised KeyError at the end of every
        # completed episode.
        next_state = self.get_state()
        if done:
            self.current_activity_id = -1
            
        return next_state, reward, done, info

    def _calculate_reward(self, from_activity: int, to_activity: int) -> Tuple[float, Dict]:
        """
        Compute the multi-dimensional reward for one transition
        (time, cost, compliance, plus a constant step penalty).

        Returns:
            (total reward, breakdown dict)
        """
        transition = (from_activity, to_activity)
        
        # 1. Time reward — negative; shorter transitions are better.
        time_penalty = -self.transition_times.get(transition, 24)  # default 24h
        time_reward = time_penalty * self.time_weight
        
        # 2. Cost reward — negative; cheaper transitions are better.
        cost_penalty = -self.transition_costs.get(transition, 1000)  # default 1000 units
        cost_reward = cost_penalty * self.cost_weight / 1000  # scaled to match the others
        
        # 3. Compliance reward.
        is_compliant, compliance_reward_value, reason = self.is_compliant(from_activity, to_activity)
        compliance_reward = compliance_reward_value * self.compliance_weight
        
        total_reward = time_reward + cost_reward + compliance_reward
        
        # Constant step penalty encourages shorter paths.
        step_penalty = -1.0
        total_reward += step_penalty
        
        reward_info = {
            'time_reward': time_reward,
            'cost_reward': cost_reward,
            'compliance_reward': compliance_reward,
            'step_penalty': step_penalty,
            'total_reward': total_reward,
            'compliance_reason': reason,
            'is_compliant': is_compliant
        }
        
        return total_reward, reward_info
    
    def visualize_process(self, highlight_path: List[int] = None) -> None:
        """
        Draw the mined process graph, optionally highlighting a path in red.
        """
        G = nx.DiGraph()
        
        # One node per activity.
        for act_id, act_name in self.id_to_activity.items():
            G.add_node(act_id, label=act_name)
        
        # One edge per observed transition, annotated with prob/time/cost.
        for from_act, to_dict in self.transitions.items():
            for to_act, prob in to_dict.items():
                trans_time = self.transition_times.get((from_act, to_act), 0)
                trans_cost = self.transition_costs.get((from_act, to_act), 0)
                
                G.add_edge(from_act, to_act, 
                           weight=prob, 
                           time=f"{trans_time:.1f}h",
                           cost=f"${trans_cost:.0f}",
                           label=f"{prob:.2f}\n{trans_time:.1f}h\n${trans_cost:.0f}")
        
        # Node colors: start = green, end = coral, others = blue.
        node_colors = []
        for node in G.nodes():
            if node in self.start_activities:
                color = 'lightgreen'
            elif node in self.end_activity_ids:
                color = 'lightcoral'
            else:
                color = 'lightblue'
            node_colors.append(color)
            
        # Edge styling: highlighted path in red, others scaled by probability.
        edge_colors = []
        edge_widths = []
        
        for u, v in G.edges():
            if highlight_path and u in highlight_path and v in highlight_path:
                idx_u = highlight_path.index(u)
                if idx_u < len(highlight_path) - 1 and highlight_path[idx_u + 1] == v:
                    edge_colors.append('red')
                    edge_widths.append(2.5)
                    continue
            
            weight = G.edges[u, v]['weight']
            edge_colors.append('gray')
            edge_widths.append(0.5 + weight * 2)
            
        plt.figure(figsize=(14, 10))
        pos = nx.spring_layout(G, k=0.3, seed=42)
        
        nx.draw_networkx_nodes(G, pos, node_size=700, node_color=node_colors, alpha=0.8)
        
        nx.draw_networkx_edges(G, pos, width=edge_widths, edge_color=edge_colors, 
                              arrowsize=15, alpha=0.7)
        
        nx.draw_networkx_labels(G, pos, labels={n: G.nodes[n]['label'] for n in G.nodes()})
        
        # Detailed edge labels only on small graphs (readability).
        if len(G) < 15:
            edge_labels = {(u, v): G.edges[u, v]['label'] for u, v in G.edges()}
            nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=8)
        
        plt.title("流程图" + (" (最佳路径高亮)" if highlight_path else ""))
        plt.axis('off')
        plt.tight_layout()
        plt.show()
        
    def analyze_best_path(self, path: List[int]) -> Dict[str, Any]:
        """Compute performance metrics (time, cost, compliance) for a path."""
        if not path:
            return {"error": "空路径"}
            
        total_time = 0
        total_cost = 0
        non_compliant_steps = 0
        
        for i in range(len(path) - 1):
            from_act = path[i]
            to_act = path[i+1]
            trans = (from_act, to_act)
            
            # Accumulate time and cost with the same defaults as the reward.
            total_time += self.transition_times.get(trans, 24)
            total_cost += self.transition_costs.get(trans, 1000)
            
            # Compliance check per transition (see NOTE on is_compliant).
            is_compliant, _, _ = self.is_compliant(from_act, to_act)
            if not is_compliant:
                non_compliant_steps += 1
                
        compliance_rate = 1.0 - (non_compliant_steps / max(1, len(path) - 1))
        
        analysis = {
            "path_length": len(path),
            "total_time_hours": total_time,
            "avg_time_per_step": total_time / max(1, len(path) - 1),
            "total_cost": total_cost,
            "avg_cost_per_step": total_cost / max(1, len(path) - 1),
            "compliance_rate": compliance_rate,
            "activities": [self.id_to_activity[act] for act in path]
        }
        
        return analysis


class QLearningAgent:
    """Tabular Q-learning agent that learns preferred process paths."""

    def __init__(self, 
                 num_states: int, 
                 num_actions: int, 
                 learning_rate: float = 0.1, 
                 discount_factor: float = 0.95,
                 exploration_rate: float = 1.0, 
                 exploration_decay: float = 0.995,
                 min_exploration_rate: float = 0.01):
        """Set up the Q-table and the learning/exploration hyper-parameters.

        Args:
            num_states: size of the state space (number of activities).
            num_actions: size of the action space (number of activities).
            learning_rate: step size of the Q-value update.
            discount_factor: discount applied to future rewards.
            exploration_rate: initial epsilon for epsilon-greedy.
            exploration_decay: multiplicative epsilon decay factor.
            min_exploration_rate: lower bound on epsilon.
        """
        # Sparse Q-table: state -> action -> value, defaulting to 0.0.
        self.q_table = defaultdict(lambda: defaultdict(float))
        self.num_states = num_states
        self.num_actions = num_actions

        # Learning hyper-parameters.
        self.lr = learning_rate
        self.gamma = discount_factor
        self.epsilon = exploration_rate
        self.epsilon_decay = exploration_decay
        self.min_epsilon = min_exploration_rate

        # Learning-curve bookkeeping.
        self.training_stats = {
            'episode_rewards': [],
            'epsilons': [],
            'path_lengths': [],
        }

    def choose_action(self, state_id: int, valid_actions: List[int]) -> int:
        """Pick a successor activity via epsilon-greedy; -1 when none is valid.

        Args:
            state_id: current state id (activity id).
            valid_actions: ids of the valid successor activities.

        Returns:
            The chosen action id.
        """
        if not valid_actions:
            return -1

        # Explore with probability epsilon.
        if random.random() < self.epsilon:
            return random.choice(valid_actions)

        # Exploit: score the valid actions and keep the best-scoring ones.
        row = self.q_table[state_id]
        scored = {action: row.get(action, 0.0) for action in valid_actions}
        best = max(scored.values()) if scored else 0.0
        top_actions = [action for action, q in scored.items() if q >= best]

        # Break ties uniformly at random.
        return random.choice(top_actions)

    def learn(self, state_id: int, action_id: int, reward: float, 
              next_state_id: int, done: bool, next_valid_actions: List[int] = None) -> None:
        """Apply one temporal-difference update to Q(state, action).

        Args:
            state_id: state where the action was taken.
            action_id: action that was executed.
            reward: observed reward.
            next_state_id: resulting state id.
            done: whether a terminal state was reached.
            next_valid_actions: valid actions in the next state (optional).
        """
        # Bootstrapped value of the successor state (0 at terminal states).
        future_value = 0.0
        if not done and next_valid_actions:
            next_row = self.q_table[next_state_id]
            future_value = max(
                (next_row.get(a, 0.0) for a in next_valid_actions),
                default=0.0,
            )

        # Q(s,a) <- Q(s,a) + lr * [r + gamma * max_a' Q(s',a') - Q(s,a)]
        old_value = self.q_table[state_id][action_id]
        td_error = reward + self.gamma * future_value - old_value
        self.q_table[state_id][action_id] = old_value + self.lr * td_error

    def decay_exploration(self) -> None:
        """Shrink epsilon toward its floor, shifting from exploring to exploiting."""
        self.epsilon = max(self.min_epsilon, self.epsilon * self.epsilon_decay)

    def save_model(self, filepath: str) -> None:
        """Pickle the Q-table, training stats and hyper-parameters to *filepath*."""
        # defaultdicts with lambda factories cannot be pickled; flatten first.
        payload = {
            'q_table': {state: dict(actions) for state, actions in self.q_table.items()},
            'training_stats': self.training_stats,
            'hyperparams': {
                'lr': self.lr,
                'gamma': self.gamma,
                'epsilon': self.epsilon,
                'min_epsilon': self.min_epsilon,
            }
        }

        with open(filepath, 'wb') as f:
            pickle.dump(payload, f)

    def load_model(self, filepath: str) -> None:
        """Restore a saved model; silently does nothing if the file is missing."""
        if not os.path.exists(filepath):
            return

        with open(filepath, 'rb') as f:
            payload = pickle.load(f)

        # Merge the stored Q-values back into the sparse table.
        for state, actions in payload.get('q_table', {}).items():
            self.q_table[state].update(actions)

        # Restore training statistics and hyper-parameters when present.
        self.training_stats = payload.get('training_stats', self.training_stats)

        hp = payload.get('hyperparams', {})
        self.lr = hp.get('lr', self.lr)
        self.gamma = hp.get('gamma', self.gamma)
        self.epsilon = hp.get('epsilon', self.epsilon)
        self.min_epsilon = hp.get('min_epsilon', self.min_epsilon)


class ProcessOptimizer:
    """Process optimizer: uses tabular Q-learning to find the best path
    through the mined process model.
    """
    def __init__(self, environment: ProcessMiningEnvironment, verbose: bool = True):
        """
        Initialize the process optimizer.

        Args:
            environment: process mining environment providing states,
                valid actions and step rewards
            verbose: whether to print detailed training information
        """
        self.env = environment
        self.verbose = verbose
        
        # Initialize the agent. States and actions share the activity-ID space,
        # so both dimensions equal the number of distinct activities.
        self.agent = QLearningAgent(
            num_states=self.env.num_activities,
            num_actions=self.env.num_activities,
            learning_rate=0.1,
            discount_factor=0.95,
            exploration_rate=1.0,
            exploration_decay=0.995,
            min_exploration_rate=0.01
        )
        
        # Per-run training metrics (currently unused by the methods below)
        self.training_history = []
        
    def train(self, num_episodes: int = 1000, 
             eval_frequency: int = 100,
             save_path: Optional[str] = None) -> None:
        """
        Train the Q-learning agent.

        Args:
            num_episodes: number of training episodes
            eval_frequency: how often (in episodes) to log progress
            save_path: optional path to save the trained model
        """
        print(f"开始训练流程优化智能体，总训练回合: {num_episodes}")
        
        for episode in range(num_episodes):
            # Reset the environment; env.reset() returns the start state ID.
            state_id = self.env.reset()
            done = False
            total_reward = 0
            steps = 0
            
            while not done:
                # Ask the environment which transitions are allowed here.
                valid_actions = self.env.get_valid_actions(state_id)
                
                if not valid_actions:
                    # Dead end: no outgoing transition from this activity.
                    if self.verbose and (episode + 1) % eval_frequency == 0:
                        print(f"  回合 {episode + 1}: 在活动 {self.env.id_to_activity[state_id]} 遇到死胡同")
                    break
                
                # Epsilon-greedy action selection over the valid actions.
                action_id = self.agent.choose_action(state_id, valid_actions)
                
                if action_id == -1:
                    # Agent signalled an invalid choice — should not happen.
                    break
                
                # Execute the action; env.step returns (state_dict, reward, done, info).
                next_state, reward, done, info = self.env.step(action_id)
                next_state_id = next_state['activity_id']
                
                # Valid actions in the next state are needed for the Q-update's
                # max term; terminal states contribute no bootstrap value.
                next_valid_actions = [] if done else self.env.get_valid_actions(next_state_id)
                
                # TD update of Q(state, action).
                self.agent.learn(state_id, action_id, reward, next_state_id, done, next_valid_actions)
                
                # Advance to the next state and accumulate metrics.
                state_id = next_state_id
                total_reward += reward
                steps += 1
                
                # Hard cap to prevent infinite loops in cyclic models.
                if steps >= self.env.max_steps:
                    break
            
            # Decay exploration once per episode.
            self.agent.decay_exploration()
            
            # Record per-episode metrics on the agent's stats dict.
            self.agent.training_stats['episode_rewards'].append(total_reward)
            self.agent.training_stats['epsilons'].append(self.agent.epsilon)
            self.agent.training_stats['path_lengths'].append(steps)
            
            # Periodically print training status.
            if self.verbose and (episode + 1) % eval_frequency == 0:
                # Average reward/steps over the last eval_frequency episodes.
                avg_reward = sum(self.agent.training_stats['episode_rewards'][-eval_frequency:]) / eval_frequency
                avg_steps = sum(self.agent.training_stats['path_lengths'][-eval_frequency:]) / eval_frequency
                
                print(f"回合 {episode + 1}/{num_episodes} | 平均奖励: {avg_reward:.1f} | 平均步数: {avg_steps:.1f} | 探索率: {self.agent.epsilon:.3f}")
                
                # Every 5th evaluation, also extract and print the greedy path.
                if (episode + 1) % (eval_frequency * 5) == 0:
                    best_path = self.get_best_path()
                    if best_path:
                        path_activities = [self.env.id_to_activity[a] for a in best_path]
                        print(f"  当前最佳路径: {' -> '.join(path_activities[:5])}... (共{len(best_path)}步)")
            
        # Save the model after training if a path was given.
        if save_path:
            self.agent.save_model(save_path)
            print(f"模型已保存到 {save_path}")
            
        print("训练完成")
        
    def get_best_path(self, start_activity: Optional[str] = None) -> List[int]:
        """
        Extract the learned best (greedy) path.

        Args:
            start_activity: optional start activity name; falls back to the
                environment's first recorded start activity.

        Returns:
            List of activity IDs along the greedy path.
        """
        if start_activity and start_activity in self.env.activity_to_id:
            start_id = self.env.activity_to_id[start_activity]
        else:
            # Use the most common start activity.
            # NOTE(review): assumes env.start_activities[0] is the most frequent
            # start — confirm against ProcessMiningEnvironment's construction.
            start_id = self.env.start_activities[0]
            
        current_id = start_id
        path = [current_id]
        visited = {current_id}  # cycle detection
        
        for _ in range(self.env.max_steps):
            valid_actions = self.env.get_valid_actions(current_id)
            
            # Filter out already-visited activities to prevent loops.
            valid_actions = [a for a in valid_actions if a not in visited]
            
            if not valid_actions:
                if current_id in self.env.end_activity_ids:
                    break  # normal termination
                else:
                    if len(path) > 1:
                        # Backtrack one step to look for an alternative route.
                        # The dead-end node stays in `visited`, so it will not
                        # be chosen again.
                        current_id = path[-2]
                        path = path[:-1]
                        continue
                    else:
                        # Cannot continue from the start node.
                        break
            
            # Greedy action selection by Q-value (no exploration).
            q_values = {a: self.agent.q_table[current_id].get(a, 0.0) for a in valid_actions}
            if not q_values:
                break
                
            best_action = max(q_values, key=q_values.get)
            
            current_id = best_action
            path.append(current_id)
            visited.add(current_id)
            
            if current_id in self.env.end_activity_ids:
                break  # reached a terminal activity
        
        return path
        
    def visualize_training_progress(self) -> None:
        """Plot episode rewards, exploration-rate decay and path lengths."""
        if not self.agent.training_stats['episode_rewards']:
            print("没有训练数据可视化")
            return
            
        fig, axs = plt.subplots(3, 1, figsize=(12, 15))
        
        # Reward curve
        rewards = self.agent.training_stats['episode_rewards']
        axs[0].plot(rewards)
        axs[0].set_title('训练回合奖励')
        axs[0].set_xlabel('回合')
        axs[0].set_ylabel('总奖励')
        
        # Moving-average overlay (window is 0 for fewer than 10 episodes,
        # in which case the overlay is skipped).
        window = min(100, len(rewards) // 10)
        if window > 0:
            moving_avg = pd.Series(rewards).rolling(window=window).mean()
            axs[0].plot(moving_avg, color='red', linewidth=2)
            axs[0].legend(['回合奖励', f'{window}回合移动平均'])
        
        # Exploration-rate decay
        axs[1].plot(self.agent.training_stats['epsilons'])
        axs[1].set_title('探索率衰减')
        axs[1].set_xlabel('回合')
        axs[1].set_ylabel('探索率 (ε)')
        
        # Path length per episode
        axs[2].plot(self.agent.training_stats['path_lengths'])
        axs[2].set_title('路径长度')
        axs[2].set_xlabel('回合')
        axs[2].set_ylabel('步数')
        
        plt.tight_layout()
        plt.show()
        
    def compare_paths(self, paths: List[List[int]], names: Optional[List[str]] = None) -> None:
        """Compare the performance of multiple paths (table + bar charts).

        Args:
            paths: list of paths, each a list of activity IDs
            names: optional display names, auto-generated when omitted
        """
        if not paths:
            return
            
        if not names:
            names = [f"路径 {i+1}" for i in range(len(paths))]
            
        # Analyze each path via the environment.
        analyses = []
        for path, name in zip(paths, names):
            analysis = self.env.analyze_best_path(path)
            analysis['name'] = name
            analyses.append(analysis)
            
        # Build the comparison table.
        compare_data = {
            '路径名称': [a['name'] for a in analyses],
            '步数': [a['path_length'] for a in analyses],
            '总时间(小时)': [f"{a['total_time_hours']:.1f}" for a in analyses],
            '平均每步时间': [f"{a['avg_time_per_step']:.1f}" for a in analyses],
            '总成本': [f"${a['total_cost']:.0f}" for a in analyses],
            '平均每步成本': [f"${a['avg_cost_per_step']:.0f}" for a in analyses],
            '合规率': [f"{a['compliance_rate']*100:.1f}%" for a in analyses],
        }
        
        # Display the comparison table.
        df = pd.DataFrame(compare_data)
        print("\n路径比较:")
        print(df.to_string(index=False))
        
        # Visual comparison: 2x2 grid of bar charts.
        fig, axs = plt.subplots(2, 2, figsize=(14, 10))
        
        # Step count
        axs[0, 0].bar(compare_data['路径名称'], [a['path_length'] for a in analyses])
        axs[0, 0].set_title('路径步数')
        axs[0, 0].set_ylabel('步数')
        
        # Total time
        axs[0, 1].bar(compare_data['路径名称'], [a['total_time_hours'] for a in analyses])
        axs[0, 1].set_title('总时间(小时)')
        axs[0, 1].set_ylabel('小时')
        
        # Total cost
        axs[1, 0].bar(compare_data['路径名称'], [a['total_cost'] for a in analyses])
        axs[1, 0].set_title('总成本')
        axs[1, 0].set_ylabel('成本单位')
        
        # Compliance rate (a ratio in [0, 1])
        axs[1, 1].bar(compare_data['路径名称'], [a['compliance_rate'] for a in analyses])
        axs[1, 1].set_title('合规率')
        axs[1, 1].set_ylabel('比率')
        axs[1, 1].set_ylim([0, 1])
        
        plt.tight_layout()
        plt.show()
        
        # Print the full activity sequence of each path.
        print("\n每条路径的详细活动序列:")
        for analysis in analyses:
            print(f"\n{analysis['name']}:")
            print(" -> ".join(analysis['activities']))


def generate_synthetic_process_log(num_cases: int = 500, 
                                 variants_ratio: Optional[Dict[str, float]] = None,
                                 noise_level: float = 0.1) -> pd.DataFrame:
    """
    Generate a synthetic purchase-to-pay process event log.

    Args:
        num_cases: number of cases to generate
        variants_ratio: proportions of the process variants
            ('standard', 'expedited', 'approval', 'rejected'); normalized
            automatically if they do not sum to 1
        noise_level: probability of applying one random mutation
            (insert / repeat / delete) to a case's activity path

    Returns:
        Event log as a pandas DataFrame with one row per event
        (columns: case_id, activity, timestamp, resource, cost, amount,
        department, priority, requester).
    """
    # Default process variants and their proportions.
    if variants_ratio is None:
        variants_ratio = {
            'standard': 0.6,    # standard procurement process
            'expedited': 0.15,  # expedited procurement process
            'approval': 0.15,   # multi-level approval process
            'rejected': 0.1     # rejected-and-resubmitted process
        }
    
    # Normalize proportions if they do not sum to 1.
    if abs(sum(variants_ratio.values()) - 1.0) > 0.01:
        print("警告: 变体比例总和不为1，将自动调整")
        total = sum(variants_ratio.values())
        variants_ratio = {k: v/total for k, v in variants_ratio.items()}
    
    # Base activity set (internal label -> Chinese description; currently
    # kept for reference, only the labels are used below).
    activities = {
        'PR_Creation': '采购申请创建',
        'PR_Review': '采购申请审核',
        'PR_Approval': '采购申请批准',
        'PR_Rejection': '采购申请拒绝',
        'Vendor_Selection': '供应商选择',
        'Quotation_Comparison': '报价比较',
        'PO_Creation': '采购订单创建',
        'PO_Review': '采购订单审核',
        'PO_Approval': '采购订单批准',
        'PO_Rejection': '采购订单拒绝',
        'PO_Sending': '采购订单发送',
        'Goods_Receipt': '物品接收',
        'Quality_Check': '质量检查',
        'Invoice_Receipt': '发票接收',
        'Invoice_Verification': '发票验证',
        'Payment_Approval': '付款批准',
        'Payment_Execution': '付款执行',
        'Process_Complete': '流程完成'
    }
    
    # Activity sequences per process variant.
    process_variants = {
        'standard': [
            'PR_Creation', 'PR_Review', 'PR_Approval', 'Vendor_Selection', 
            'Quotation_Comparison', 'PO_Creation', 'PO_Review', 'PO_Approval', 
            'PO_Sending', 'Goods_Receipt', 'Quality_Check', 'Invoice_Receipt', 
            'Invoice_Verification', 'Payment_Approval', 'Payment_Execution', 'Process_Complete'
        ],
        'expedited': [
            'PR_Creation', 'PR_Approval', 'PO_Creation', 'PO_Approval', 
            'PO_Sending', 'Goods_Receipt', 'Invoice_Receipt', 
            'Payment_Approval', 'Payment_Execution', 'Process_Complete'
        ],
        'approval': [
            'PR_Creation', 'PR_Review', 'PR_Approval', 'Vendor_Selection', 
            'PO_Creation', 'PO_Review', 'PO_Review', 'PO_Approval', 'PO_Approval',  # extra approval steps
            'PO_Sending', 'Goods_Receipt', 'Quality_Check', 'Invoice_Receipt', 
            'Invoice_Verification', 'Payment_Approval', 'Payment_Approval',  # extra payment approval
            'Payment_Execution', 'Process_Complete'
        ],
        'rejected': [
            'PR_Creation', 'PR_Review', 'PR_Rejection', 'PR_Creation',  # resubmission
            'PR_Review', 'PR_Approval', 'Vendor_Selection', 'PO_Creation', 
            'PO_Review', 'PO_Rejection', 'PO_Creation', 'PO_Review',  # resubmission
            'PO_Approval', 'PO_Sending', 'Goods_Receipt', 'Process_Complete'
        ]
    }
    
    # Per-activity duration (hours) and cost ranges.
    activity_params = {
        'PR_Creation': {'time': (0.5, 2), 'cost': (10, 50)},
        'PR_Review': {'time': (0.5, 3), 'cost': (20, 60)},
        'PR_Approval': {'time': (1, 8), 'cost': (30, 80)},
        'PR_Rejection': {'time': (0.5, 1), 'cost': (10, 30)},
        'Vendor_Selection': {'time': (2, 24), 'cost': (40, 200)},
        'Quotation_Comparison': {'time': (1, 8), 'cost': (30, 100)},
        'PO_Creation': {'time': (0.5, 3), 'cost': (20, 60)},
        'PO_Review': {'time': (0.5, 4), 'cost': (20, 70)},
        'PO_Approval': {'time': (1, 12), 'cost': (30, 90)},
        'PO_Rejection': {'time': (0.5, 1), 'cost': (10, 30)},
        'PO_Sending': {'time': (0.2, 1), 'cost': (5, 20)},
        'Goods_Receipt': {'time': (4, 72), 'cost': (20, 100)},
        'Quality_Check': {'time': (1, 8), 'cost': (40, 120)},
        'Invoice_Receipt': {'time': (1, 24), 'cost': (10, 30)},
        'Invoice_Verification': {'time': (0.5, 4), 'cost': (20, 60)},
        'Payment_Approval': {'time': (1, 24), 'cost': (30, 80)},
        'Payment_Execution': {'time': (0.5, 4), 'cost': (10, 40)},
        'Process_Complete': {'time': (0.1, 0.5), 'cost': (5, 15)}
    }
    
    # Draw a variant for each case.
    case_variants = np.random.choice(
        list(variants_ratio.keys()),
        size=num_cases,
        p=list(variants_ratio.values())
    )
    
    # Generate the event log.
    event_log = []
    case_id = 1000  # starting case ID
    
    for variant in case_variants:
        # BUG FIX: copy the variant's activity sequence. The original code
        # aliased the shared list in process_variants, so noise mutations
        # (insert/pop) corrupted the template for every subsequent case.
        activities_path = list(process_variants[variant])
        
        # Optionally apply one random mutation to this case's path.
        if random.random() < noise_level:
            # Insert, repeat or delete a random activity (keeping the first
            # and last activities intact).
            noise_type = random.choice(['insert', 'repeat', 'delete'])
            if noise_type == 'insert' and len(activities_path) > 2:
                insert_pos = random.randint(1, len(activities_path) - 2)
                insert_act = random.choice(list(activity_params.keys()))
                activities_path.insert(insert_pos, insert_act)
            elif noise_type == 'repeat' and len(activities_path) > 2:
                repeat_pos = random.randint(1, len(activities_path) - 2)
                activities_path.insert(repeat_pos, activities_path[repeat_pos])
            elif noise_type == 'delete' and len(activities_path) > 3:
                delete_pos = random.randint(1, len(activities_path) - 2)
                activities_path.pop(delete_pos)
        
        # Case-level attributes shared by all events of the case.
        case_attrs = {
            'amount': round(random.uniform(100, 10000), 2),
            'department': random.choice(['IT', 'HR', 'Finance', 'Operations', 'Sales', 'Marketing']),
            'priority': random.choice(['Low', 'Medium', 'High', 'Critical']),
            'requester': f"User_{random.randint(1000, 9999)}"
        }
        
        # Random start time within (business hours of) the year 2023.
        start_time = datetime(2023, 1, 1) + timedelta(
            days=random.randint(0, 364),
            hours=random.randint(8, 17),
            minutes=random.randint(0, 59)
        )
        
        # Emit one event per activity, advancing the clock each step.
        current_time = start_time
        
        for activity in activities_path:
            # Parameter lookup with a fallback for unknown activities.
            time_range = activity_params.get(activity, {'time': (0.5, 4)})['time']
            cost_range = activity_params.get(activity, {'cost': (10, 50)})['cost']
            
            # Advance time by a random duration within the activity's range.
            time_delay = random.uniform(time_range[0], time_range[1])
            current_time += timedelta(hours=time_delay)
            
            # Draw the event's cost.
            cost = random.uniform(cost_range[0], cost_range[1])
            
            event = {
                'case_id': f"C{case_id}",
                'activity': activity,
                'timestamp': current_time,
                'resource': f"User_{random.randint(100, 999)}",
                'cost': round(cost, 2),
                'amount': case_attrs['amount'],
                'department': case_attrs['department'],
                'priority': case_attrs['priority'],
                'requester': case_attrs['requester']
            }
            
            event_log.append(event)
        
        case_id += 1
    
    return pd.DataFrame(event_log)


def main():
    """主函数：展示整个流程优化过程"""
    # 1. 生成合成事件日志
    print("生成合成事件日志...")
    log_df = generate_synthetic_process_log(num_cases=500)
    print(f"生成了 {len(log_df)} 条事件记录，{log_df['case_id'].nunique()} 个案例")
    
    # 显示事件日志样本
    print("\n事件日志样本:")
    print(log_df.head())
    
    # 2. 设置合规规则
    compliance_rules = {
        'required_sequences': [
            ('PR_Approval', 'PO_Creation'),  # PR必须在PO之前批准
            ('Quality_Check', 'Payment_Approval')  # 付款前必须进行质检
        ],
        'forbidden_sequences': [
            ('PR_Rejection', 'PO_Creation'),  # 被拒绝的PR不能直接创建PO
            ('PR_Creation', 'PO_Approval')    # 不能跳过PO创建直接批准
        ],
        'attribute_rules': [
            {
                'attribute': 'amount', 
                'condition': {'op': '>', 'value': 5000},
                'required_activity': 'PO_Review'  # 大额采购需要审核
            }
        ]
    }
    
    # 3. 创建流程挖掘环境
    print("\n初始化流程挖掘环境...")
    env = ProcessMiningEnvironment(
        event_log=log_df, 
        end_activities=['Process_Complete'],
        max_steps=25,
        compliance_rules=compliance_rules,
        time_weight=0.5,
        cost_weight=0.3,
        compliance_weight=0.2
    )
    
    # 4. 创建和训练流程优化器
    print("\n创建流程优化器并开始训练...")
    optimizer = ProcessOptimizer(env)
    
    # 训练智能体
    optimizer.train(num_episodes=2000, eval_frequency=100)
    
    # 5. 可视化训练进度
    print("\n可视化训练进度...")
    optimizer.visualize_training_progress()
    
    # 6. 获取学习到的最佳路径
    print("\n提取最佳路径...")
    best_path = optimizer.get_best_path()
    best_path_activities = [env.id_to_activity[act_id] for act_id in best_path]
    
    print