import os
import gym
import numpy as np
import pandas as pd
import torch
from gym import spaces
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines3.common.callbacks import CheckpointCallback, EvalCallback
from sb3_contrib import RecurrentPPO
from typing import Dict, List, Tuple, Optional, Union, Any
import networkx as nx
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
'''
RecurrentPPO optimal-path optimization for process mining.

For process-mining problems with a sparse action space and a mixed state
space (sparse process nodes + continuous attributes), the following is an
optimized implementation scheme.
'''

class SparseProcessEnv(gym.Env):
    """
    Process-mining environment for a sparse action space and a mixed state
    space (one-hot process node + continuous node attributes).

    The agent walks the process graph discovered from an event log: at each
    step it chooses the next activity; the reward blends transition
    compliance, time efficiency, resource utilization and goal completion.
    """

    def __init__(self,
                 event_log: pd.DataFrame,
                 node_features: Dict[str, Dict[str, float]],
                 transition_matrix: Optional[Dict[str, Dict[str, float]]] = None,
                 continuous_features: Optional[List[str]] = None,
                 categorical_features: Optional[List[str]] = None,
                 max_steps: int = 20,
                 reward_weights: Optional[Dict[str, float]] = None):
        """
        Initialize the process environment.

        Args:
            event_log: event log with at least 'case_id', 'activity' and
                'timestamp' columns.
            node_features: per-node feature dicts {node name: {feature: value}}.
            transition_matrix: {source node: {target node: frequency}};
                mined from the event log when omitted.
            continuous_features: names of continuous feature columns.
            categorical_features: names of categorical feature columns.
            max_steps: maximum number of steps per episode.
            reward_weights: weights for the composite reward terms.
        """
        super().__init__()

        self.event_log = event_log
        self.node_features = node_features
        self.max_steps = max_steps

        # Activities and index mappings (sorted so the action space is stable).
        self.activities = sorted(event_log['activity'].unique())
        self.activity_to_idx = {act: i for i, act in enumerate(self.activities)}
        self.idx_to_activity = {i: act for act, i in self.activity_to_idx.items()}

        # Feature handling.
        self.continuous_features = continuous_features or []
        self.categorical_features = categorical_features or []
        self.feature_scalers = self._create_feature_scalers()

        # BUG FIX: nodes may expose different feature dicts (keys can differ
        # per node). Freeze a single sorted union of all keys so every state
        # vector has the same length and the same feature ordering.
        self.node_feature_keys = sorted(
            {key for feats in node_features.values() for key in feats}
        )

        # Transition matrix (mined from the log when not supplied).
        self.transition_matrix = transition_matrix or self._extract_transitions()

        # Directed process graph used for structural analysis.
        self.process_graph = self._build_process_graph()

        # Identify start and end activities.
        self.start_activities = self._identify_start_activities()
        self.end_activities = self._identify_end_activities()

        # State space: sparse node one-hot + continuous node attributes.
        self.node_feature_dim = self._get_node_feature_dimension()
        self.total_features_dim = len(self.activities) + self.node_feature_dim + 1  # +1 for normalized step

        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf,
            shape=(self.total_features_dim,),
            dtype=np.float32
        )

        # Action space: discrete, but only a sparse subset is valid per state.
        self.action_space = spaces.Discrete(len(self.activities))

        # Composite reward weights.
        self.reward_weights = reward_weights or {
            'compliance': 0.4,    # path compliance
            'time': 0.3,          # time efficiency
            'resource': 0.2,      # resource utilization
            'goal': 0.1           # goal completion
        }

        # Mutable episode state.
        self.current_activity = None
        self.activity_history = []
        self.current_step = 0
        self.case_features = {}
        self.current_case_id = None

    def _create_feature_scalers(self) -> Dict[str, StandardScaler]:
        """Fit a StandardScaler per continuous feature present in the log."""
        scalers = {}

        for feature in self.continuous_features:
            if feature in self.event_log.columns:
                scaler = StandardScaler()
                scaler.fit(self.event_log[feature].values.reshape(-1, 1))
                scalers[feature] = scaler

        return scalers

    def _extract_transitions(self) -> Dict[str, Dict[str, float]]:
        """Mine activity transition probabilities from the event log."""
        transitions = {}

        for case_id in self.event_log['case_id'].unique():
            case_events = self.event_log[self.event_log['case_id'] == case_id].sort_values('timestamp')
            activities = case_events['activity'].tolist()

            # Count directly-follows pairs within each case.
            for src, dst in zip(activities, activities[1:]):
                transitions.setdefault(src, {})
                transitions[src][dst] = transitions[src].get(dst, 0) + 1

        # Normalize counts to probabilities per source node.
        for src in transitions:
            total = sum(transitions[src].values())
            for dst in transitions[src]:
                transitions[src][dst] /= total

        return transitions

    def _build_process_graph(self) -> nx.DiGraph:
        """Build the directed process graph from the transition matrix."""
        G = nx.DiGraph()

        # Nodes: every known activity, even ones without transitions.
        for act in self.activities:
            G.add_node(act)

        # Edges weighted by transition probability.
        for src in self.transition_matrix:
            for dst, prob in self.transition_matrix[src].items():
                G.add_edge(src, dst, weight=prob)

        return G

    def _identify_start_activities(self) -> List[str]:
        """Identify start activities (graph structure first, heuristics second)."""
        # Method 1: nodes with no incoming edges.
        candidates = [node for node in self.process_graph.nodes() if self.process_graph.in_degree(node) == 0]

        # Fallback heuristic: activities that frequently begin a case
        # (covers graphs with cycles where every node has in-degree > 0).
        if not candidates:
            start_counts = {}
            for case_id in self.event_log['case_id'].unique():
                case_events = self.event_log[self.event_log['case_id'] == case_id].sort_values('timestamp')
                first_activity = case_events['activity'].iloc[0]
                start_counts[first_activity] = start_counts.get(first_activity, 0) + 1

            # Keep activities starting more than 5% of cases.
            candidates = [act for act, count in start_counts.items()
                          if count > len(self.event_log['case_id'].unique()) * 0.05]

        return candidates or [self.activities[0]]  # last resort: first activity

    def _identify_end_activities(self) -> List[str]:
        """Identify end activities (graph structure first, heuristics second)."""
        # Method 1: nodes with no outgoing edges.
        candidates = [node for node in self.process_graph.nodes() if self.process_graph.out_degree(node) == 0]

        # Fallback heuristic: activities that frequently end a case.
        if not candidates:
            end_counts = {}
            for case_id in self.event_log['case_id'].unique():
                case_events = self.event_log[self.event_log['case_id'] == case_id].sort_values('timestamp')
                last_activity = case_events['activity'].iloc[-1]
                end_counts[last_activity] = end_counts.get(last_activity, 0) + 1

            # Keep activities ending more than 5% of cases.
            candidates = [act for act, count in end_counts.items()
                          if count > len(self.event_log['case_id'].unique()) * 0.05]

        return candidates or [self.activities[-1]]  # last resort: last activity

    def _get_node_feature_dimension(self) -> int:
        """Return the node-feature vector length (size of the key union)."""
        return len(self.node_feature_keys)

    def _get_valid_actions(self, current_activity: str) -> List[int]:
        """Return action indices reachable from the given activity."""
        if current_activity not in self.transition_matrix:
            return []

        # Valid successors come straight from the mined transition matrix.
        valid_next = list(self.transition_matrix[current_activity].keys())
        return [self.activity_to_idx[act] for act in valid_next]

    def _get_state_representation(self) -> np.ndarray:
        """Build the flat observation vector for the current state."""
        # 1. Sparse node representation (one-hot over all activities).
        node_one_hot = np.zeros(len(self.activities), dtype=np.float32)
        if self.current_activity in self.activity_to_idx:
            node_one_hot[self.activity_to_idx[self.current_activity]] = 1.0

        # 2. Node features in the canonical key order; missing keys are 0.
        #    (Previously dict.values() was used, which crashed or misaligned
        #    when nodes had differing feature sets.)
        feats = self.node_features.get(self.current_activity, {})
        node_features = np.array(
            [float(feats.get(key, 0.0)) for key in self.node_feature_keys],
            dtype=np.float32
        )

        # 3. Current step, normalized to [0, 1].
        step_feature = np.array([self.current_step / self.max_steps], dtype=np.float32)

        # Concatenate all feature groups.
        state = np.concatenate([node_one_hot, node_features, step_feature])
        return state

    def _calculate_reward(self, action: int) -> float:
        """Compute the composite reward for taking `action` from the current state."""
        next_activity = self.idx_to_activity.get(action, None)

        # Base penalty for an action outside the activity index range.
        if next_activity is None:
            return -10.0

        # Penalty for a transition that never occurs in the log.
        # NOTE(review): step() still moves to the invalid activity afterwards;
        # keeping that behavior to preserve the trained dynamics — confirm intent.
        current = self.current_activity
        if current not in self.transition_matrix or next_activity not in self.transition_matrix.get(current, {}):
            return -5.0

        # --- Composite reward ---

        # 1. Path compliance: proportional to the mined transition probability.
        compliance_reward = self.transition_matrix[current].get(next_activity, 0) * 2.0

        # 2. Time efficiency: penalize long average durations.
        time_weight = self.node_features.get(next_activity, {}).get('avg_duration', 1.0)
        time_reward = -0.1 * time_weight

        # 3. Resource utilization: reward high node resource scores.
        resource_util = self.node_features.get(next_activity, {}).get('resource_util', 0.5)
        resource_reward = 0.5 * resource_util

        # 4. Goal bonus for reaching an end activity.
        goal_reward = 2.0 if next_activity in self.end_activities else 0.0

        # 5. Loop penalty for revisiting any of the last three activities.
        loop_penalty = -1.0 if next_activity in self.activity_history[-3:] else 0.0

        # Weighted combination (loop penalty applied unweighted).
        total_reward = (
            self.reward_weights['compliance'] * compliance_reward +
            self.reward_weights['time'] * time_reward +
            self.reward_weights['resource'] * resource_reward +
            self.reward_weights['goal'] * goal_reward +
            loop_penalty
        )

        return total_reward

    def reset(self) -> np.ndarray:
        """Reset the episode to a random start activity and return the observation."""
        self.current_activity = np.random.choice(self.start_activities)
        self.activity_history = [self.current_activity]
        self.current_step = 0

        return self._get_state_representation()

    def step(self, action: int) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
        """Advance one step; returns (observation, reward, done, info)."""
        self.current_step += 1
        # Reward is computed against the pre-transition state.
        reward = self._calculate_reward(action)

        # Move to the chosen activity (stay in place on an out-of-range action).
        next_activity = self.idx_to_activity.get(action, self.current_activity)
        self.current_activity = next_activity
        self.activity_history.append(next_activity)

        # Episode termination conditions.
        done = (
            self.current_step >= self.max_steps or      # step budget exhausted
            next_activity in self.end_activities or     # reached an end activity
            not self._get_valid_actions(next_activity)  # dead end
        )

        next_state = self._get_state_representation()

        # Diagnostics for masking / analysis.
        info = {
            'valid_actions': self._get_valid_actions(self.current_activity),
            'current_activity': self.current_activity,
            'step': self.current_step,
            'history': self.activity_history.copy()
        }

        return next_state, reward, done, info

    def render(self, mode='human'):
        """Print the current episode state to stdout."""
        if mode == 'human':
            print(f"\nStep: {self.current_step}")
            print(f"Current Activity: {self.current_activity}")
            print(f"History: {' -> '.join(self.activity_history)}")

            # Show the currently valid actions.
            valid_actions = self._get_valid_actions(self.current_activity)
            valid_activities = [self.idx_to_activity[idx] for idx in valid_actions]
            print(f"Valid Next Activities: {valid_activities}")

        return None

# 辅助功能：准备节点特征
def prepare_node_features(event_log: pd.DataFrame) -> Dict[str, Dict[str, float]]:
    """Extract per-activity (node) feature dictionaries from an event log.

    Features are computed only for columns present in the log: duration
    statistics, resource diversity/utilization, average cost, normalized
    quality, and the activity's average relative position within case traces.

    Args:
        event_log: event log with 'case_id', 'activity', 'timestamp' columns
            and optional 'duration', 'resource', 'cost', 'quality' columns.

    Returns:
        {activity: {feature name: value}}; activities with no computable
        features get a default feature set.
    """
    node_features = {}

    activities = event_log['activity'].unique()

    # PERF FIX: sort each case trace once, instead of re-filtering and
    # re-sorting every case for every activity (was O(activities * cases)).
    case_traces = {
        case_id: event_log[event_log['case_id'] == case_id]
        .sort_values('timestamp')['activity'].tolist()
        for case_id in event_log['case_id'].unique()
    }

    for activity in activities:
        activity_events = event_log[event_log['activity'] == activity]
        features = {}

        # 1. Time features.
        if 'duration' in activity_events.columns:
            features['avg_duration'] = activity_events['duration'].mean()
            features['max_duration'] = activity_events['duration'].max()
            # var() needs >= 2 samples (ddof=1 gives NaN otherwise).
            features['var_duration'] = activity_events['duration'].var() if len(activity_events) > 1 else 0

        # 2. Resource features.
        if 'resource' in activity_events.columns:
            # Resource diversity = distinct resources / events.
            resources = activity_events['resource'].nunique()
            features['resource_diversity'] = resources / len(activity_events)
            features['resource_util'] = 1.0 - features['resource_diversity']  # high utilization = low diversity

        # 3. Cost features.
        if 'cost' in activity_events.columns:
            features['avg_cost'] = activity_events['cost'].mean()

        # 4. Quality features.
        if 'quality' in activity_events.columns:
            features['avg_quality'] = activity_events['quality'].mean() / 100.0  # normalize to [0, 1]

        # 5. Position feature: average relative position of the activity's
        #    first occurrence within each trace that contains it.
        positions = []
        for trace in case_traces.values():
            if activity in trace:
                pos = trace.index(activity)
                positions.append(pos / (len(trace) - 1) if len(trace) > 1 else 0.5)

        if positions:
            features['avg_position'] = sum(positions) / len(positions)

        # Guarantee every activity carries a basic feature set.
        if not features:
            features = {'avg_duration': 1.0, 'resource_util': 0.5, 'avg_position': 0.5}

        node_features[activity] = features

    return node_features

def train_rppo_model(env_fn, model_path: str,
                     log_dir: str = "logs",
                     total_timesteps: int = 100000,
                     learning_rate: float = 3e-4,
                     batch_size: int = 64,
                     n_steps: int = 1024,
                     gamma: float = 0.99):
    """Train a RecurrentPPO model on the process environment.

    Args:
        env_fn: zero-argument callable returning a fresh environment.
        model_path: path where the final model is saved.
        log_dir: directory for checkpoints, eval results and tensorboard logs.
        total_timesteps: total environment steps to train for.
        learning_rate, batch_size, n_steps, gamma: PPO hyperparameters.

    Returns:
        (model, env): the trained model and the VecNormalize-wrapped env.
    """
    # Training environment with observation normalization.
    env = DummyVecEnv([env_fn])
    env = VecNormalize(env, norm_obs=True, norm_reward=False)

    # Evaluation environment sharing the training normalization statistics.
    eval_env = DummyVecEnv([env_fn])
    eval_env = VecNormalize(eval_env, norm_obs=True, norm_reward=False)
    eval_env.obs_rms = env.obs_rms  # share observation normalization
    # BUG FIX: obs_rms is shared by reference, so evaluation rollouts would
    # otherwise update (pollute) the training statistics.
    eval_env.training = False

    # Ensure the log directory exists (race-free, idiomatic).
    os.makedirs(log_dir, exist_ok=True)

    # Periodic checkpointing.
    checkpoint_callback = CheckpointCallback(
        save_freq=n_steps * 10,
        save_path=os.path.join(log_dir, "checkpoints"),
        name_prefix="rppo_model",
        save_vecnormalize=True
    )

    # Periodic evaluation, keeping the best model.
    eval_callback = EvalCallback(
        eval_env,
        best_model_save_path=os.path.join(log_dir, "best_model"),
        log_path=os.path.join(log_dir, "eval_results"),
        eval_freq=n_steps * 5,
        deterministic=True,
        render=False
    )

    # Build the recurrent policy model.
    model = RecurrentPPO(
        "MlpLstmPolicy",
        env,
        verbose=1,
        n_steps=n_steps,
        batch_size=batch_size,
        learning_rate=learning_rate,
        gamma=gamma,
        ent_coef=0.01,      # entropy coefficient (encourages exploration)
        vf_coef=0.5,        # value-function loss coefficient
        max_grad_norm=0.5,  # gradient clipping
        tensorboard_log=log_dir,
        policy_kwargs={
            "lstm_hidden_size": 64,  # LSTM hidden size
            "n_lstm_layers": 1,      # number of LSTM layers
            "net_arch": [64, dict(pi=[32], vf=[32])]  # network architecture
        }
    )

    # Train.
    model.learn(
        total_timesteps=total_timesteps,
        callback=[checkpoint_callback, eval_callback],
        tb_log_name="RecurrentPPO_SparseProcess"
    )

    # Persist the final model and the normalization statistics.
    model.save(model_path)
    env.save(os.path.join(log_dir, "vec_normalize.pkl"))

    return model, env

def generate_optimal_path(model, env_fn, start_activity: str = None,
                          max_steps: int = 20, deterministic: bool = True):
    """Roll out the trained policy from a given start activity.

    Args:
        model: trained RecurrentPPO model.
        env_fn: zero-argument callable returning a fresh environment.
        start_activity: optional start activity overriding the random one.
        max_steps: maximum rollout length.
        deterministic: whether to use deterministic actions.

    Returns:
        dict with 'path', 'total_reward', 'steps' and 'complete'.
    """
    # NOTE(review): observations here are not passed through the training
    # VecNormalize wrapper, so the policy sees unnormalized inputs — confirm
    # whether normalization stats should be applied before predict().
    env = env_fn()
    obs = env.reset()

    # Override the (randomly chosen) start activity if requested.
    if start_activity is not None:
        env.current_activity = start_activity
        env.activity_history = [start_activity]
        obs = env._get_state_representation()

    # LSTM state starts empty.
    lstm_states = None
    # BUG FIX: RecurrentPPO.predict needs the episode_start mask so the
    # policy resets its LSTM hidden state at the start of an episode.
    episode_starts = np.ones((1,), dtype=bool)
    done = False
    path = [env.current_activity]
    total_reward = 0

    # Step until termination or the step budget is exhausted.
    step = 0
    while not done and step < max_steps:
        # Predict the next action.
        action, lstm_states = model.predict(
            np.array([obs]),
            state=lstm_states,
            episode_start=episode_starts,
            deterministic=deterministic
        )

        # Apply the action.
        obs, reward, done, info = env.step(action[0])
        episode_starts = np.array([done], dtype=bool)
        total_reward += reward
        step += 1

        # Record the path.
        path.append(env.current_activity)

        # Stop early if no valid action remains.
        if not info['valid_actions']:
            break

    result = {
        'path': path,
        'total_reward': total_reward,
        'steps': step,
        'complete': env.current_activity in env.end_activities
    }

    return result

def analyze_process_model(event_log: pd.DataFrame, model, env_fn,
                          num_samples: int = 10, visualize: bool = True):
    """Analyse the trained policy against the historical event log.

    Generates one optimized path per start activity, prints summary
    statistics for both optimized and historical paths, and optionally
    visualizes up to three optimized paths.
    """
    probe_env = env_fn()

    # 1. One optimized rollout per start activity.
    optimal_paths = {}
    for start in probe_env.start_activities:
        outcome = generate_optimal_path(model, env_fn, start)
        optimal_paths[start] = outcome
        print(f"\n从 '{start}' 开始的最优路径:")
        print(f"路径: {' -> '.join(outcome['path'])}")
        print(f"总奖励: {outcome['total_reward']:.2f}")
        print(f"步数: {outcome['steps']}")
        print(f"完成: {'是' if outcome['complete'] else '否'}")

    # 2. Statistics over the first `num_samples` historical cases.
    print("\n原始流程统计:")
    original_paths = {}
    for case_id in event_log['case_id'].unique()[:num_samples]:
        trace = (event_log[event_log['case_id'] == case_id]
                 .sort_values('timestamp')['activity'].tolist())
        original_paths.setdefault(trace[0], []).append(trace)

    # Average historical path length per start activity.
    for start, traces in original_paths.items():
        avg_len = sum(len(t) for t in traces) / len(traces)
        print(f"从 '{start}' 开始的平均路径长度: {avg_len:.2f}")

    # 3. Optional visualization of up to three optimized paths.
    if visualize:
        for start in probe_env.start_activities[:min(3, len(probe_env.start_activities))]:
            if start in optimal_paths:
                _visualize_path(probe_env, optimal_paths[start]['path'])

    return optimal_paths

def _visualize_path(env, path):
    """Draw the given activity path as a directed graph.

    Start node is blue, end node green, intermediate nodes gray.
    """
    graph = nx.DiGraph()
    # One edge per consecutive activity pair.
    graph.add_edges_from(zip(path, path[1:]))

    plt.figure(figsize=(12, 8))
    layout = nx.spring_layout(graph, seed=42)

    def color_for(node):
        if node == path[0]:
            return 'lightblue'
        if node == path[-1]:
            return 'lightgreen'
        return 'lightgray'

    # Nodes, then edges and labels.
    nx.draw_networkx_nodes(graph, layout,
                           node_color=[color_for(n) for n in graph.nodes()],
                           node_size=700)
    nx.draw_networkx_edges(graph, layout, width=2, arrowsize=20)
    nx.draw_networkx_labels(graph, layout, font_size=12)

    # Title and display.
    plt.title(f"最优路径: {' -> '.join(path)}")
    plt.axis('off')
    plt.tight_layout()
    plt.show()

def main():
    """Entry point: load (or synthesize) an event log, train a RecurrentPPO
    model on the process environment, then analyse the optimized paths."""
    event_log_path = "process_event_log.csv"
    model_path = "rppo_process_model"
    log_dir = "process_mining_logs"

    # 1. Load the event log; fall back to a generated sample log.
    try:
        event_log = pd.read_csv(event_log_path)
        event_log['timestamp'] = pd.to_datetime(event_log['timestamp'])
    except FileNotFoundError:
        print(f"事件日志文件 '{event_log_path}' 未找到。创建一个样例事件日志...")
        event_log = _create_sample_event_log()
        event_log.to_csv(event_log_path, index=False)
        print(f"样例事件日志已保存至 '{event_log_path}'")

    # 2. Derive per-node features from the log.
    node_features = prepare_node_features(event_log)

    # 3. Environment factory.
    def make_env():
        return SparseProcessEnv(
            event_log=event_log,
            node_features=node_features,
            continuous_features=['duration', 'cost'],
            categorical_features=['resource'],
            max_steps=20
        )

    # 4. Train the model.
    print("\n开始训练RecurrentPPO模型...")
    model, _env = train_rppo_model(
        make_env,
        model_path=model_path,
        log_dir=log_dir,
        total_timesteps=50000  # reduced for a faster demo
    )

    # 5. Analyse the resulting process model.
    print("\n分析优化后的流程模型...")
    analyze_process_model(event_log, model, make_env)

    print("\n训练和评估完成!")

def _create_sample_event_log():
    """创建一个样例事件日志"""
    # 定义活动
    activities = ['申请提交', '初步审核', '材料补充', '经理审批', '财务审核', '审批通过', '申请拒绝']
    resources = ['员工A', '员工B', '经理A', '经理B', '财务A', '系统']
    
    # 创建数据列表
    data = []
    case_id_counter = 1001
    
    # 生成50个案例
    for i in range(50):
        case_id = f"CASE-{case_id_counter}"
        case_id_counter += 1
        
        # 决定案例路径类型
        path_type = np.random.choice(['正常', '需补充', '拒绝'], p=[0.6, 0.3, 0.1])
        
        # 基础时间
        timestamp = pd.Timestamp('2023-01-01') + pd.Timedelta(days=i//3)
        
        # 创建案例活动序列
        if path_type == '正常':
            activities_seq = ['申请提交', '初步审核', '经理审批', '财务审核', '审批通过']
        elif path_type == '需补充':
            activities_seq = ['申请提交', '初步审核', '材料补充', '初步审核', '经理审批', '财务审核', '审批通过']
        else:
            # 随机拒绝点
            reject_point = np.random.choice(['初审拒绝', '终审拒绝'])
            if reject_point == '初审拒绝':
                activities_seq = ['申请提交', '初步审核', '申请拒绝']
            else:
                activities_seq = ['申请提交', '初步审核', '经理审批', '申请拒绝']
        
        # 为每个活动创建事件
        for idx, activity in enumerate(activities_seq):
            # 持续时间和成本的随机化
            duration = max(5, np.random.normal(30, 10)) if activity != '申请提交' else max(2, np.random.normal(10, 3))
            cost = max(10, np.random.normal(50, 20)) if activity != '申请提交' else max(5, np.random.normal(20, 5))
            
            # 资源分配
            if activity == '申请提交':
                resource = np.random.choice(['员工A', '员工B'])
            elif activity in ['初步审核', '材料补充']:
                resource = np.random.choice(['员工A', '员工B', '经理A'])
            elif activity == '经理审批':
                resource = np.random.choice(['经理A', '经理B'])
            elif activity == '财务审核':
                resource = '财务A'
            else:
                resource = '系统'
                
            # 质量分数
            quality = min(100, max(50, np.random.normal(85, 10)))
            
            # 创建记录
            data.append({
                'case_id': case_id,
                'activity': activity,
                'timestamp': timestamp,
                'resource': resource,
                'duration': round(duration, 1),
                'cost': round(cost, 2),
                'quality': round(quality)
            })
            
            # 更新时间戳
            timestamp += pd.Timedelta(minutes=int(duration))
    
    # 创建DataFrame
    return pd.DataFrame(data)

# Script entry point.
if __name__ == "__main__":
    main()