import torch
from sb3_contrib import RecurrentPPO

# Load the trained model from disk (expects "rppo_process_model.zip" next to this script).
model = RecurrentPPO.load("rppo_process_model")

# Initialize the LSTM hidden state; None tells predict() to start from a zeroed state.
lstm_state = None
# Start with done=True so the first loop iteration resets the environment.
done = True

# 定义复合奖励函数
def composite_reward(current_activity, next_activity, case_events):
    """Return a weighted composite reward for a process-mining transition.

    Combines four signals (weights tunable per business needs):
      * transition compliance: +1.0 for a valid transition, -1.0 otherwise
      * time efficiency: penalty proportional to the next activity's duration
      * resource utilization: bonus scaled by utilization of the next activity
      * business goal: +2.0 when the next activity is a goal activity

    Note: ``case_events`` is accepted for interface compatibility but is not
    used in the current reward formulation.
    """
    is_compliant = is_valid_transition(current_activity, next_activity)
    hits_goal = is_goal_activity(next_activity)

    # (weight, component) pairs — weights sum to 1.0.
    weighted_components = (
        (0.4, 1.0 if is_compliant else -1.0),                     # path compliance
        (0.3, -0.1 * get_activity_duration(next_activity)),       # time efficiency
        (0.2, 0.5 * get_resource_utilization(next_activity)),     # resource utilization
        (0.1, 2.0 if hits_goal else 0.0),                         # business goal
    )
    return sum(weight * component for weight, component in weighted_components)

    
# Simulate real-time prediction for 10 steps.
# NOTE(review): `env` is never defined in this file — it must be created
# (e.g. via gym.make / a custom process env) before this loop can run; confirm
# where it is supposed to come from.
for _ in range(10):
    if done:
        obs = env.reset()
        # Clear the recurrent state at episode boundaries so the LSTM does not
        # carry memory across episodes.
        lstm_state = None
    
    # Predict with the carried-over LSTM state; predict() returns the action
    # and the updated hidden state to feed back on the next call.
    action, lstm_state = model.predict(
        obs, 
        state=lstm_state,
        deterministic=True
    )
    
    # assumes the old 4-tuple Gym step API (obs, reward, done, info) — verify
    # against the env; Gymnasium's 5-tuple API would unpack incorrectly here.
    obs, _, done, _ = env.step(action)
    print(f"Selected action: {action}")