'''Reinforcement-learning formulation of a business process from an event log.

State (s): the current activity/status in the process.
Action (a): the next activity to execute.
Reward (r): business value (time reduction, cost saving, quality improvement).
Policy (π): a strategy mapping states to optimal next activities.
'''

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import pandas as pd
import numpy as np
from collections import deque
import random

class ProcessEnvironment:
    """Frame a process-mining event log as a reinforcement-learning environment.

    States are one-hot encodings of the current activity, actions are the
    next activities observed in the log, and rewards combine several
    business objectives (time, cost, quality, compliance).
    """

    def __init__(self, event_log):
        """
        Parameters
        ----------
        event_log : pd.DataFrame
            Must contain at least 'case_id', 'activity' and 'timestamp'
            columns. Optional 'duration', 'cost', 'quality_score' and
            'compliance_flag' columns enrich the reward signal.
        """
        self.event_log = event_log
        # Sorted for a stable, reproducible activity -> index mapping.
        self.activity_list = sorted(event_log['activity'].unique())
        self.activity_idx = {act: i for i, act in enumerate(self.activity_list)}
        self.num_activities = len(self.activity_list)

        # Extract process structure (directly-follows graph) and per-case data.
        self.transitions = self._extract_transitions()
        self.case_data = self._organize_by_case()

    def _extract_transitions(self):
        """Build the directly-follows frequency map of the log.

        Returns
        -------
        dict[str, dict[str, int]]
            ``transitions[a][b]`` is the number of times activity ``b``
            directly followed activity ``a`` in any case.
        """
        transitions = {}
        # A single groupby pass instead of one boolean-mask filter per case
        # (the original was O(cases * events)).
        for _, case_events in self.event_log.groupby('case_id'):
            activities = case_events.sort_values('timestamp')['activity'].tolist()
            for src, dst in zip(activities, activities[1:]):
                counts = transitions.setdefault(src, {})
                counts[dst] = counts.get(dst, 0) + 1
        return transitions

    def _organize_by_case(self):
        """Return a mapping of case_id -> chronologically sorted events."""
        return {
            case_id: case_events.sort_values('timestamp')
            for case_id, case_events in self.event_log.groupby('case_id')
        }

    def get_state_representation(self, current_activity, case_attributes=None):
        """Encode the current activity as a one-hot state vector.

        Parameters
        ----------
        current_activity : str
            Activity to encode. Unknown activities yield an all-zero vector.
        case_attributes : optional
            Placeholder for additional numeric case features (not yet used).

        Returns
        -------
        np.ndarray
            Vector of shape ``(num_activities,)``.
        """
        state = np.zeros(self.num_activities)
        if current_activity in self.activity_idx:
            state[self.activity_idx[current_activity]] = 1

        if case_attributes is not None:
            # TODO: map case attributes to numeric features once their
            # schema is known; currently ignored.
            pass

        return state

    def get_available_actions(self, current_activity):
        """Return every next activity ever observed after *current_activity*."""
        if current_activity in self.transitions:
            return list(self.transitions[current_activity].keys())
        return []

    def calculate_reward(self, current_activity, next_activity, case_data, weights=None):
        """Calculate a comprehensive reward based on multiple business objectives.

        Parameters
        ----------
        current_activity : str
            The current activity in the process.
        next_activity : str
            The next activity to be executed.
        case_data : pd.DataFrame
            Events of the current case (same columns as the event log).
        weights : dict, optional
            Weights for the reward components ('time', 'cost', 'quality',
            'compliance'). Defaults to 0.4 / 0.3 / 0.2 / 0.1.

        Returns
        -------
        float
            Combined reward value representing business value. Returns 0 when
            either activity cannot be located in *case_data*.
        """
        if weights is None:
            weights = {
                'time': 0.4,      # Time efficiency
                'cost': 0.3,      # Cost reduction
                'quality': 0.2,   # Quality improvement
                'compliance': 0.1 # Regulatory compliance
            }

        time_reward = 0
        cost_reward = 0
        quality_reward = 0
        compliance_reward = 0

        # Locate the row of each activity in the case.
        # NOTE(review): .index[0] takes the FIRST occurrence of each activity;
        # for traces with repeated activities this may not be the occurrence
        # the caller means — confirm against the intended trace semantics.
        try:
            current_idx = case_data[case_data['activity'] == current_activity].index[0]
            next_idx = case_data[case_data['activity'] == next_activity].index[0]
        except (IndexError, KeyError):
            return 0  # Activities not found in this case.

        # 1. Time efficiency: negative reward proportional to the next
        #    activity's duration, normalized by the case's mean duration.
        if 'duration' in case_data.columns:
            duration = case_data.loc[next_idx, 'duration']
            avg_duration = case_data['duration'].mean()
            normalized_duration = duration / (avg_duration if avg_duration > 0 else 1)
            time_reward = -normalized_duration

        # 2. Cost efficiency: analogous negative, mean-normalized cost.
        if 'cost' in case_data.columns:
            activity_cost = case_data.loc[next_idx, 'cost']
            avg_cost = case_data['cost'].mean()
            normalized_cost = activity_cost / (avg_cost if avg_cost > 0 else 1)
            cost_reward = -normalized_cost

        # 3. Quality improvement (assumes a 0-100 scale, higher is better).
        if 'quality_score' in case_data.columns:
            quality = case_data.loc[next_idx, 'quality_score']
            normalized_quality = quality / 100.0
            quality_reward = normalized_quality

        # 4. Compliance: explicit flag when available, otherwise fall back to
        #    transition frequency (common transitions count as compliant).
        if 'compliance_flag' in case_data.columns:
            compliance = case_data.loc[next_idx, 'compliance_flag']
            compliance_reward = 1.0 if compliance else -0.5
        else:
            if current_activity in self.transitions and next_activity in self.transitions[current_activity]:
                total_outgoing = sum(self.transitions[current_activity].values())
                transition_frequency = self.transitions[current_activity][next_activity] / total_outgoing
                compliance_reward = transition_frequency

        # 5. Weighted combination of the components.
        total_reward = (
            weights['time'] * time_reward +
            weights['cost'] * cost_reward +
            weights['quality'] * quality_reward +
            weights['compliance'] * compliance_reward
        )

        # 6. Business rules: penalize rework (revisiting an activity already
        #    executed up to and including the current one) ...
        if next_activity in case_data.loc[:current_idx, 'activity'].values:
            total_reward -= 0.5

        # ... and reward progress towards process completion.
        if self._is_towards_completion(next_activity):
            total_reward += 0.2

        return total_reward

    def _is_towards_completion(self, activity):
        """Return True if *activity* typically occurs near the end of a case.

        An activity counts as "towards completion" when its average relative
        position across all observed cases falls in the last third (> 0.67)
        of a trace, where 0.0 is the first event and 1.0 the last.

        Note: this was previously defined at module level by mistake, which
        made ``self._is_towards_completion(...)`` raise AttributeError; it is
        now a proper method of the class.
        """
        # Building the position profile scans every event; cache it so that
        # repeated reward calls don't re-scan the whole log each time.
        if not hasattr(self, '_completion_scores'):
            positions = {}
            for case in self.case_data.values():
                activities = case['activity'].tolist()
                n = len(activities)
                for i, act in enumerate(activities):
                    # A single-event case carries no ordering signal: midpoint.
                    score = i / (n - 1) if n > 1 else 0.5
                    positions.setdefault(act, []).append(score)
            self._completion_scores = {
                act: float(np.mean(scores)) for act, scores in positions.items()
            }
        score = self._completion_scores.get(activity)
        return score is not None and score > 0.67