"""
深度Q网络（DQN）调度器
实现基于强化学习的实时调度决策
"""

import numpy as np
import tensorflow as tf
from typing import List, Dict, Any, Tuple
import random
from collections import deque
from datetime import datetime, timedelta
from .base_scheduler import BaseScheduler

class DQNScheduler(BaseScheduler):
    """Deep Q-Network (DQN) scheduler.

    Learns a resource-assignment policy via Q-learning with an
    epsilon-greedy behavior policy, experience replay, and a
    periodically-synchronized target network, then greedily decodes a
    schedule from the trained Q-network.
    """

    def __init__(self, config: Dict[str, Any] = None):
        """Initialize hyperparameters, the Q-networks, and the replay buffer.

        Args:
            config: Optional hyperparameter overrides. Recognized keys:
                state_size, action_size, learning_rate, epsilon,
                epsilon_min, epsilon_decay, gamma, memory_size,
                batch_size, episodes.
        """
        super().__init__(config)
        self.state_size = self.config.get('state_size', 50)
        self.action_size = self.config.get('action_size', 20)
        self.learning_rate = self.config.get('learning_rate', 0.001)
        self.epsilon = self.config.get('epsilon', 1.0)            # exploration rate
        self.epsilon_min = self.config.get('epsilon_min', 0.01)
        self.epsilon_decay = self.config.get('epsilon_decay', 0.995)
        # Discount factor (previously hard-coded as 0.95 inside _replay).
        self.gamma = self.config.get('gamma', 0.95)
        self.memory_size = self.config.get('memory_size', 10000)
        self.batch_size = self.config.get('batch_size', 32)
        self.episodes = self.config.get('episodes', 1000)

        # Online and target Q-networks plus the experience-replay buffer.
        self.q_network = self._build_q_network()
        self.target_network = self._build_q_network()
        # Start both networks with identical weights so the earliest
        # bootstrap targets are not computed from an unrelated random net.
        self.target_network.set_weights(self.q_network.get_weights())
        self.memory = deque(maxlen=self.memory_size)

    def _build_q_network(self) -> tf.keras.Model:
        """Build and compile a feed-forward Q-network.

        Returns:
            A compiled Keras model mapping a state vector of length
            ``state_size`` to ``action_size`` linear Q-value outputs.
        """
        model = tf.keras.Sequential([
            tf.keras.layers.Dense(128, activation='relu', input_shape=(self.state_size,)),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(32, activation='relu'),
            tf.keras.layers.Dense(self.action_size, activation='linear')
        ])

        model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate),
            loss='mse'
        )

        return model

    def optimize(self,
                 resources: List[Dict],
                 appointments: List[Dict],
                 objectives: List[str]) -> Dict[str, Any]:
        """Run DQN optimization.

        Args:
            resources: List of resource dicts (expects 'id', 'status',
                'department_id' keys).
            appointments: List of appointment dicts (expects 'id',
                'department_id', 'preferred_time'; optional 'priority',
                'duration').
            objectives: List of optimization-objective names.

        Returns:
            Scheduling result dict with keys 'solution', 'objectives',
            'algorithm', 'episodes', and the final 'epsilon'.
        """
        self.resources = resources
        self.appointments = appointments

        if resources and appointments:
            # Train the DQN model, then schedule with the trained policy.
            self._train_dqn()
            solution = self._generate_solution()
        else:
            # Nothing to schedule; also avoids division by zero in the
            # action decoding, which divides by len(appointments).
            solution = {'assignments': {}}

        objectives_values = self.calculate_objectives(solution)

        return {
            'solution': solution,
            'objectives': objectives_values,
            'algorithm': 'DQN',
            'episodes': self.episodes,
            'epsilon': self.epsilon
        }

    def _train_dqn(self):
        """Train the DQN model over ``self.episodes`` episodes."""
        for episode in range(self.episodes):
            state = self._get_initial_state()
            total_reward = 0

            for step in range(len(self.appointments)):
                # Epsilon-greedy action selection.
                action = self._choose_action(state)

                # Apply the action to the environment.
                next_state, reward, done = self._step(state, action)

                # Store the transition in the replay buffer.
                self.memory.append((state, action, reward, next_state, done))

                # Train once enough experience has accumulated.
                if len(self.memory) > self.batch_size:
                    self._replay()

                state = next_state
                total_reward += reward

                if done:
                    break

            # Decay the exploration rate toward epsilon_min.
            if self.epsilon > self.epsilon_min:
                self.epsilon *= self.epsilon_decay

            # Periodically sync the target network with the online network.
            if episode % 100 == 0:
                self.target_network.set_weights(self.q_network.get_weights())

    def _get_initial_state(self) -> np.ndarray:
        """Encode the initial environment state as a fixed-size vector.

        Layout (for the default state_size of 50): slots 0-9 hold
        availability flags for up to 10 resources, slots 10-19 hold
        normalized priorities for up to 10 appointments, slot 20 holds
        the normalized current hour; remaining slots stay zero.
        """
        state = np.zeros(self.state_size)

        # Availability flags for up to the first 10 resources.
        for i, resource in enumerate(self.resources[:10]):
            if i >= self.state_size:
                break
            state[i] = 1 if resource['status'] == 'available' else 0

        # Normalized priorities (assumed scale 1-5) for up to 10 appointments.
        for i, appointment in enumerate(self.appointments[:10]):
            if 10 + i >= self.state_size:
                break
            state[10 + i] = appointment.get('priority', 1) / 5.0

        # Current hour of day, normalized to [0, 1).
        if self.state_size > 20:
            state[20] = datetime.now().hour / 24.0

        return state

    def _choose_action(self, state: np.ndarray) -> int:
        """Select an action epsilon-greedily from the online Q-network."""
        if random.random() <= self.epsilon:
            return random.randrange(self.action_size)
        q_values = self.q_network.predict(state.reshape(1, -1), verbose=0)
        # Coerce numpy integer to a plain Python int.
        return int(np.argmax(q_values[0]))

    def _decode_action(self, action: int) -> Tuple[int, int]:
        """Decode a flat action index into (appointment_index, resource_index).

        The flat index enumerates appointments fastest, resources slowest.
        ``resource_index`` may exceed the number of resources when
        ``action_size`` is larger than the appointment/resource grid;
        callers must validate it.
        """
        n_appointments = len(self.appointments)
        return action % n_appointments, action // n_appointments

    def _step(self, state: np.ndarray, action: int) -> Tuple[np.ndarray, float, bool]:
        """Execute one action: try to assign a resource to an appointment.

        Returns:
            (next_state, reward, done). Episodes terminate after a single
            assignment attempt.
        """
        appointment_index, resource_index = self._decode_action(action)

        if resource_index < len(self.resources):
            appointment = self.appointments[appointment_index]
            resource = self.resources[resource_index]

            # Reward for this (appointment, resource) pairing.
            reward = self._calculate_reward(appointment, resource)

            # Mark the chosen resource slot as consumed in the encoding
            # (only when the slot fits inside the state vector).
            next_state = state.copy()
            if resource_index < self.state_size:
                next_state[resource_index] = 0

            return next_state, reward, True

        # The action maps outside the resource grid: penalize and terminate.
        return state, -1, True

    def _calculate_reward(self, appointment: Dict, resource: Dict) -> float:
        """Compute the reward for assigning ``resource`` to ``appointment``."""
        reward = 0

        # Department-match reward.
        if resource['department_id'] == appointment['department_id']:
            reward += 10
        else:
            reward -= 5

        # Resource-availability reward.
        if resource['status'] == 'available':
            reward += 5
        else:
            reward -= 10

        # Higher-priority appointments earn proportionally more reward.
        priority = appointment.get('priority', 1)
        reward += priority * 2

        return reward

    def _replay(self):
        """Train the online network on a random minibatch from the buffer."""
        batch = random.sample(self.memory, self.batch_size)

        states = np.array([e[0] for e in batch])
        actions = np.array([e[1] for e in batch])
        rewards = np.array([e[2] for e in batch])
        next_states = np.array([e[3] for e in batch])
        dones = np.array([e[4] for e in batch])

        # Bellman targets: bootstrap from the (frozen) target network.
        target_q_values = self.q_network.predict(states, verbose=0)
        next_q_values = self.target_network.predict(next_states, verbose=0)

        for i in range(self.batch_size):
            if dones[i]:
                target_q_values[i][actions[i]] = rewards[i]
            else:
                target_q_values[i][actions[i]] = rewards[i] + self.gamma * np.max(next_q_values[i])

        # Single gradient step toward the updated targets.
        self.q_network.fit(states, target_q_values, epochs=1, verbose=0)

    def _generate_solution(self) -> Dict[str, Any]:
        """Greedily decode a schedule from the trained Q-network.

        Returns:
            Dict with an 'assignments' mapping of resource id to a list of
            assignment records (appointment_id, resource_id, start_time,
            end_time).
        """
        assignments = {}
        state = self._get_initial_state()
        n_appointments = len(self.appointments)

        for appointment in self.appointments:
            # Greedy (epsilon = 0) action selection.
            q_values = self.q_network.predict(state.reshape(1, -1), verbose=0)
            action = int(np.argmax(q_values[0]))

            # Decode the resource with the same layout used during training
            # (the previous inference decode disagreed with _step), wrapping
            # with modulo so every appointment still receives a resource.
            resource_index = (action // n_appointments) % len(self.resources)
            resource = self.resources[resource_index]

            # Schedule at the requested time for the requested duration
            # (assumes 'preferred_time' is a datetime — TODO confirm upstream).
            start_time = appointment['preferred_time']
            duration = appointment.get('duration', 30)
            end_time = start_time + timedelta(minutes=duration)

            resource_id = resource['id']
            if resource_id not in assignments:
                assignments[resource_id] = []

            assignments[resource_id].append({
                'appointment_id': appointment['id'],
                'resource_id': resource_id,
                'start_time': start_time,
                'end_time': end_time
            })

            # Mark the chosen resource slot as consumed in the encoding.
            if resource_index < self.state_size:
                state[resource_index] = 0

        return {'assignments': assignments}
