"""
增强的强化学习服务
使用强化学习优化工作流决策，支持多维度奖励和自适应优化
"""

import json
import logging
import pickle
import random
from collections import defaultdict, deque
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
from sklearn.preprocessing import StandardScaler

from src.utils.logging import get_logger

logger = get_logger(__name__)


class EnhancedReinforcementLearningService:
    """Enhanced reinforcement-learning service for optimizing workflow decisions.

    Implements tabular Q-learning with an epsilon-greedy policy, a
    multi-dimensional reward function, and self-adaptive tuning of the
    learning rate and exploration rate based on recent performance.
    """

    def __init__(self, learning_rate=0.1, discount_factor=0.9, epsilon=0.1):
        """
        Initialize the reinforcement-learning service.

        Args:
            learning_rate: Learning rate (alpha) used in the Q-update rule.
            discount_factor: Discount factor (gamma) for future rewards.
            epsilon: Exploration probability of the epsilon-greedy policy.
        """
        # Q-table: state-action value function, lazily populated with 0.0.
        self.q_table = defaultdict(lambda: defaultdict(float))

        # Learning hyper-parameters.
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.epsilon = epsilon

        # Adaptive-tuning state: recent performance scores and the ratio
        # below which we consider performance "degraded".
        self.performance_history = deque(maxlen=100)
        self.adaptation_threshold = 0.7

        # History buffers. Both are bounded so a long-running service does
        # not leak memory (the state-action history previously grew without
        # bound as a plain list).
        self.experience_buffer = deque(maxlen=10000)  # experience-replay buffer
        self.state_action_history = deque(maxlen=10000)  # (state_key, action) pairs

        # Discrete action space the policy chooses from.
        self.action_space = [
            "use_multi_agent_workflow",
            "use_optimized_workflow", 
            "use_streaming_workflow",
            "adjust_timeout",
            "increase_iterations",
            "decrease_iterations",
            "enable_multimodal",
            "disable_multimodal"
        ]

        # Feature names expected in a state dict (documentation only; the
        # Q-table keys are built from whatever keys the caller provides).
        self.state_features = [
            "question_complexity",
            "question_length",
            "domain_type",
            "historical_success_rate",
            "user_preference",
            "content_type",
            "time_constraint"
        ]

        logger.info("增强的强化学习服务初始化完成")

    def _state_to_key(self, state: Dict[str, Any]) -> str:
        """
        Serialize a state dict into a stable string key for Q-table lookups.

        Args:
            state: State dictionary.

        Returns:
            str: Deterministic state key.
        """
        # Sort items so logically-equal states always produce the same key;
        # default=str keeps non-JSON-serializable values from raising.
        sorted_state = sorted(state.items(), key=lambda x: x[0])
        return json.dumps(sorted_state, sort_keys=True, default=str)

    def get_action(self, state: Dict[str, Any]) -> str:
        """
        Choose an action for the current state via the epsilon-greedy policy.

        Args:
            state: Current state.

        Returns:
            str: The selected action.
        """
        state_key = self._state_to_key(state)

        if random.random() < self.epsilon:
            # Explore: pick a uniformly random action.
            action = random.choice(self.action_space)
            logger.debug("探索性动作选择: %s", action)
        else:
            # Exploit: pick the action with the highest Q-value.
            q_values = self.q_table[state_key]
            if not q_values:
                # Unseen state: fall back to a random action.
                action = random.choice(self.action_space)
                logger.debug("新状态，随机动作选择: %s", action)
            else:
                action = max(q_values, key=q_values.get)
                logger.debug("利用性动作选择: %s (Q值: %s)", action, q_values[action])

        # Record the state-action pair for later analysis.
        self.state_action_history.append((state_key, action))

        return action

    def update_q_value(self, state: Dict[str, Any], action: str, 
                       reward: float, next_state: Dict[str, Any]) -> bool:
        """
        Apply the Q-learning update rule for one transition.

        Args:
            state: Current state.
            action: Action that was executed.
            reward: Reward received for the transition.
            next_state: Resulting state.

        Returns:
            bool: True if the update succeeded.
        """
        try:
            state_key = self._state_to_key(state)
            next_state_key = self._state_to_key(next_state)

            current_q = self.q_table[state_key][action]

            # Best achievable Q-value from the next state (0.0 if unseen).
            next_max_q = max(self.q_table[next_state_key].values(), default=0.0)

            # Q-learning update: Q(s,a) <- Q(s,a) + a[r + g*max Q(s',a') - Q(s,a)]
            new_q = current_q + self.learning_rate * (reward + self.discount_factor * next_max_q - current_q)

            self.q_table[state_key][action] = new_q

            # Store the transition in the replay buffer (timezone-aware
            # timestamp; datetime.utcnow() is deprecated).
            experience = {
                'state': state,
                'action': action,
                'reward': reward,
                'next_state': next_state,
                'timestamp': datetime.now(timezone.utc)
            }
            self.experience_buffer.append(experience)

            logger.debug("Q值更新: 状态=%s..., 动作=%s, 奖励=%s, 新Q值=%s",
                         state_key[:50], action, reward, new_q)
            return True

        except Exception as e:
            logger.error("Q值更新失败: %s", e)
            return False

    def calculate_comprehensive_reward(self, execution_result: Dict[str, Any]) -> float:
        """
        Compute a multi-dimensional reward from an execution result.

        Args:
            execution_result: Execution-result dict; all fields optional.

        Returns:
            float: The aggregated reward.
        """
        reward = 0.0

        # Success/failure base reward.
        if execution_result.get('success', False):
            reward += 10.0
        else:
            reward -= 5.0

        # Execution time: faster is better, capped at +5.
        execution_time = execution_result.get('execution_time', 0)
        if execution_time > 0:
            time_reward = max(0, 5.0 - execution_time / 10.0)
            reward += time_reward

        # Result-quality reward (weight 2.0).
        quality_score = execution_result.get('quality_score', 0)
        reward += quality_score * 2.0

        # User feedback in [-1, 1] (weight 3.0).
        user_feedback = execution_result.get('user_feedback', 0)
        reward += user_feedback * 3.0

        # Penalize resource usage above 80%.
        resource_usage = execution_result.get('resource_usage', 0)
        if resource_usage > 0.8:
            reward -= (resource_usage - 0.8) * 5.0

        # Cost-efficiency reward (weight 1.5).
        cost_efficiency = execution_result.get('cost_efficiency', 0)
        reward += cost_efficiency * 1.5

        # User-satisfaction reward (weight 2.5).
        user_satisfaction = execution_result.get('user_satisfaction', 0)
        reward += user_satisfaction * 2.5

        # Innovation reward (weight 1.0).
        innovation_score = execution_result.get('innovation_score', 0)
        reward += innovation_score * 1.0

        logger.debug("综合奖励计算: 基础奖励=%s, 执行结果=%s", reward, execution_result)
        return reward

    def adapt_parameters(self):
        """Adapt learning parameters based on historical performance."""
        # Need a minimum sample before adapting.
        if len(self.performance_history) < 10:
            return

        recent_performance = np.mean(list(self.performance_history)[-10:])
        overall_performance = np.mean(self.performance_history)

        # Recent performance dropped: explore more and learn faster.
        if recent_performance < overall_performance * self.adaptation_threshold:
            self.epsilon = min(1.0, self.epsilon * 1.1)
            self.learning_rate = min(0.5, self.learning_rate * 1.05)
            logger.info("性能下降，增加探索率: epsilon=%s, learning_rate=%s",
                        self.epsilon, self.learning_rate)
        # Performance clearly improved: reduce exploration to stabilize.
        elif recent_performance > overall_performance * 1.1:
            self.epsilon = max(0.01, self.epsilon * 0.95)
            logger.info("性能提升，减少探索率: epsilon=%s", self.epsilon)

    def record_performance(self, performance: float):
        """Record a performance score and trigger adaptive tuning."""
        self.performance_history.append(performance)
        self.adapt_parameters()


class DeepQNetworkService:
    """深度Q网络服务类，用于处理更复杂的状态空间"""
    
    def __init__(self, state_size: int, action_size: int):
        """
        初始化深度Q网络服务
        
        Args:
            state_size: 状态空间大小
            action_size: 动作空间大小
        """
        self.state_size = state_size
        self.action_size = action_size
        self.model = None
        self.target_model = None
        self.optimizer = None
        self._initialize_model()
        
    def _initialize_model(self):
        """初始化深度Q网络模型"""
        try:
            # 将TensorFlow相关导入放在函数内部，避免在没有安装TensorFlow时出现静态分析错误
            tf = __import__('tensorflow', fromlist=[''])
            Sequential = getattr(__import__('tensorflow.keras.models', fromlist=['Sequential']), 'Sequential')
            Dense = getattr(__import__('tensorflow.keras.layers', fromlist=['Dense']), 'Dense')
            Adam = getattr(__import__('tensorflow.keras.optimizers', fromlist=['Adam']), 'Adam')
            
            # 评估网络
            self.model = Sequential([
                Dense(128, activation='relu', input_shape=(self.state_size,)),
                Dense(128, activation='relu'),
                Dense(self.action_size, activation='linear')
            ])
            
            # 目标网络
            self.target_model = Sequential([
                Dense(128, activation='relu', input_shape=(self.state_size,)),
                Dense(128, activation='relu'),
                Dense(self.action_size, activation='linear')
            ])
            
            self.optimizer = Adam(learning_rate=0.001)
            self.model.compile(optimizer=self.optimizer, loss='mse')
            self.target_model.compile(optimizer=self.optimizer, loss='mse')
            
            logger.info("深度Q网络模型初始化完成")
        except ImportError as e:
            logger.warning(f"TensorFlow未安装或导入失败，无法使用深度Q网络: {e}")
            self.model = None
            self.target_model = None
            self.optimizer = None
        except Exception as e:
            logger.error(f"初始化深度Q网络模型时发生未知错误: {e}")
            self.model = None
            self.target_model = None
            self.optimizer = None
    
    def predict(self, state):
        """预测Q值"""
        if self.model is None:
            return None
        return self.model.predict(state, verbose=0)
    
    def train(self, states, targets):
        """训练模型"""
        if self.model is None:
            return
        self.model.fit(states, targets, epochs=1, verbose=0)
    
    def update_target_model(self):
        """更新目标网络"""
        if self.model is None or self.target_model is None:
            return
        self.target_model.set_weights(self.model.get_weights())


def get_enhanced_reinforcement_learning_service():
    """Create and return a fresh enhanced reinforcement-learning service."""
    service = EnhancedReinforcementLearningService()
    return service