"""
AI决策模块 - 负责制定魔兽争霸3游戏策略

该模块使用强化学习算法，根据视觉模块提供的游戏状态信息，
制定最优的游戏策略并生成相应的操作指令。
同时集成了大语言模型辅助决策功能，以提升策略的智能性。
"""

import json
import os
import random
from collections import deque
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
import tensorflow as tf
from tensorflow import keras

# 导入大模型接口模块
from .llm_interface import LLMInterface


class DecisionMaker:
    """
    AI decision maker — derives Warcraft III game strategies.

    This class:
    1. Chooses a strategy from the observed game state.
    2. Optimizes decisions with a Deep Q-Network (reinforcement learning).
    3. Produces concrete in-game operation commands.
    """

    def __init__(self, model_path: Optional[str] = None, use_llm: bool = False,
                 llm_api_url: Optional[str] = None, llm_api_key: Optional[str] = None):
        """
        Initialize the AI decision maker.

        Args:
            model_path: Optional path to pre-trained model weights.
            use_llm: Whether to use a large language model to assist decisions.
            llm_api_url: Optional LLM API endpoint URL.
            llm_api_key: Optional LLM API key.
        """
        # State vector dimension (defined by the vision module's output).
        self.state_size = 10  # e.g. resource amounts, unit counts, ...

        # Size of the discrete action space (executable game operations).
        self.action_size = 20  # e.g. move, attack, build, ...

        # Experience-replay buffer; deque drops the oldest entries beyond maxlen.
        self.memory = deque(maxlen=2000)

        # Reinforcement-learning hyperparameters.
        self.gamma = 0.95    # discount factor
        self.epsilon = 1.0   # exploration rate (epsilon-greedy)
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001

        # Online Q-network.
        self.q_network = self._build_model()

        # Target network, periodically synchronized from the online network.
        self.target_network = self._build_model()
        self.update_target_network()

        # Load pre-trained weights when a valid path is supplied.
        if model_path and os.path.exists(model_path):
            self.load_model(model_path)

        # Optional LLM-assisted decision interface.
        self.use_llm = use_llm
        if self.use_llm:
            self.llm_interface = LLMInterface(api_url=llm_api_url, api_key=llm_api_key)
        else:
            self.llm_interface = None

        print(f"AI决策器初始化完成，大模型辅助决策: {'启用' if self.use_llm else '未启用'}")

    def _build_model(self) -> keras.Model:
        """
        Build the deep Q-network.

        Returns:
            keras.Model: compiled MLP mapping a state vector to per-action Q-values.
        """
        model = keras.Sequential([
            keras.layers.Dense(64, input_dim=self.state_size, activation='relu'),
            keras.layers.Dense(64, activation='relu'),
            keras.layers.Dense(32, activation='relu'),
            # Linear output head: raw Q-value estimates, one per action.
            keras.layers.Dense(self.action_size, activation='linear')
        ])

        model.compile(
            optimizer=keras.optimizers.Adam(learning_rate=self.learning_rate),
            loss='mse'
        )

        return model

    def update_target_network(self) -> None:
        """Copy the online network's weights into the target network."""
        self.target_network.set_weights(self.q_network.get_weights())

    def remember(self, state: np.ndarray, action: int, reward: float,
                 next_state: np.ndarray, done: bool) -> None:
        """
        Store one transition in the replay buffer.

        Args:
            state: Current state.
            action: Action taken.
            reward: Reward received.
            next_state: Resulting state.
            done: Whether the episode terminated.
        """
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state: np.ndarray) -> int:
        """
        Select an action for the current state with an epsilon-greedy policy.

        Args:
            state: Current game state vector.

        Returns:
            int: Index of the selected action.
        """
        # Explore with probability epsilon.
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)

        # Otherwise exploit: pick the action with the highest predicted Q-value.
        # Normalize dtype/shape so the network input is stable.
        state = np.asarray(state, dtype=np.float32).reshape(1, -1)
        q_values = self.q_network.predict(state, verbose=0)
        # Cast to a plain int so the declared return type holds
        # (np.argmax returns a NumPy integer scalar).
        return int(np.argmax(q_values[0]))

    def replay(self, batch_size: int = 32) -> None:
        """
        Sample a minibatch from the replay buffer and train the Q-network.

        The whole minibatch is predicted and fitted in single batched calls
        (standard DQN formulation) instead of one predict/fit per sample,
        which reduces model invocations from O(batch) to O(1).

        Args:
            batch_size: Number of transitions to sample.
        """
        if len(self.memory) < batch_size:
            return

        # Sample a random batch of transitions.
        minibatch = random.sample(self.memory, batch_size)

        states = np.asarray(
            [np.asarray(s, dtype=np.float32).reshape(-1) for s, _, _, _, _ in minibatch])
        next_states = np.asarray(
            [np.asarray(ns, dtype=np.float32).reshape(-1) for _, _, _, ns, _ in minibatch])
        actions = np.asarray([a for _, a, _, _, _ in minibatch], dtype=np.int64)
        rewards = np.asarray([r for _, _, r, _, _ in minibatch], dtype=np.float32)
        dones = np.asarray([d for _, _, _, _, d in minibatch], dtype=np.float32)

        # Target Q-values: r for terminal transitions,
        # r + gamma * max_a' Q_target(s', a') otherwise.
        next_q = self.target_network.predict(next_states, verbose=0)
        targets = rewards + self.gamma * np.amax(next_q, axis=1) * (1.0 - dones)

        # Update only the taken action's Q-value; keep the rest as predicted.
        target_f = self.q_network.predict(states, verbose=0)
        target_f[np.arange(batch_size), actions] = targets

        # One batched gradient step.
        self.q_network.fit(states, target_f, epochs=1, verbose=0)

        # Decay the exploration rate.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def save_model(self, filepath: str) -> None:
        """
        Save the online network's weights.

        Args:
            filepath: Destination path.
        """
        self.q_network.save_weights(filepath)
        print(f"模型已保存至: {filepath}")

    def load_model(self, filepath: str) -> None:
        """
        Load network weights from disk (best-effort: failures are logged,
        not raised, so the decision maker keeps its fresh weights).

        Args:
            filepath: Path to the weights file.
        """
        try:
            self.q_network.load_weights(filepath)
            # Keep the target network in sync with the loaded weights.
            self.update_target_network()
            print(f"模型已从 {filepath} 加载")
        except Exception as e:
            print(f"模型加载失败: {e}")

    def preprocess_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
        """
        Convert a raw game-state dict into the network's input vector.

        Args:
            raw_state: Raw game-state data (schema defined by the vision
                module — keys used here: 'resources', 'units', 'buildings',
                'unit_count', 'building_count').

        Returns:
            np.ndarray: Normalized float32 state vector of length state_size.
        """
        # Example mapping; adjust to the vision module's actual output.
        # float32 matches the common deep-learning default dtype.
        state_vector = np.zeros(self.state_size, dtype=np.float32)

        try:
            # Resource features, normalized by rough expected maxima.
            if 'resources' in raw_state:
                resources = raw_state['resources']
                state_vector[0] = resources.get('gold', 0) / 1000.0
                state_vector[1] = resources.get('wood', 0) / 1000.0

            # Unit/building features: accept either explicit count fields
            # or lists (whose lengths are then used as counts).
            units = raw_state.get('units', None)
            buildings = raw_state.get('buildings', None)

            unit_count = raw_state.get('unit_count', None)
            if unit_count is None and isinstance(units, list):
                unit_count = len(units)
            state_vector[2] = float(unit_count or 0) / 100.0

            building_count = raw_state.get('building_count', None)
            if building_count is None and isinstance(buildings, list):
                building_count = len(buildings)
            state_vector[3] = float(building_count or 0) / 50.0

            # Remaining slots reserved for additional state features.

        except Exception as e:
            print(f"状态预处理失败: {e}")

        return state_vector

    def postprocess_action(self, action: int) -> Dict[str, Any]:
        """
        Map a network action index to a concrete game operation command.

        Args:
            action: Action index produced by the network.

        Returns:
            Dict[str, Any]: Operation command ({'type', 'params'}); unknown
            indices fall back to an idle command.
        """
        # Action lookup table (example subset).
        action_map = {
            0: {'type': 'move', 'params': {'x': 400, 'y': 300}},
            1: {'type': 'move', 'params': {'x': 600, 'y': 500}},
            2: {'type': 'attack', 'params': {'x': 800, 'y': 400}},
            3: {'type': 'build', 'params': {'structure': 'barracks', 'x': 200, 'y': 200}},
            4: {'type': 'train', 'params': {'unit': 'footman'}},
            # ... more action mappings
        }

        # Fallback for unmapped indices.
        default_action = {'type': 'idle', 'params': {}}

        return action_map.get(action, default_action)

    def calculate_reward(self, prev_state: Dict[str, Any],
                        current_state: Dict[str, Any],
                        action: Dict[str, Any]) -> float:
        """
        Compute the reward for a state transition.

        Args:
            prev_state: State before the action.
            current_state: State after the action.
            action: The executed operation command.

        Returns:
            float: Reward value (resource/unit/building growth, idle penalty).
        """
        reward = 0.0

        try:
            # Resource growth rewards.
            prev_gold = prev_state.get('resources', {}).get('gold', 0)
            current_gold = current_state.get('resources', {}).get('gold', 0)
            reward += (current_gold - prev_gold) * 0.01

            prev_wood = prev_state.get('resources', {}).get('wood', 0)
            current_wood = current_state.get('resources', {}).get('wood', 0)
            reward += (current_wood - prev_wood) * 0.01

            # Unit-count growth reward.
            prev_units = prev_state.get('unit_count', 0)
            current_units = current_state.get('unit_count', 0)
            reward += (current_units - prev_units) * 0.5

            # Building-count growth reward.
            prev_buildings = prev_state.get('building_count', 0)
            current_buildings = current_state.get('building_count', 0)
            reward += (current_buildings - prev_buildings) * 1.0

            # Penalize idling; .get avoids a KeyError on malformed commands.
            if action.get('type') == 'idle':
                reward -= 0.1

        except Exception as e:
            print(f"奖励计算失败: {e}")

        return reward

    def make_decision(self, game_state: Dict[str, Any]) -> Dict[str, Any]:
        """
        Produce a decision for the current game state.

        Args:
            game_state: Current raw game state.

        Returns:
            Dict[str, Any]: Decision result (operation command plus metadata:
            'action_index', 'action', 'processed_state', 'source').
        """
        # Vectorize the raw state for the network.
        processed_state = self.preprocess_state(game_state)

        # Reinforcement-learning action selection.
        rl_action_idx = self.act(processed_state)
        rl_action = self.postprocess_action(rl_action_idx)

        # Package the pure-RL decision.
        rl_decision = {
            'action_index': rl_action_idx,
            'action': rl_action,
            'processed_state': processed_state,
            'source': 'rl'
        }

        # Optionally blend in the LLM's suggestion.
        if self.use_llm and self.llm_interface:
            try:
                # Query the LLM for its suggested action.
                llm_action = self.llm_interface.llm_decision(game_state)

                # Fuse the RL decision with the LLM suggestion.
                final_decision = self.llm_interface.integrate_with_rl(rl_decision, llm_action)

                return final_decision
            except Exception as e:
                print(f"大模型辅助决策过程异常: {e}")
                # On any LLM failure, fall back to the pure-RL decision.
                return rl_decision
        else:
            # LLM assistance disabled: return the RL decision directly.
            return rl_decision


# Usage example
def _demo() -> None:
    """Run a one-shot demonstration of the decision pipeline."""
    # Build a decision maker with default settings (no pre-trained model, no LLM).
    decision_maker = DecisionMaker()

    # Minimal example of the game-state structure the vision module produces.
    sample_state = {
        'resources': {'gold': 500, 'wood': 300},
        'unit_count': 5,
        'building_count': 3,
    }

    # Produce and display a decision.
    decision = decision_maker.make_decision(sample_state)
    print("AI决策:", decision)


if __name__ == "__main__":
    _demo()
