import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GATv2Conv, GCNConv
from torch_geometric.data import Data, Batch
import numpy as np

from config import *
from graph_utils import build_grid_graph


class TemporalAttention(nn.Module):
    """Multi-head self-attention over the time axis of a state sequence.

    Captures which steps of a history of encoded states matter most.
    Input and output are both [batch_size, seq_len, hidden_dim].
    """
    def __init__(self, hidden_dim, num_heads=4):
        super(TemporalAttention, self).__init__()
        # Fail fast with a clear message instead of an opaque .view() error
        # later in forward().
        if hidden_dim % num_heads != 0:
            raise ValueError(
                f"hidden_dim ({hidden_dim}) must be divisible by num_heads ({num_heads})"
            )
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.head_dim = hidden_dim // num_heads
        # Precomputed attention scale sqrt(head_dim); the previous code built
        # a fresh tensor via torch.sqrt(torch.tensor(...)) on every forward.
        self.scale = self.head_dim ** 0.5

        self.query = nn.Linear(hidden_dim, hidden_dim)
        self.key = nn.Linear(hidden_dim, hidden_dim)
        self.value = nn.Linear(hidden_dim, hidden_dim)

        self.fc_out = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x, mask=None):
        """Apply scaled dot-product self-attention along the sequence axis.

        Args:
            x: [batch_size, seq_len, hidden_dim] input sequence.
            mask: optional tensor broadcastable to
                [batch_size, num_heads, seq_len, seq_len]; positions where
                mask == 0 are excluded from attention.

        Returns:
            [batch_size, seq_len, hidden_dim] attended sequence.
        """
        batch_size, seq_len, _ = x.size()

        # Project, split into heads, and move heads ahead of the sequence
        # axis: [batch_size, num_heads, seq_len, head_dim].
        q = self.query(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        k = self.key(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        v = self.value(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        # Scaled dot-product scores: [batch_size, num_heads, seq_len, seq_len]
        scores = torch.matmul(q, k.transpose(-2, -1)) / self.scale

        # Mask out disallowed positions (large negative -> ~0 after softmax).
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)

        attention = F.softmax(scores, dim=-1)

        # Weighted sum of values, then merge heads back into hidden_dim.
        out = torch.matmul(attention, v)  # [batch_size, num_heads, seq_len, head_dim]
        out = out.transpose(1, 2).contiguous()  # [batch_size, seq_len, num_heads, head_dim]
        out = out.view(batch_size, seq_len, -1)  # [batch_size, seq_len, hidden_dim]

        return self.fc_out(out)

class SpatialGNN(nn.Module):
    """Spatial graph encoder: stacked GATv2 layers with edge features.

    Each layer applies attention-based message passing followed by batch
    normalization, ReLU, and a learned residual connection back to the
    layer's input. Node features stay at ``hidden_dim`` throughout.
    """
    def __init__(self, node_dim, edge_dim, hidden_dim, num_layers=4):
        super(SpatialGNN, self).__init__()
        self.node_dim = node_dim
        self.edge_dim = edge_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers

        # Initial node-feature embedding into the hidden space.
        self.node_encoder = nn.Sequential(
            nn.Linear(node_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
        )

        # Initial edge-feature embedding (half the hidden width).
        self.edge_encoder = nn.Sequential(
            nn.Linear(edge_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, hidden_dim // 2),
        )

        # One GATv2 conv + batch norm + residual projection per layer.
        # 8 heads of width hidden_dim // 8 concatenate back to hidden_dim.
        self.convs = nn.ModuleList()
        self.batch_norms = nn.ModuleList()
        self.residuals = nn.ModuleList()
        for _ in range(num_layers):
            self.convs.append(GATv2Conv(
                hidden_dim,
                hidden_dim // 8,
                heads=8,
                edge_dim=hidden_dim // 2,
                dropout=0.1,
            ))
            self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
            self.residuals.append(nn.Linear(hidden_dim, hidden_dim))

    def forward(self, x, edge_index, edge_attr, batch=None):
        """Encode node features given graph connectivity.

        Args:
            x: [num_nodes, node_dim] raw node features.
            edge_index: [2, num_edges] COO connectivity.
            edge_attr: [num_edges, edge_dim] raw edge features.
            batch: unused here; accepted for API symmetry with PyG encoders.

        Returns:
            [num_nodes, hidden_dim] encoded node features.
        """
        h = self.node_encoder(x)
        e = self.edge_encoder(edge_attr)

        for conv, norm, skip in zip(self.convs, self.batch_norms, self.residuals):
            shortcut = h
            h = F.relu(norm(conv(h, edge_index, e)))
            h = h + skip(shortcut)  # learned residual connection

        return h

class HybridWorldModel(nn.Module):
    """Hybrid world model: spatial GNN + temporal attention + multi-branch heads.

    The model encodes a flat grid state [batch, ENV_SIZE*ENV_SIZE*features]
    into per-cell graph node features, optionally fuses them with an
    attention-pooled encoding of a state history, and predicts:
      * a per-cell environment state [batch, ENV_SIZE, ENV_SIZE, CELL_FEATURES]
      * an agent vector [batch, 4] = (dx, dy, x, y), produced by a meta
        predictor that combines absolute-position, relative-movement, and
        probability-heatmap branches.

    Fixes vs. the previous revision:
      * `_reshape_for_graph` was a per-cell Python loop through
        `.tolist()`/`torch.tensor`, which *detached the node features from
        autograd* (no gradient flowed through the encoding path) and was
        O(batch * grid^2) Python work; it is now fully vectorized.
      * `forward` computed `self._prepare_history(...)` and discarded the
        result; the dead call is removed (the method itself is kept).
      * Edge replication, pooling, and environment-grid reassembly are
        vectorized with identical results.
    """
    def __init__(self,
                 node_features,
                 edge_features,
                 hidden_dim=256,
                 history_length=5,
                 dropout_rate=0.2):
        super(HybridWorldModel, self).__init__()

        self.node_features = node_features
        self.edge_features = edge_features
        self.hidden_dim = hidden_dim
        self.history_length = history_length

        # Spatial GNN encoder (shared across current state and history steps).
        self.spatial_encoder = SpatialGNN(
            node_dim=node_features,
            edge_dim=edge_features,
            hidden_dim=hidden_dim,
            num_layers=4
        )

        # Temporal attention over pooled per-step encodings.
        self.temporal_attn = TemporalAttention(hidden_dim, num_heads=8)

        # Fuses [spatial ; temporal] back down to hidden_dim.
        self.fusion_layer = nn.Sequential(
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate)
        )

        # Per-node environment-state prediction head.
        self.env_head = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, CELL_FEATURES)
        )

        # Agent branch 1: absolute position (x, y).
        self.abs_pos_head = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 2)  # (x, y)
        )

        # Agent branch 2: relative displacement (dx, dy).
        self.rel_mov_head = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 2)  # (dx, dy)
        )

        # Agent branch 3: probability heatmap over all grid cells.
        self.prob_map_head = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(hidden_dim, ENV_SIZE * ENV_SIZE)
        )

        # Meta predictor: combines fused features with all three branches.
        self.meta_predictor = nn.Sequential(
            nn.Linear(hidden_dim + 4 + ENV_SIZE * ENV_SIZE, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 4)  # final prediction: (dx, dy, x, y)
        )

        # Fixed grid-graph connectivity shared by all samples.
        self._setup_grid_graph()

    def _setup_grid_graph(self):
        """Build the fixed grid-graph edges once and register them as buffers
        so they follow the module across devices and into state_dict."""
        edge_index, edge_attr = build_grid_graph(ENV_SIZE)
        self.register_buffer('edge_index', edge_index)
        self.register_buffer('edge_attr', edge_attr)

    def _prepare_history(self, states_history):
        """Convert a state history into per-node feature sequences.

        Args:
            states_history: [batch, seq_len, features] flat grid states.

        Returns:
            [batch * ENV_SIZE * ENV_SIZE, seq_len, node_features] tensor.

        NOTE: `forward` no longer calls this (its result was previously
        computed and discarded); it is kept for external callers.
        """
        seq_len = states_history.size(1)
        per_step = [self._reshape_for_graph(states_history[:, t]) for t in range(seq_len)]
        return torch.stack(per_step, dim=1)

    def _reshape_for_graph(self, x):
        """Reshape a flat batch of grid states into graph node features.

        Appends a 2-d positional encoding in [-1, 1] (row, col) to every
        cell's features. Fully vectorized and gradient-preserving (the old
        per-cell loop went through `.tolist()`/`torch.tensor`, detaching
        the result from autograd).

        Args:
            x: [batch, ENV_SIZE*ENV_SIZE*features] flat states.

        Returns:
            [batch * ENV_SIZE * ENV_SIZE, features + 2] float32 node
            features, ordered sample-major, then row-major within the grid.
        """
        batch_size = x.size(0)
        g = ENV_SIZE

        # Cast to float32 to match the previous implementation's output dtype.
        x = x.to(torch.float32).view(batch_size, g, g, -1)

        # Positional encodings: linspace(-1, 1, g) equals 2*(i/(g-1)) - 1.
        coords = torch.linspace(-1.0, 1.0, g, device=x.device)
        pos = torch.stack(torch.meshgrid(coords, coords, indexing='ij'), dim=-1)  # [g, g, 2]
        pos = pos.unsqueeze(0).expand(batch_size, -1, -1, -1)

        nodes = torch.cat([x, pos], dim=-1)
        return nodes.reshape(batch_size * g * g, nodes.size(-1))

    def forward(self, x, states_history=None):
        """Forward pass.

        Args:
            x: current state [batch_size, features].
            states_history: history [batch_size, history_length, features] or None.

        Returns:
            (env_output [batch, ENV_SIZE, ENV_SIZE, CELL_FEATURES],
             agent_output [batch, 4])
        """
        batch_size = x.size(0)
        total_nodes = ENV_SIZE * ENV_SIZE

        # Current state as graph node features.
        node_features = self._reshape_for_graph(x)

        # Node b*total_nodes + k belongs to sample b.
        batch_idx = torch.arange(batch_size, device=x.device).repeat_interleave(total_nodes)

        # Replicate the fixed grid edges for every sample in the batch.
        batch_edge_index, batch_edge_attr = self._repeat_edges_for_batch(batch_size)

        # Spatial encoding of the current state, plus a per-sample pooled view.
        spatial_encoding = self.spatial_encoder(node_features, batch_edge_index, batch_edge_attr, batch_idx)
        spatial_encoding_pooled = self._pool_node_features(spatial_encoding, batch_idx, batch_size)

        if states_history is not None:
            # Encode each history step with the shared spatial encoder.
            temp_encodings = []
            for t in range(states_history.size(1)):
                nodes_t = self._reshape_for_graph(states_history[:, t])
                enc_t = self.spatial_encoder(nodes_t, batch_edge_index, batch_edge_attr, batch_idx)
                temp_encodings.append(self._pool_node_features(enc_t, batch_idx, batch_size))

            temp_sequence = torch.stack(temp_encodings, dim=1)  # [batch, seq_len, hidden]

            # Attend over time; keep the last step's attended representation.
            temp_encoding = self.temporal_attn(temp_sequence)[:, -1]

            # Fuse spatial and temporal features.
            combined_features = torch.cat([spatial_encoding_pooled, temp_encoding], dim=1)
            fused_features = self.fusion_layer(combined_features)
        else:
            # Without history, rely on spatial features alone.
            fused_features = spatial_encoding_pooled

        # Environment prediction from the per-node encodings.
        env_output = self._predict_environment(fused_features, spatial_encoding, batch_idx)

        # Agent prediction branches.
        abs_pos = self.abs_pos_head(fused_features)   # absolute (x, y)
        rel_mov = self.rel_mov_head(fused_features)   # relative (dx, dy)
        prob_map = F.softmax(self.prob_map_head(fused_features), dim=1)  # cell probabilities

        # Meta prediction combining all branches.
        meta_input = torch.cat([fused_features, abs_pos, rel_mov, prob_map], dim=1)
        agent_output = self.meta_predictor(meta_input)

        return env_output, agent_output

    def _repeat_edges_for_batch(self, batch_size):
        """Replicate the grid edges per sample, offsetting node indices.

        Returns:
            (edge_index [2, batch_size * num_edges] grouped per sample,
             edge_attr [batch_size * num_edges, edge_dim])
        """
        total_nodes = ENV_SIZE * ENV_SIZE
        offsets = torch.arange(batch_size, device=self.edge_index.device) * total_nodes
        # [B, 2, E] -> [2, B, E] -> [2, B*E]: identical ordering to
        # concatenating each sample's offset edge_index along dim=1.
        batch_edge_index = (self.edge_index.unsqueeze(0) + offsets.view(-1, 1, 1)) \
            .transpose(0, 1).reshape(2, -1)
        batch_edge_attr = self.edge_attr.repeat(batch_size, 1)
        return batch_edge_index, batch_edge_attr

    def _pool_node_features(self, node_features, batch_idx, batch_size):
        """Mean-pool node features per sample (vectorized scatter-mean).

        Args:
            node_features: [num_nodes, dim] encoded nodes.
            batch_idx: [num_nodes] sample assignment of each node.
            batch_size: number of samples.

        Returns:
            [batch_size, dim] per-sample mean of node features.
        """
        dim = node_features.size(-1)
        sums = torch.zeros(batch_size, dim, device=node_features.device,
                           dtype=node_features.dtype)
        sums = sums.index_add(0, batch_idx, node_features)
        # clamp guards against an (unexpected) empty sample producing NaN.
        counts = torch.bincount(batch_idx, minlength=batch_size).clamp(min=1)
        return sums / counts.unsqueeze(1).to(sums.dtype)

    def _predict_environment(self, global_features, node_features, batch_idx):
        """Predict per-cell environment state from node encodings.

        Args:
            global_features: [batch, hidden] pooled features (unused; kept
                for signature compatibility).
            node_features: [batch * ENV_SIZE * ENV_SIZE, hidden] node
                encodings, ordered sample-major then row-major.
            batch_idx: [num_nodes] sample assignment (unused; ordering is
                guaranteed by construction in `forward`).

        Returns:
            [batch, ENV_SIZE, ENV_SIZE, CELL_FEATURES] grid predictions.
        """
        batch_size = global_features.size(0)
        env_node_predictions = self.env_head(node_features)  # [num_nodes, CELL_FEATURES]
        # Nodes are sample-major and row-major, so a view recovers the grid.
        return env_node_predictions.view(batch_size, ENV_SIZE, ENV_SIZE, CELL_FEATURES)