"""
热力图预测模型
基于时空卷积神经网络预测问题分布热力图
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
import logging
from typing import Dict, List, Tuple, Optional
from torch.utils.data import Dataset, DataLoader, TensorDataset
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import seaborn as sns
import folium
from folium.plugins import HeatMap
from config.model_config import config

class SpatialTemporalCNN(nn.Module):
    """Spatio-temporal CNN for heatmap forecasting.

    Each input frame is encoded by a shared stack of 2D convolutions
    (with max-pooling), the per-frame feature maps are flattened into a
    sequence processed by 1D temporal convolutions, and a fully-connected
    head regresses the next-step heatmap at the pooled resolution.
    """

    def __init__(self, input_shape: Tuple[int, int, int],
                 spatial_filters: List[int],
                 temporal_filters: List[int],
                 hidden_dims: List[int],
                 dropout_rate: float = 0.3):
        """Build the network.

        Args:
            input_shape: (time_steps, height, width) of one input sample.
            spatial_filters: channel counts for the 2D conv blocks.
            temporal_filters: channel counts for the 1D conv blocks.
            hidden_dims: widths of the fully-connected hidden layers.
            dropout_rate: dropout probability used in temporal and FC layers.
        """
        super().__init__()

        self.input_shape = input_shape  # (time_steps, height, width)
        time_steps, h, w = input_shape

        # --- per-frame spatial encoder --------------------------------
        spatial_blocks: List[nn.Module] = []
        prev = 1  # single-channel heatmap frames
        for n_filters in spatial_filters:
            spatial_blocks += [
                nn.Conv2d(prev, n_filters, kernel_size=3, padding=1),
                nn.BatchNorm2d(n_filters),
                nn.ReLU(),
                nn.MaxPool2d(2, 2),
            ]
            prev = n_filters
            # each max-pool halves both spatial dimensions (floor division)
            h, w = h // 2, w // 2
        self.spatial_conv = nn.Sequential(*spatial_blocks)

        # --- temporal encoder over flattened spatial features ---------
        temporal_blocks: List[nn.Module] = []
        prev = spatial_filters[-1]
        for n_filters in temporal_filters:
            temporal_blocks += [
                nn.Conv1d(prev, n_filters, kernel_size=3, padding=1),
                nn.BatchNorm1d(n_filters),
                nn.ReLU(),
                nn.Dropout(dropout_rate),
            ]
            prev = n_filters
        self.temporal_conv = nn.Sequential(*temporal_blocks)

        # --- fully-connected regression head --------------------------
        # Conv1d with padding=1 preserves sequence length, so the flat
        # feature size is channels * time_steps * pooled_h * pooled_w.
        head: List[nn.Module] = []
        prev = temporal_filters[-1] * time_steps * h * w
        for dim in hidden_dims:
            head += [nn.Linear(prev, dim), nn.ReLU(), nn.Dropout(dropout_rate)]
            prev = dim
        # Output layer: next-step heatmap, flattened to pooled_h * pooled_w.
        head.append(nn.Linear(prev, h * w))
        self.fc = nn.Sequential(*head)

        self.output_height = h
        self.output_width = w

    def forward(self, x):
        """Predict the next heatmap.

        Args:
            x: tensor of shape (batch, time_steps, height, width).

        Returns:
            Tensor of shape (batch, output_height, output_width).
        """
        n, t_steps, _, _ = x.shape

        # Run the shared 2D conv stack on every frame independently;
        # x[:, t].unsqueeze(1) yields the (batch, 1, H, W) frame at step t.
        per_frame = [self.spatial_conv(x[:, t].unsqueeze(1))
                     for t in range(t_steps)]

        # (batch, channels, time_steps, h', w')
        stacked = torch.stack(per_frame, dim=2)

        # Flatten time and space into one sequence axis for Conv1d.
        seq = stacked.flatten(start_dim=2)
        features = self.temporal_conv(seq)

        # FC head on the flattened features, then reshape to a heatmap.
        out = self.fc(features.flatten(start_dim=1))
        return out.view(n, self.output_height, self.output_width)

class HeatmapPredictionModel:
    """Heatmap prediction model.

    Wraps a :class:`SpatialTemporalCNN` with data scaling, train/val/test
    training, autoregressive multi-step prediction, risk-hotspot
    extraction, and folium-based visualization.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.model = None                # set by train() / load_model()
        self.scaler = MinMaxScaler()     # fit in _preprocess_data()
        self.model_config = config.heatmap_prediction

        # Prefer GPU when available.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.logger.info(f"使用设备: {self.device}")

    def train(self, X: np.ndarray, y: np.ndarray) -> Dict:
        """Train the heatmap prediction model end-to-end.

        Args:
            X: input sequences, shape (samples, time_steps, height, width).
            y: next-step target heatmaps.

        Returns:
            Dict with evaluation metrics (test_loss, mse, mae, rmse, correlation).
        """
        self.logger.info("开始训练热力图预测模型...")

        # 1. Scale data and convert to tensors.
        X_processed, y_processed = self._preprocess_data(X, y)

        # 2. Split 60% train / 20% val / 20% test (0.25 of the remaining
        #    80% equals 20% of the whole set).
        X_train, X_test, y_train, y_test = train_test_split(
            X_processed, y_processed,
            test_size=0.2,
            random_state=42
        )

        X_train, X_val, y_train, y_val = train_test_split(
            X_train, y_train,
            test_size=0.25,
            random_state=42
        )

        # 3. Data loaders.
        train_loader = self._create_dataloader(X_train, y_train, shuffle=True)
        val_loader = self._create_dataloader(X_val, y_val, shuffle=False)
        test_loader = self._create_dataloader(X_test, y_test, shuffle=False)

        # 4. Build the network from the per-sample shape (T, H, W).
        self._initialize_model(X_train.shape[1:])

        # 5. Fit with early stopping and LR scheduling.
        train_losses, val_losses = self._train_model(train_loader, val_loader)

        # 6. Evaluate on the held-out test set.
        evaluation_results = self._evaluate_model(test_loader)

        # 7. Persist model weights + scaler.
        self._save_model()

        self.logger.info("热力图预测模型训练完成")
        return evaluation_results

    def predict_heatmap(self, historical_data: np.ndarray,
                       prediction_steps: int = 1) -> np.ndarray:
        """Autoregressively predict heatmaps for future time steps.

        Args:
            historical_data: unscaled observations, shape (time_steps, height, width).
            prediction_steps: number of future steps to roll out.

        Returns:
            De-normalized predictions, shape (prediction_steps, 1, height, width).

        Raises:
            ValueError: if the model has not been trained or loaded yet.
        """
        if self.model is None:
            raise ValueError("模型尚未训练，请先调用train()方法")

        self.model.eval()

        # Scale and move to device; adds a leading batch dimension.
        input_data = self._preprocess_input(historical_data)
        in_h, in_w = input_data.shape[-2], input_data.shape[-1]

        predictions = []
        current_input = input_data

        with torch.no_grad():
            for step in range(prediction_steps):
                pred = self.model(current_input)  # (1, out_h, out_w)

                # BUG FIX: the network's output is spatially downsampled by
                # its pooling layers, so at native resolution it can neither
                # be fed back into the full-resolution sliding window nor be
                # inverse-scaled with the scaler fit on the input grid.
                # Upsample back to the input grid first (no-op when the
                # shapes already match).
                if pred.shape[-2:] != (in_h, in_w):
                    pred = F.interpolate(
                        pred.unsqueeze(1),
                        size=(in_h, in_w),
                        mode='bilinear',
                        align_corners=False,
                    ).squeeze(1)

                predictions.append(pred.cpu().numpy())

                # Slide the window: drop the oldest frame, append the prediction.
                if step < prediction_steps - 1:
                    current_input = torch.cat([
                        current_input[:, 1:, :, :],
                        pred.unsqueeze(1)
                    ], dim=1)

        # Undo the min-max scaling applied during preprocessing.
        predictions = np.array(predictions)
        predictions = self._inverse_transform_predictions(predictions)

        return predictions

    def generate_heatmap_visualization(self, predicted_heatmap: np.ndarray,
                                     center_coords: Tuple[float, float],
                                     # string annotation so the class can be
                                     # defined even when folium is absent
                                     zoom_level: int = 12) -> "folium.Map":
        """Render a predicted heatmap onto an interactive folium map.

        Args:
            predicted_heatmap: 2D grid of intensities.
            center_coords: (lat, lon) of the grid center.
            zoom_level: initial map zoom.

        Returns:
            A folium map with a HeatMap layer added.
        """
        m = folium.Map(location=center_coords, zoom_start=zoom_level)

        # Convert grid cells to (lat, lon, intensity) triples.
        heat_data = self._heatmap_to_coordinates(predicted_heatmap, center_coords)

        HeatMap(heat_data, radius=15, blur=10, max_zoom=18).add_to(m)

        return m

    def predict_risk_hotspots(self, predicted_heatmap: np.ndarray,
                            threshold_percentile: float = 90) -> List[Dict]:
        """Identify high-risk grid cells in a predicted heatmap.

        Args:
            predicted_heatmap: 2D grid of risk intensities.
            threshold_percentile: percentile used as the hotspot cutoff.

        Returns:
            Hotspot dicts (grid_x, grid_y, risk_score, risk_level),
            sorted by risk_score descending.
        """
        # Cells at or above this percentile count as hotspots.
        threshold = np.percentile(predicted_heatmap, threshold_percentile)

        hotspots = []
        high_risk_indices = np.where(predicted_heatmap >= threshold)

        for i, j in zip(high_risk_indices[0], high_risk_indices[1]):
            hotspots.append({
                'grid_x': int(i),
                'grid_y': int(j),
                'risk_score': float(predicted_heatmap[i, j]),
                'risk_level': self._classify_risk_level(predicted_heatmap[i, j], threshold)
            })

        # Highest risk first.
        hotspots = sorted(hotspots, key=lambda x: x['risk_score'], reverse=True)

        return hotspots

    def _preprocess_data(self, X: np.ndarray, y: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]:
        """Min-max scale X and y and convert them to float tensors.

        NOTE(review): the scaler is fit on X's trailing (width) dimension and
        reused to transform y — this assumes X and y share the same grid
        width; verify against the upstream data pipeline.
        """
        original_shape_X = X.shape
        original_shape_y = y.shape

        # Collapse everything but the last axis so the scaler sees one
        # column per grid-width position.
        X_reshaped = X.reshape(-1, X.shape[-1])
        y_reshaped = y.reshape(-1, y.shape[-1])

        X_scaled = self.scaler.fit_transform(X_reshaped)
        y_scaled = self.scaler.transform(y_reshaped)

        X_processed = X_scaled.reshape(original_shape_X)
        y_processed = y_scaled.reshape(original_shape_y)

        X_tensor = torch.FloatTensor(X_processed)
        y_tensor = torch.FloatTensor(y_processed)

        self.logger.info(f"预处理后数据形状: X={X_tensor.shape}, y={y_tensor.shape}")

        return X_tensor, y_tensor

    def _create_dataloader(self, X: torch.Tensor, y: torch.Tensor,
                          shuffle: bool = True) -> DataLoader:
        """Wrap (X, y) tensors in a DataLoader using the configured batch size."""
        dataset = TensorDataset(X, y)
        return DataLoader(
            dataset,
            batch_size=self.model_config.batch_size,
            shuffle=shuffle,
            num_workers=0  # single-process loading keeps tensors picklable-free
        )

    def _initialize_model(self, input_shape: Tuple[int, int, int]):
        """Instantiate the SpatialTemporalCNN and move it to the device."""
        self.model = SpatialTemporalCNN(
            input_shape=input_shape,
            spatial_filters=self.model_config.spatial_filters,
            temporal_filters=self.model_config.temporal_filters,
            hidden_dims=self.model_config.hidden_dims,
            dropout_rate=self.model_config.dropout_rate
        )

        self.model.to(self.device)

        # Log total parameter count for a quick sanity check.
        total_params = sum(p.numel() for p in self.model.parameters())
        self.logger.info(f"模型参数数量: {total_params:,}")

    def _train_model(self, train_loader: DataLoader,
                    val_loader: DataLoader) -> Tuple[List[float], List[float]]:
        """Train with Adam + MSE, LR scheduling and early stopping.

        Returns:
            (train_losses, val_losses) per epoch, up to the stopping point.
        """
        criterion = nn.MSELoss()
        optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=self.model_config.learning_rate
        )
        # Halve the LR when validation loss plateaus.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', patience=5, factor=0.5
        )

        train_losses = []
        val_losses = []
        best_val_loss = float('inf')
        patience_counter = 0

        for epoch in range(self.model_config.num_epochs):
            # --- training pass ---
            self.model.train()
            train_loss = 0.0

            for batch_X, batch_y in train_loader:
                batch_X = batch_X.to(self.device)
                batch_y = batch_y.to(self.device)

                optimizer.zero_grad()
                outputs = self.model(batch_X)
                loss = criterion(outputs, batch_y)
                loss.backward()
                optimizer.step()

                train_loss += loss.item()

            train_loss /= len(train_loader)

            # --- validation pass ---
            self.model.eval()
            val_loss = 0.0

            with torch.no_grad():
                for batch_X, batch_y in val_loader:
                    batch_X = batch_X.to(self.device)
                    batch_y = batch_y.to(self.device)

                    outputs = self.model(batch_X)
                    loss = criterion(outputs, batch_y)
                    val_loss += loss.item()

            val_loss /= len(val_loader)

            train_losses.append(train_loss)
            val_losses.append(val_loss)

            scheduler.step(val_loss)

            # Early stopping: keep the checkpoint with the best val loss.
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                patience_counter = 0
                torch.save(self.model.state_dict(),
                          f"{config.data.model_dir}/best_heatmap_model.pth")
            else:
                patience_counter += 1

            if patience_counter >= self.model_config.patience:
                self.logger.info(f"早停于第 {epoch+1} 轮")
                break

            if (epoch + 1) % 10 == 0:
                self.logger.info(f"轮次 {epoch+1}/{self.model_config.num_epochs}, "
                               f"训练损失: {train_loss:.6f}, 验证损失: {val_loss:.6f}")

        # Restore the best checkpoint (map_location keeps this robust when
        # the checkpoint was written on a different device).
        self.model.load_state_dict(
            torch.load(f"{config.data.model_dir}/best_heatmap_model.pth",
                       map_location=self.device))

        return train_losses, val_losses

    def _evaluate_model(self, test_loader: DataLoader) -> Dict:
        """Compute test loss, MSE/MAE/RMSE and the Pearson correlation."""
        self.model.eval()
        criterion = nn.MSELoss()  # hoisted: one instance for all batches
        test_loss = 0.0
        predictions = []
        actuals = []

        with torch.no_grad():
            for batch_X, batch_y in test_loader:
                batch_X = batch_X.to(self.device)
                batch_y = batch_y.to(self.device)

                outputs = self.model(batch_X)
                loss = criterion(outputs, batch_y)
                test_loss += loss.item()

                predictions.append(outputs.cpu().numpy())
                actuals.append(batch_y.cpu().numpy())

        test_loss /= len(test_loader)

        predictions = np.concatenate(predictions, axis=0)
        actuals = np.concatenate(actuals, axis=0)

        mse = np.mean((predictions - actuals) ** 2)
        mae = np.mean(np.abs(predictions - actuals))
        rmse = np.sqrt(mse)

        # Pearson correlation between flattened predictions and targets.
        correlation = np.corrcoef(predictions.flatten(), actuals.flatten())[0, 1]

        results = {
            'test_loss': test_loss,
            'mse': mse,
            'mae': mae,
            'rmse': rmse,
            'correlation': correlation
        }

        self.logger.info(f"模型评估结果 - MSE: {mse:.6f}, MAE: {mae:.6f}, "
                        f"RMSE: {rmse:.6f}, 相关系数: {correlation:.4f}")

        self._plot_evaluation_results(predictions, actuals)

        return results

    def _preprocess_input(self, input_data: np.ndarray) -> torch.Tensor:
        """Scale raw input with the fitted scaler and add a batch dimension."""
        original_shape = input_data.shape
        reshaped_data = input_data.reshape(-1, input_data.shape[-1])
        scaled_data = self.scaler.transform(reshaped_data)
        processed_data = scaled_data.reshape(original_shape)

        # (1, time_steps, height, width) on the model's device.
        tensor_data = torch.FloatTensor(processed_data).unsqueeze(0)
        return tensor_data.to(self.device)

    def _inverse_transform_predictions(self, predictions: np.ndarray) -> np.ndarray:
        """Invert the min-max scaling, preserving the prediction array shape."""
        original_shape = predictions.shape
        reshaped_predictions = predictions.reshape(-1, predictions.shape[-1])
        inverse_scaled = self.scaler.inverse_transform(reshaped_predictions)
        return inverse_scaled.reshape(original_shape)

    def _heatmap_to_coordinates(self, heatmap: np.ndarray,
                              center_coords: Tuple[float, float]) -> List[List[float]]:
        """Convert grid cells with positive intensity to [lat, lon, intensity].

        The grid is mapped onto a fixed 0.1-degree lat/lon window centered
        at ``center_coords`` (roughly 11 km across).
        """
        center_lat, center_lon = center_coords
        heat_data = []

        height, width = heatmap.shape

        lat_range = 0.1  # ~11 km
        lon_range = 0.1

        for i in range(height):
            for j in range(width):
                if heatmap[i, j] > 0:
                    # Offset from center, scaled by cell size in degrees.
                    lat = center_lat + (i - height/2) * (lat_range / height)
                    lon = center_lon + (j - width/2) * (lon_range / width)
                    intensity = float(heatmap[i, j])
                    heat_data.append([lat, lon, intensity])

        return heat_data

    def _classify_risk_level(self, risk_score: float, threshold: float) -> str:
        """Map a risk score to a label relative to the hotspot threshold."""
        if risk_score >= threshold * 1.5:
            return "极高"
        elif risk_score >= threshold * 1.2:
            return "高"
        elif risk_score >= threshold:
            return "中等"
        else:
            return "低"

    def _plot_evaluation_results(self, predictions: np.ndarray, actuals: np.ndarray):
        """Save a 2x2 diagnostic figure (scatter, residuals, sample heatmaps)."""
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))

        # Predicted vs actual scatter with the identity line.
        axes[0, 0].scatter(actuals.flatten(), predictions.flatten(), alpha=0.5)
        axes[0, 0].plot([actuals.min(), actuals.max()], [actuals.min(), actuals.max()], 'r--')
        axes[0, 0].set_xlabel('实际值')
        axes[0, 0].set_ylabel('预测值')
        axes[0, 0].set_title('预测值 vs 实际值')

        # Residuals against actual values.
        residuals = predictions.flatten() - actuals.flatten()
        axes[0, 1].scatter(actuals.flatten(), residuals, alpha=0.5)
        axes[0, 1].axhline(y=0, color='r', linestyle='--')
        axes[0, 1].set_xlabel('实际值')
        axes[0, 1].set_ylabel('残差')
        axes[0, 1].set_title('残差图')

        # Example predicted heatmap (random placeholder if empty).
        sample_pred = predictions[0] if len(predictions) > 0 else np.random.rand(10, 10)
        im1 = axes[1, 0].imshow(sample_pred, cmap='hot', interpolation='nearest')
        axes[1, 0].set_title('预测热力图示例')
        plt.colorbar(im1, ax=axes[1, 0])

        # Example actual heatmap.
        sample_actual = actuals[0] if len(actuals) > 0 else np.random.rand(10, 10)
        im2 = axes[1, 1].imshow(sample_actual, cmap='hot', interpolation='nearest')
        axes[1, 1].set_title('实际热力图示例')
        plt.colorbar(im2, ax=axes[1, 1])

        plt.tight_layout()
        plt.savefig(f'{config.data.log_dir}/heatmap_model_evaluation.png')
        plt.close()

    def _save_model(self):
        """Persist weights, scaler, config and input shape in one checkpoint.

        NOTE: the scaler and config are stored via pickle inside the
        checkpoint, so loading requires ``weights_only=False``.
        """
        model_path = f"{config.data.model_dir}/{self.model_config.model_name}.pth"

        torch.save({
            'model_state_dict': self.model.state_dict(),
            'scaler': self.scaler,
            'model_config': self.model_config,
            'input_shape': self.model.input_shape
        }, model_path)

        self.logger.info(f"模型已保存到: {model_path}")

    def load_model(self, model_path: str = None):
        """Load a checkpoint written by :meth:`_save_model`.

        Args:
            model_path: checkpoint path; defaults to the configured location.
        """
        if model_path is None:
            model_path = f"{config.data.model_dir}/{self.model_config.model_name}.pth"

        # weights_only=False is required because the checkpoint embeds the
        # pickled scaler/config (torch>=2.6 defaults to weights_only=True).
        # SECURITY: only load checkpoints from trusted sources — pickle can
        # execute arbitrary code.
        checkpoint = torch.load(model_path, map_location=self.device,
                                weights_only=False)

        # Rebuild the network with the saved input shape.
        self.model = SpatialTemporalCNN(
            input_shape=checkpoint['input_shape'],
            spatial_filters=self.model_config.spatial_filters,
            temporal_filters=self.model_config.temporal_filters,
            hidden_dims=self.model_config.hidden_dims,
            dropout_rate=self.model_config.dropout_rate
        )

        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.model.to(self.device)
        self.model.eval()

        self.scaler = checkpoint['scaler']

        self.logger.info(f"模型已从 {model_path} 加载")