"""
时空序列预测模型模块
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import logging
from typing import List, Dict, Any
import json
import os

class SpatioTemporalPredictor(nn.Module):
    """Seq2seq spatio-temporal sequence predictor.

    Architecture: bidirectional-GRU encoder -> multi-head attention over the
    encoder outputs -> unidirectional-GRU decoder -> MLP output head.
    The decoder runs step by step and is fed either the ground-truth target
    (teacher forcing, training only) or its own previous prediction.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, dropout, num_heads):
        """
        Args:
            input_dim: number of features per time step.
            hidden_dim: GRU hidden size (encoder is bidirectional, so its
                output dimension is 2 * hidden_dim).
            output_dim: number of predicted values per time step.
            num_layers: GRU depth for both encoder and decoder.
            dropout: dropout rate for the GRUs, attention and output head.
            num_heads: attention heads; must divide 2 * hidden_dim.
        """
        super().__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.num_layers = num_layers
        self.dropout_rate = dropout

        # Encoder: bidirectional GRU
        self.encoder_gru = nn.GRU(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=True,
            # nn.GRU warns if dropout is set with a single layer
            dropout=dropout if num_layers > 1 else 0,
        )

        # BUGFIX: nn.MultiheadAttention requires the query's last dimension
        # to equal embed_dim (2 * hidden_dim here). The raw decoder input has
        # only input_dim features, so it must be projected before being used
        # as the query (the original passed it in directly and raised).
        self.query_proj = nn.Linear(input_dim, hidden_dim * 2)

        # Attention over the encoder outputs
        self.attention = nn.MultiheadAttention(
            embed_dim=hidden_dim * 2,  # bidirectional encoder output size
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True,
        )

        # Decoder: unidirectional GRU fed [input features | attention context]
        self.decoder_gru = nn.GRU(
            input_size=hidden_dim * 2 + input_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0,
        )

        # Output head
        self.output_layer = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim // 2, output_dim),
        )

        # Layer normalization
        self.layer_norm1 = nn.LayerNorm(hidden_dim * 2)
        self.layer_norm2 = nn.LayerNorm(hidden_dim)

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        """Run the encoder-decoder over a source sequence.

        Args:
            src: source sequence [batch, src_len, input_dim].
            trg: target sequence — either full feature vectors
                [batch, trg_len, input_dim], or scalar target values
                [batch, trg_len] / [batch, trg_len, k] with k != input_dim;
                scalar targets are expanded to feature vectors internally
                (generalization: the original only accepted full features,
                yet its training caller passed scalar targets).
            teacher_forcing_ratio: per-step probability (training only) of
                feeding the ground-truth target instead of the prediction.

        Returns:
            Predictions [batch, trg_len, output_dim].
        """
        if trg.dim() == 2:
            # Accept plain [batch, trg_len] target-value sequences
            trg = trg.unsqueeze(-1)
        trg_len = trg.size(1)
        # Does trg already carry full feature vectors?
        trg_is_features = trg.size(-1) == self.input_dim

        # Encode the source sequence
        encoder_output, hidden = self.encoder_gru(src)
        encoder_output = self.layer_norm1(encoder_output)

        # First decoder input: the first target step (or a feature vector
        # built from it when only scalar targets were provided)
        if trg_is_features:
            decoder_input = trg[:, 0:1, :]
        else:
            decoder_input = self._prepare_decoder_input(trg[:, 0:1, :], src)
        decoder_hidden = self._init_decoder_hidden(hidden)

        outputs = []

        for t in range(trg_len):
            # Attend over encoder outputs with the (projected) decoder input
            attn_output, _ = self.attention(
                query=self.query_proj(decoder_input),
                key=encoder_output,
                value=encoder_output,
            )

            # One decoder step
            decoder_output, decoder_hidden = self.decoder_gru(
                torch.cat([decoder_input, attn_output], dim=-1),
                decoder_hidden,
            )
            decoder_output = self.layer_norm2(decoder_output)

            # Predict this step
            output = self.output_layer(decoder_output)
            outputs.append(output)

            # Choose the next decoder input (nothing to do after the last step)
            if t < trg_len - 1:
                use_teacher = (
                    self.training and torch.rand(1).item() < teacher_forcing_ratio
                )
                if use_teacher and trg_is_features:
                    decoder_input = trg[:, t + 1:t + 2, :]
                elif use_teacher:
                    decoder_input = self._prepare_decoder_input(trg[:, t + 1:t + 2, :], src)
                else:
                    # Autoregressive: feed back our own prediction
                    decoder_input = self._prepare_decoder_input(output, src)

        return torch.cat(outputs, dim=1)

    def _init_decoder_hidden(self, encoder_hidden):
        """Build the decoder's initial hidden state from the encoder's.

        The bidirectional encoder hidden state is [num_layers * 2, B, H];
        the two directions of each layer are summed so the result is
        [num_layers, B, H], matching the decoder's hidden_size.
        (BUGFIX: the original concatenated the directions to 2 * H, which
        does not match the decoder's hidden_size and raised at runtime.)
        """
        if encoder_hidden is None:
            return None

        if isinstance(encoder_hidden, tuple):
            # LSTM-style (h, c) pair
            return tuple(self._merge_directions(state) for state in encoder_hidden)
        # GRU: single hidden tensor
        return self._merge_directions(encoder_hidden)

    @staticmethod
    def _merge_directions(state):
        """Sum forward/backward directions: [L*2, B, H] -> [L, B, H]."""
        num_layers = state.size(0) // 2
        return state.view(num_layers, 2, state.size(1), state.size(2)).sum(dim=1)

    def _prepare_decoder_input(self, prediction, src):
        """Turn a scalar prediction into a full decoder-input feature vector.

        The predicted (normalized) queue length goes into feature slot 0 —
        the slot build_feature_vector() uses for the current length
        (BUGFIX: the original wrote it into the LAST slot, which holds the
        time encoding, and its squeeze left a [B, 1] value for a [B] slot,
        which raises for batch > 1). The remaining features are copied from
        the most recent source step.
        """
        batch_size = prediction.size(0)

        decoder_input = torch.zeros(
            batch_size, 1, self.input_dim,
            device=prediction.device, dtype=prediction.dtype,
        )

        # Predicted length -> feature slot 0 (first output channel)
        decoder_input[:, 0, 0] = prediction[:, 0, 0]

        # Carry the remaining features over from the latest observation
        if src is not None:
            decoder_input[:, 0, 1:] = src[:, -1, 1:]

        return decoder_input

class QueueLengthPredictor:
    """Queue-length forecaster wrapping SpatioTemporalPredictor.

    Maintains a rolling history of observation feature vectors, trains the
    seq2seq model on sliding windows of that history, and produces
    multi-step queue-length forecasts. Lengths are normalized by
    LENGTH_SCALE inside the model and de-normalized on the way out.
    """

    # Assumed maximum queue length (meters) used for normalization
    LENGTH_SCALE = 100.0

    def __init__(self, config):
        """
        Args:
            config: object exposing hidden_dim, num_layers, dropout_rate,
                num_heads, learning_rate, history_length,
                prediction_horizon and model_path attributes.
        """
        self.config = config
        self.logger = logging.getLogger(__name__)

        # Model dimensions
        self.input_dim = 8   # features per time step (see build_feature_vector)
        self.output_dim = 1  # predicted values per step (queue length)

        self.model = SpatioTemporalPredictor(
            input_dim=self.input_dim,
            hidden_dim=config.hidden_dim,
            output_dim=self.output_dim,
            num_layers=config.num_layers,
            dropout=config.dropout_rate,
            num_heads=config.num_heads,
        )

        # Training state
        self.history_data = []
        self.is_trained = False
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model.to(self.device)

        # Optimizer
        self.optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=config.learning_rate,
        )

        # Huber loss: robust to occasional outlier queue lengths
        self.criterion = nn.HuberLoss()

        # Restore a previous checkpoint if one exists (best effort)
        self.load_model()

    def build_feature_vector(self, current_length, traffic_params=None,
                             signal_info=None, time_context=None):
        """Assemble the 8-dim normalized feature vector for one time step.

        Args:
            current_length: current queue length (same unit as LENGTH_SCALE).
            traffic_params: optional dict with arrival_rate, saturation_flow,
                queue_vehicles, avg_stop_time.
            signal_info: optional dict with current_phase, time_remaining.
            time_context: optional dict with precomputed hour_sin/hour_cos.

        Returns:
            np.ndarray of shape (input_dim,), each value scaled to ~[0, 1].
        """
        features = np.zeros(self.input_dim)

        # 1. Current queue length (normalized)
        features[0] = current_length / self.LENGTH_SCALE

        # 2. Traffic-flow parameters
        traffic_params = traffic_params or {}
        features[1] = traffic_params.get('arrival_rate', 0) / 60.0      # vehicles/min
        features[2] = traffic_params.get('saturation_flow', 1800) / 2000.0
        features[3] = traffic_params.get('queue_vehicles', 0) / 20.0
        features[4] = traffic_params.get('avg_stop_time', 0) / 60.0

        # 3. Signal information
        signal_info = signal_info or {}
        features[5] = signal_info.get('current_phase', 0) / 4.0
        features[6] = signal_info.get('time_remaining', 0) / 120.0

        # 4. Time-of-day context (cyclic encoding; only the sine component
        #    fits in the single remaining slot)
        time_context = time_context or {}
        if 'hour_sin' in time_context and 'hour_cos' in time_context:
            features[7] = time_context['hour_sin']
        else:
            # Fall back to encoding the current wall-clock hour
            from datetime import datetime
            now = datetime.now()
            hour_rad = 2 * np.pi * now.hour / 24
            features[7] = np.sin(hour_rad)

        return features

    def update_history(self, current_length, additional_info=None):
        """Append one observation to the rolling history.

        Args:
            current_length: observed queue length.
            additional_info: optional traffic-parameter dict forwarded to
                build_feature_vector as traffic_params.
        """
        feature_vector = self.build_feature_vector(current_length, additional_info)

        self.history_data.append({
            'timestamp': len(self.history_data),
            'features': feature_vector,
            'true_length': current_length,
        })

        # Cap the history so memory stays bounded
        max_len = self.config.history_length * 2
        if len(self.history_data) > max_len:
            self.history_data = self.history_data[-max_len:]

    def prepare_training_data(self):
        """Build sliding-window training tensors from the history.

        Returns:
            (sequences, targets) float tensors of shapes
            [N, history_length, input_dim] and [N, prediction_horizon],
            or (None, None) when there is not enough history yet.
        """
        window = self.config.history_length
        horizon = self.config.prediction_horizon
        if len(self.history_data) < window + horizon:
            return None, None

        sequences = []
        targets = []

        for start in range(len(self.history_data) - window - horizon + 1):
            seq_end = start + window
            # Input window of feature vectors
            sequences.append(
                [entry['features'] for entry in self.history_data[start:seq_end]]
            )
            # The raw lengths immediately following the window
            targets.append(
                [entry['true_length']
                 for entry in self.history_data[seq_end:seq_end + horizon]]
            )

        if not sequences:
            return None, None

        # Convert through numpy first: much faster than building a tensor
        # from a list of arrays
        return (torch.as_tensor(np.asarray(sequences), dtype=torch.float32),
                torch.as_tensor(np.asarray(targets), dtype=torch.float32))

    def _targets_to_features(self, batch_src, batch_trg):
        """Expand scalar targets [B, T] into model inputs [B, T, input_dim].

        Slot 0 carries the normalized target length (matching
        build_feature_vector); the remaining slots repeat the most recent
        source-step features.
        """
        horizon = batch_trg.size(1)
        feats = batch_src[:, -1:, :].repeat(1, horizon, 1).clone()
        feats[:, :, 0] = batch_trg / self.LENGTH_SCALE
        return feats

    def train_epoch(self):
        """Train the model for one pass over the prepared windows.

        Returns:
            Mean mini-batch loss, or inf when there is not enough history.
        """
        self.model.train()

        sequences, targets = self.prepare_training_data()
        if sequences is None:
            return float('inf')

        # Move to device
        sequences = sequences.to(self.device)
        targets = targets.to(self.device)

        total_loss = 0.0
        num_batches = 0
        batch_size = 32

        for i in range(0, len(sequences), batch_size):
            batch_src = sequences[i:i + batch_size]
            batch_trg = targets[i:i + batch_size]
            # BUGFIX: the model expects target feature vectors
            # [B, T, input_dim]; expand the raw scalar targets [B, T]
            # (the original passed them through unchanged)
            trg_features = self._targets_to_features(batch_src, batch_trg)

            # Forward pass
            self.optimizer.zero_grad()
            output = self.model(batch_src, trg_features)

            # BUGFIX: compare in the normalized feature scale — predict()
            # multiplies the model output by LENGTH_SCALE, so the model must
            # learn normalized lengths (the original trained on raw lengths)
            loss = self.criterion(
                output, (batch_trg / self.LENGTH_SCALE).unsqueeze(-1)
            )

            # Backward pass with gradient clipping
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
            self.optimizer.step()

            total_loss += loss.item()
            num_batches += 1

        self.is_trained = True
        # BUGFIX: average over the actual number of mini-batches (the
        # original divided by the fractional len(sequences) / batch_size)
        return total_loss / num_batches

    def predict(self, current_length, additional_info=None):
        """Forecast queue length for the next prediction_horizon steps.

        Args:
            current_length: latest observed queue length.
            additional_info: optional traffic parameters for the feature vector.

        Returns:
            List of prediction_horizon non-negative queue lengths.
        """
        # Record the observation first. BUGFIX: the original returned early
        # before update_history(), so history never accumulated through
        # predict() and the model path was unreachable from a cold start.
        self.update_history(current_length, additional_info)

        if len(self.history_data) < self.config.history_length:
            # Not enough context yet: naive persistence forecast
            return [current_length] * self.config.prediction_horizon

        # Most recent window of feature vectors
        input_sequence = [entry['features'] for entry in
                          self.history_data[-self.config.history_length:]]
        input_tensor = torch.as_tensor(
            np.asarray([input_sequence]), dtype=torch.float32
        ).to(self.device)

        # Placeholder target fixes the decoding length; with
        # teacher_forcing_ratio=0 only the first decoder step sees its values
        target_placeholder = torch.zeros(
            1, self.config.prediction_horizon, self.input_dim,
            device=self.device,
        )

        self.model.eval()
        with torch.no_grad():
            predictions = self.model(input_tensor, target_placeholder,
                                     teacher_forcing_ratio=0.0)
            predictions = predictions.cpu().numpy()[0, :, 0]

        # De-normalize and clamp: a queue length cannot be negative
        return [max(0.0, float(p) * self.LENGTH_SCALE) for p in predictions]

    def save_model(self, path=None):
        """Save model, optimizer and history to path (or config.model_path)."""
        save_path = path or self.config.model_path

        # BUGFIX: os.makedirs('') raises when the path has no directory part
        save_dir = os.path.dirname(save_path)
        if save_dir:
            os.makedirs(save_dir, exist_ok=True)

        # Persist model weights plus training state
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'history_data': self.history_data,
            'is_trained': self.is_trained,
        }, save_path)

        self.logger.info(f"模型已保存: {save_path}")

    def load_model(self, path=None):
        """Load a checkpoint if it exists; missing/corrupt files are non-fatal."""
        load_path = path or self.config.model_path

        if not os.path.exists(load_path):
            self.logger.info("未找到预训练模型，使用初始模型")
            return

        try:
            # NOTE(review): torch.load unpickles arbitrary objects — only
            # load checkpoints from trusted sources
            checkpoint = torch.load(load_path, map_location=self.device)
            self.model.load_state_dict(checkpoint['model_state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            self.history_data = checkpoint.get('history_data', [])
            self.is_trained = checkpoint.get('is_trained', False)

            self.logger.info(f"模型已加载: {load_path}")
        except Exception as e:
            # Best effort: fall back to the fresh model on any load failure
            self.logger.warning(f"模型加载失败: {str(e)}")