import os
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import networkx as nx
from ultralytics import YOLO
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import time
import warnings
warnings.filterwarnings('ignore')

# Seed the RNGs for reproducibility
torch.manual_seed(42)
np.random.seed(42)

# Device configuration: prefer GPU when available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {device}")

# ======================
# 1. 数据预处理模块
# ======================

class NighttimeImageProcessor:
    """Pre-processing pipeline for low-light (nighttime) traffic images.

    Steps: CLAHE contrast enhancement -> denoising -> Retinex illumination
    balancing -> gamma correction. Each method takes and returns a
    3-channel uint8 image. OpenCV BGR channel order is assumed throughout;
    NOTE(review): the dataset converts frames to RGB before calling
    process(), so the BGR<->LAB conversions operate on swapped channels —
    confirm the intended channel order.
    """

    def __init__(self):
        pass

    def clahe(self, image):
        """Contrast-limited adaptive histogram equalization on the L channel."""
        lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(lab)

        # Equalize only luminance so chroma is left untouched.
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        cl = clahe.apply(l)

        limg = cv2.merge((cl, a, b))
        return cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)

    def denoise(self, image):
        """Denoise with non-local means.

        Stand-in for a learned denoiser (e.g. a pretrained DnCNN) in a
        production system.
        """
        return cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)

    def retinex(self, image):
        """Single-scale Retinex illumination balancing.

        Per channel, in log space: reflectance = log1p(I) - log1p(blur(I)).
        The result is min-max normalized back to uint8.
        """
        image_float = image.astype(np.float32) / 255.0
        b, g, r = cv2.split(image_float)

        def retinex_channel(channel):
            log_channel = np.log1p(channel)
            blur_channel = cv2.GaussianBlur(channel, (0, 0), 3)
            return log_channel - np.log1p(blur_channel)

        retinex_image = cv2.merge(
            [retinex_channel(b), retinex_channel(g), retinex_channel(r)]
        )

        # Min-max normalize. Guard against a constant image (max == min),
        # which previously caused a division by zero.
        lo = np.min(retinex_image)
        hi = np.max(retinex_image)
        span = hi - lo
        if span < 1e-12:
            return np.zeros_like(image)
        retinex_image = (retinex_image - lo) / span

        return (retinex_image * 255).astype(np.uint8)

    def gamma_correction(self, image, gamma=2.2):
        """Gamma correction via LUT; gamma > 1 brightens dark regions."""
        inv_gamma = 1.0 / gamma
        table = np.array(
            [((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]
        ).astype("uint8")
        return cv2.LUT(image, table)

    def process(self, image):
        """Run the full enhancement pipeline and return the processed image."""
        img = self.clahe(image)            # 1. contrast enhancement
        img = self.denoise(img)            # 2. noise suppression
        img = self.retinex(img)            # 3. illumination balancing
        return self.gamma_correction(img)  # 4. gamma correction

# ======================
# 2. 改进的YOLOv8模型
# ======================

class ChannelAttention(nn.Module):
    """Channel attention gate (CBAM variant of squeeze-and-excitation).

    Squeezes the spatial dimensions with both average and max pooling,
    runs each descriptor through a shared two-layer bottleneck, sums the
    results, and emits a per-channel gate in (0, 1).
    """

    def __init__(self, in_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        bottleneck = in_channels // reduction_ratio
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        # Shared 1x1-conv MLP used for both pooled descriptors.
        self.fc1 = nn.Conv2d(in_channels, bottleneck, 1, bias=False)
        self.relu = nn.ReLU()
        self.fc2 = nn.Conv2d(bottleneck, in_channels, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        def excite(pooled):
            # Shared bottleneck MLP.
            return self.fc2(self.relu(self.fc1(pooled)))

        gate = excite(self.avg_pool(x)) + excite(self.max_pool(x))
        return self.sigmoid(gate)

class SpatialAttention(nn.Module):
    """Spatial attention gate: a 2D mask built from channel-wise avg/max maps."""

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        # Two input planes (avg map + max map) -> one attention plane;
        # 'same' padding keeps spatial size unchanged.
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        pooled = torch.cat(
            [x.mean(dim=1, keepdim=True), x.max(dim=1, keepdim=True).values],
            dim=1,
        )
        return self.sigmoid(self.conv1(pooled))

class CBAM(nn.Module):
    """Convolutional Block Attention Module.

    Applies the channel gate first, then the spatial gate, each as an
    element-wise multiplication on the feature map.
    """

    def __init__(self, in_channels, reduction_ratio=16, kernel_size=7):
        super(CBAM, self).__init__()
        self.channel_attention = ChannelAttention(in_channels, reduction_ratio)
        self.spatial_attention = SpatialAttention(kernel_size)

    def forward(self, x):
        gated = self.channel_attention(x) * x
        return self.spatial_attention(gated) * gated

class ImprovedYOLOv8(nn.Module):
    """Improved YOLOv8: frozen backbone + per-scale CBAM + FPN-style fusion.

    Adds a CBAM attention block per feature scale, projects every scale to a
    common 256-channel width with 1x1 lateral convs, fuses them at the finest
    resolution, and runs a small detection head on the fused map.

    NOTE(review): `self.yolo.model(x)` returns the ultralytics model's
    detection outputs, not raw backbone feature maps; indexing results[-3:]
    as three feature scales must be verified against the real structure.
    """

    def __init__(self, num_classes=1):
        super(ImprovedYOLOv8, self).__init__()
        # Pretrained YOLOv8-nano; freeze it so only the added modules train.
        self.yolo = YOLO('yolov8n.pt')
        for param in self.yolo.model.parameters():
            param.requires_grad = False

        # Assumed channel widths of the three feature scales (fine -> coarse).
        scale_channels = (256, 512, 1024)

        # BUGFIX: the original used a single CBAM(256) for every scale, which
        # fails on the 512- and 1024-channel maps (ChannelAttention's convs
        # are built for a fixed channel count). Use one CBAM sized per scale.
        self.cbam = nn.ModuleList([CBAM(c) for c in scale_channels])

        # 1x1 lateral convolutions projecting each scale to 256 channels.
        self.fpn = nn.ModuleList([nn.Conv2d(c, 256, 1) for c in scale_channels])

        # Extra detection head; output channels = num_classes + 5,
        # where 5 = [x, y, w, h, conf].
        self.detect_head = nn.Sequential(
            nn.Conv2d(256, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, num_classes + 5, 1)
        )

    def forward(self, x):
        """x: (batch, C, H, W) -> detection map at the finest feature scale."""
        # Extract features with YOLOv8.
        results = self.yolo.model(x)

        # Treat the last three outputs as the multi-scale features
        # — TODO confirm against the actual YOLOv8 output structure.
        feats = (results[-3], results[-2], results[-1])

        # Per-scale attention followed by lateral projection to 256 channels.
        p1, p2, p3 = [
            lateral(attend(feat))
            for feat, attend, lateral in zip(feats, self.cbam, self.fpn)
        ]

        # Upsample coarser maps to the finest resolution and sum.
        target_size = p1.shape[2:]
        fused_feat = (
            p1
            + F.interpolate(p2, size=target_size, mode='nearest')
            + F.interpolate(p3, size=target_size, mode='nearest')
        )

        return self.detect_head(fused_feat)

# ======================
# 3. 时空图神经网络
# ======================

class GraphConvolution(nn.Module):
    """Single graph-convolution layer: out = adj @ (input @ W) + b.

    Follows the GCN propagation rule; the caller is expected to supply an
    (optionally normalized) adjacency matrix.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-uniform weight initialization, zero bias."""
        nn.init.kaiming_uniform_(self.weight)
        if self.bias is not None:
            nn.init.zeros_(self.bias)

    def forward(self, input, adj):
        """input: (num_nodes, in_features); adj: (num_nodes, num_nodes)."""
        support = torch.mm(input, self.weight)
        # BUGFIX: torch.spmm expects a *sparse* first operand, but callers in
        # this file pass dense adjacency matrices. torch.mm handles the dense
        # case and also supports a sparse `adj`.
        output = torch.mm(adj, support)
        if self.bias is not None:
            # Out-of-place add: no in-place mutation of an autograd result.
            output = output + self.bias
        return output

class TemporalConvolution(nn.Module):
    """Temporal block: dilated 1D convolution -> batch norm -> ReLU.

    Padding of (kernel_size - 1) // 2 * dilation keeps the sequence length
    unchanged for odd kernel sizes.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1):
        super(TemporalConvolution, self).__init__()
        same_pad = (kernel_size - 1) // 2 * dilation
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
                              padding=same_pad, dilation=dilation)
        self.activation = nn.ReLU()
        self.norm = nn.BatchNorm1d(out_channels)

    def forward(self, x):
        """x: (batch, channels, time) -> (batch, out_channels, time)."""
        return self.activation(self.norm(self.conv(x)))

class ST_GNN(nn.Module):
    """Spatio-temporal graph neural network.

    Stacks `num_layers` pairs of (spatial graph convolution, dilated temporal
    convolution) and fuses the two streams by averaging the temporal stream
    over batch and time, then adding it to the spatial stream.

    NOTE(review): for i > 0 the temporal layers are constructed with
    in_channels = hidden_dim, but forward() always feeds them the raw
    `features`-channel temporal input, so this crashes whenever
    features != hidden_dim — confirm the intended wiring.
    """
    def __init__(self, node_features, hidden_dim, num_layers=3):
        super(ST_GNN, self).__init__()
        self.node_features = node_features
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        
        # Spatial graph-convolution layers
        self.gc_layers = nn.ModuleList()
        for i in range(num_layers):
            in_dim = node_features if i == 0 else hidden_dim
            self.gc_layers.append(GraphConvolution(in_dim, hidden_dim))
        
        # Temporal convolution layers
        self.tc_layers = nn.ModuleList()
        for i in range(num_layers):
            in_dim = node_features if i == 0 else hidden_dim
            dilation = 2 ** i  # dilation grows exponentially with depth
            self.tc_layers.append(TemporalConvolution(in_dim, hidden_dim, dilation=dilation))
        
        # Residual projection from the raw node features to hidden_dim
        self.residual = nn.Linear(node_features, hidden_dim)
        
        # Output projection
        self.output_layer = nn.Linear(hidden_dim, hidden_dim)
    
    def forward(self, node_features, adj_matrix, temporal_features):
        """
        node_features: (num_nodes, node_features)
        adj_matrix: (num_nodes, num_nodes)
        temporal_features: (batch_size, num_nodes, time_steps, features)

        Returns: (num_nodes, hidden_dim)
        """
        batch_size, num_nodes, time_steps, features = temporal_features.shape
        
        # Initial node features
        x = node_features  # (num_nodes, node_features)
        
        # Spatio-temporal processing
        for i in range(self.num_layers):
            # Spatial graph convolution
            gc_out = self.gc_layers[i](x, adj_matrix)
            gc_out = F.relu(gc_out)
            
            # Temporal convolution
            # Reshape so Conv1d runs per time step.
            # NOTE(review): after the permute below, Conv1d convolves across
            # the *node* axis, not time, and the input always carries
            # `features` channels regardless of layer index — confirm.
            tc_in = temporal_features.permute(0, 2, 1, 3).reshape(batch_size * time_steps, num_nodes, features)
            tc_out = self.tc_layers[i](tc_in.permute(0, 2, 1))  # (batch_size*time_steps, features, num_nodes)
            tc_out = tc_out.permute(0, 2, 1).reshape(batch_size, time_steps, num_nodes, self.hidden_dim)
            
            # Fuse the spatial and temporal streams
            # (simplified: mean over batch and time, then add; a real system
            # would likely use a learned fusion)
            x = gc_out + tc_out.mean(dim=0).mean(dim=0)  # simple average
            
            # Residual connection (first layer only)
            if i == 0:
                x = x + self.residual(node_features)
            x = F.relu(x)
        
        # Output projection
        output = self.output_layer(x)
        return output  # (num_nodes, hidden_dim)

# ======================
# 4. 违规行为预测模块
# ======================

class ViolationPredictor(nn.Module):
    """Maps fused features to violation-class probabilities plus a risk score.

    The MLP emits logits over `num_violations` classes; the risk head turns
    the resulting probability vector into a scalar in (0, 1).
    """

    def __init__(self, input_dim, hidden_dim, num_violations=4):
        super(ViolationPredictor, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_violations = num_violations

        # Two-hidden-layer classifier with dropout regularization.
        self.mlp = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim, num_violations),
        )

        # Scalar risk head on top of the class probabilities.
        self.risk_assessment = nn.Sequential(
            nn.Linear(num_violations, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 1),
            nn.Sigmoid(),
        )

    def forward(self, features):
        """features: (batch_size, input_dim) -> (probs, risk_score)."""
        probs = F.softmax(self.mlp(features), dim=1)
        return probs, self.risk_assessment(probs)

# ======================
# 5. 完整模型
# ======================

class ElectricBikeViolationPredictor(nn.Module):
    """End-to-end e-bike violation prediction model.

    Pipeline: improved YOLOv8 detection -> feature extraction ->
    spatio-temporal GNN -> violation/risk prediction.

    NOTE(review): the detection-to-feature step is currently simulated with
    torch.rand, so the detector output never influences the prediction —
    this is placeholder code.
    """
    def __init__(self, node_features=10, hidden_dim=128, num_violations=4):
        super(ElectricBikeViolationPredictor, self).__init__()
        # Detection model
        self.detector = ImprovedYOLOv8()
        
        # Spatio-temporal graph neural network
        self.st_gnn = ST_GNN(node_features, hidden_dim)
        
        # Violation-behavior predictor
        self.violation_predictor = ViolationPredictor(hidden_dim, hidden_dim, num_violations)
        
        # Feature extractor (projects per-detection features to hidden_dim)
        self.feature_extractor = nn.Sequential(
            nn.Linear(10, hidden_dim // 2),  # assumes 10-dim detection features
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, hidden_dim)
        )
    
    def forward(self, images, adj_matrix, temporal_features):
        """
        images: (batch_size, C, H, W) - input images
        adj_matrix: (num_nodes, num_nodes) - graph adjacency matrix
        temporal_features: (batch_size, num_nodes, time_steps, features)
        """
        batch_size = images.shape[0]
        
        # 1. Detect e-bikes with the improved YOLOv8
        # (simplified; a real implementation must parse the YOLO output)
        detection_results = self.detector(images)
        
        # 2. Extract features from the detections (placeholder)
        # NOTE(review): random features, plus reliance on the module-level
        # `device` global; real position/speed/heading features are not
        # wired up yet.
        detected_features = torch.rand(batch_size, 10).to(device)  # simulated features
        
        # 3. Project features to hidden_dim
        extracted_features = self.feature_extractor(detected_features)
        
        # 4. Spatio-temporal graph processing
        # NOTE(review): extracted_features[0] is a 1-D (hidden_dim,) vector
        # while ST_GNN expects (num_nodes, node_features) — confirm wiring.
        node_features = extracted_features[0]  # first sample's features as the central node
        st_features = self.st_gnn(node_features, adj_matrix, temporal_features)
        
        # 5. Violation prediction
        violation_probs, risk_score = self.violation_predictor(st_features.unsqueeze(0))
        
        return {
            'detection_results': detection_results,
            'violation_probs': violation_probs,
            'risk_score': risk_score
        }

# ======================
# 6. 数据集类
# ======================

class ElectricBikeDataset(Dataset):
    """Dataset yielding (image, adjacency, temporal features, label) tuples.

    Args:
        image_paths: list of image file paths.
        adj_matrix: per-sample adjacency matrices, indexable by sample idx.
        temporal_features: per-sample temporal feature arrays.
        labels: per-sample integer class labels.
        transform: truthy value enables the nighttime preprocessing pipeline.
            NOTE(review): `transform` is only used as an on/off flag and is
            never called as a transform itself — the NighttimeImageProcessor
            runs instead. Confirm whether a callable was intended.
    """

    def __init__(self, image_paths, adj_matrix, temporal_features, labels, transform=None):
        self.image_paths = image_paths
        self.adj_matrix = adj_matrix
        self.temporal_features = temporal_features
        self.labels = labels
        self.transform = transform
        self.processor = NighttimeImageProcessor()

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        # Load the image. cv2.imread returns None on failure (it does not
        # raise), which previously surfaced later as a confusing cvtColor
        # error; fail fast with a clear message instead.
        path = self.image_paths[idx]
        image = cv2.imread(path)
        if image is None:
            raise FileNotFoundError(f"Could not read image: {path}")
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Optional nighttime enhancement pipeline.
        if self.transform:
            image = self.processor.process(image)

        # HWC uint8 -> CHW float tensor in [0, 1]
        image = torch.from_numpy(image).permute(2, 0, 1).float() / 255.0

        # Per-sample graph structure and temporal inputs
        adj_matrix = torch.from_numpy(self.adj_matrix[idx]).float()
        temporal_features = torch.from_numpy(self.temporal_features[idx]).float()

        # Integer class label
        label = torch.tensor(self.labels[idx], dtype=torch.long)

        return image, adj_matrix, temporal_features, label

# ======================
# 7. 训练函数
# ======================

def train_model(model, dataloader, optimizer, criterion, num_epochs=10, device=device):
    """Train `model` for `num_epochs` over `dataloader` and return it.

    Prints per-epoch loss and accuracy.

    NOTE(review): main() passes CrossEntropyLoss as `criterion`, but it is
    applied here to already-softmaxed probabilities (`violation_probs`),
    which effectively double-softmaxes the outputs; the model should expose
    raw logits for this loss — confirm and fix upstream.
    """
    model.train()
    model.to(device)
    
    for epoch in range(num_epochs):
        running_loss = 0.0
        running_corrects = 0
        
        for batch_idx, (images, adj_matrices, temporal_features, labels) in enumerate(dataloader):
            images = images.to(device)
            adj_matrices = adj_matrices.to(device)
            temporal_features = temporal_features.to(device)
            labels = labels.to(device)
            
            # Reset gradients
            optimizer.zero_grad()
            
            # Forward pass
            outputs = model(images, adj_matrices[0], temporal_features)  # assumes one shared adjacency per batch
            violation_probs = outputs['violation_probs']
            
            # Loss (see NOTE above about probabilities vs. logits)
            loss = criterion(violation_probs, labels)
            
            # Backward pass and parameter update
            loss.backward()
            optimizer.step()
            
            # Running statistics
            running_loss += loss.item() * images.size(0)
            _, preds = torch.max(violation_probs, 1)
            running_corrects += torch.sum(preds == labels.data)
        
        epoch_loss = running_loss / len(dataloader.dataset)
        epoch_acc = running_corrects.double() / len(dataloader.dataset)
        
        print(f'Epoch {epoch}/{num_epochs - 1}, Loss: {epoch_loss:.4f}, Acc: {epoch_acc:.4f}')
    
    return model

# ======================
# 8. 评估函数
# ======================

def evaluate_model(model, dataloader, device=device):
    """Evaluate `model` over `dataloader`; print and return macro metrics.

    Returns:
        (accuracy, precision, recall, f1) tuple with macro averaging.
    """
    model.eval()
    model.to(device)

    predictions, targets = [], []

    with torch.no_grad():
        for images, adj_matrices, temporal_features, labels in dataloader:
            images = images.to(device)
            adj_matrices = adj_matrices.to(device)
            temporal_features = temporal_features.to(device)

            # The model expects one adjacency matrix shared by the batch.
            batch_out = model(images, adj_matrices[0], temporal_features)
            preds = batch_out['violation_probs'].argmax(dim=1)

            predictions.extend(preds.cpu().numpy())
            targets.extend(labels.numpy())

    # Macro-averaged classification metrics
    accuracy = accuracy_score(targets, predictions)
    precision = precision_score(targets, predictions, average='macro')
    recall = recall_score(targets, predictions, average='macro')
    f1 = f1_score(targets, predictions, average='macro')

    print(f'Accuracy: {accuracy:.4f}')
    print(f'Precision: {precision:.4f}')
    print(f'Recall: {recall:.4f}')
    print(f'F1 Score: {f1:.4f}')

    return accuracy, precision, recall, f1

# ======================
# 9. 推理函数
# ======================

def predict_violation(model, image_path, adj_matrix, temporal_features, device=device):
    """Predict the violation type and risk level for a single image.

    Args:
        model: trained ElectricBikeViolationPredictor.
        image_path: path to the input image file.
        adj_matrix: (num_nodes, num_nodes) numpy adjacency matrix.
        temporal_features: (num_nodes, time_steps, features) numpy array.
        device: torch device to run inference on.

    Returns:
        dict with the predicted violation, confidence, risk score/level, and
        the full per-class probability map (class names are user-facing
        Chinese labels).

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    model.eval()
    model.to(device)

    # Load the image. cv2.imread returns None on failure rather than
    # raising, so check explicitly before the cvtColor call.
    image = cv2.imread(image_path)
    if image is None:
        raise FileNotFoundError(f"Could not read image: {image_path}")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Nighttime enhancement pipeline
    processor = NighttimeImageProcessor()
    image = processor.process(image)

    # HWC uint8 -> 1xCxHxW float tensor in [0, 1]
    image = torch.from_numpy(image).permute(2, 0, 1).float() / 255.0
    image = image.unsqueeze(0).to(device)  # add batch dimension

    # Graph structure and temporal inputs as tensors
    adj_matrix = torch.from_numpy(adj_matrix).float().to(device)
    temporal_features = torch.from_numpy(temporal_features).float().unsqueeze(0).to(device)

    # Inference
    with torch.no_grad():
        outputs = model(image, adj_matrix, temporal_features)
        violation_probs = outputs['violation_probs'].cpu().numpy()[0]
        risk_score = outputs['risk_score'].cpu().numpy()[0][0]

    # Violation class labels (user-facing strings, intentionally unchanged)
    violation_types = ['闯红灯', '逆行', '占用机动车道', '超速行驶']

    # Top-1 prediction
    max_idx = np.argmax(violation_probs)
    predicted_violation = violation_types[max_idx]
    confidence = violation_probs[max_idx]

    # Bucket the scalar risk score into three user-facing levels.
    if risk_score < 0.3:
        risk_level = '低风险'
    elif risk_score < 0.7:
        risk_level = '中风险'
    else:
        risk_level = '高风险'

    return {
        'predicted_violation': predicted_violation,
        'confidence': float(confidence),
        'risk_score': float(risk_score),
        'risk_level': risk_level,
        'all_probabilities': {violation_types[i]: float(violation_probs[i]) for i in range(len(violation_types))}
    }

# ======================
# 10. 主函数
# ======================

def main():
    """Demo entry point: builds simulated data, trains, evaluates, predicts."""
    # Simulated data (load real data in a production setting)
    num_samples = 100
    num_nodes = 5  # number of graph nodes
    time_steps = 10  # temporal sequence length
    features_per_node = 4  # features per node
    
    # Simulated image paths
    # NOTE(review): these files do not exist, so cv2.imread will fail when
    # the dataloader actually reads them — the demo assumes real files.
    image_paths = [f'path/to/image_{i}.jpg' for i in range(num_samples)]
    
    # Simulated adjacency matrices
    adj_matrices = [np.random.rand(num_nodes, num_nodes) for _ in range(num_samples)]
    
    # Simulated temporal features
    temporal_features = [np.random.rand(num_nodes, time_steps, features_per_node) for _ in range(num_samples)]
    
    # Simulated labels (0: red-light running, 1: wrong-way riding,
    # 2: motor-lane occupation, 3: speeding)
    labels = np.random.randint(0, 4, size=num_samples)
    
    # Build dataset and dataloader
    dataset = ElectricBikeDataset(image_paths, adj_matrices, temporal_features, labels)
    dataloader = DataLoader(dataset, batch_size=8, shuffle=True)
    
    # Initialize the model
    model = ElectricBikeViolationPredictor()
    
    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    
    # Train
    print("开始训练模型...")
    trained_model = train_model(model, dataloader, optimizer, criterion, num_epochs=5)
    
    # Evaluate
    print("\n评估模型...")
    evaluate_model(trained_model, dataloader)
    
    # Save model weights
    torch.save(trained_model.state_dict(), 'electric_bike_violation_predictor.pth')
    print("\n模型已保存为 'electric_bike_violation_predictor.pth'")
    
    # Single-image prediction demo
    print("\n测试单张图像预测...")
    test_image_path = 'path/to/test_image.jpg'
    test_adj_matrix = np.random.rand(num_nodes, num_nodes)
    test_temporal_features = np.random.rand(num_nodes, time_steps, features_per_node)
    
    prediction = predict_violation(trained_model, test_image_path, test_adj_matrix, test_temporal_features)
    
    print("\n预测结果:")
    print(f"预测违规行为: {prediction['predicted_violation']}")
    print(f"置信度: {prediction['confidence']:.4f}")
    print(f"风险评分: {prediction['risk_score']:.4f}")
    print(f"风险等级: {prediction['risk_level']}")
    print("各类违规行为概率:")
    for violation_type, prob in prediction['all_probabilities'].items():
        print(f"  {violation_type}: {prob:.4f}")

if __name__ == '__main__':
    main()