import os
import json
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
from sklearn.model_selection import train_test_split
from PIL import Image
import random
import matplotlib.pyplot as plt
from tqdm import tqdm

# -------------------- Configuration --------------------
config = {
    'data_root': 'F:/机器学习数据集/tusimple/',
    'batch_size': 4,
    'num_workers': 4,
    'lr': 1e-4,
    'epochs': 50,
    'input_size': (288, 512),  # (height, width) fed to the network
    # Fall back to CPU when CUDA is unavailable; the conditional had been
    # commented out, which made every run hard-require a GPU.
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
    'weight_decay': 5e-5,
    'patience': 30,       # early-stopping patience (epochs)
    'grad_clip': 1.0,     # max gradient norm
    'warmup_epochs': 5,
    'label_smooth': 0.1   # smoothing factor towards 0.5 for the binary mask
}


# -------------------- Data augmentation --------------------
class LaneAugmentation:
    """Image-only augmentations applied to a PIL image inside the transform pipeline."""

    def __init__(self):
        # Kept for API compatibility; each augmentation below uses its own probability.
        self.prob = 0.6

    def __call__(self, img):
        img = np.array(img)

        # Random perspective warp.
        # NOTE(review): this warps the image only — the segmentation label is
        # rasterised from the raw annotation elsewhere, so geometric
        # augmentation desynchronises image and label. Confirm intent.
        if random.random() < 0.3:
            h, w = img.shape[:2]
            src = np.float32([[0.1 * w, h], [0.9 * w, h], [0, 0], [w, 0]])
            dst = np.float32([
                [0.1 * w + random.randint(-30, 30), h],
                [0.9 * w + random.randint(-30, 30), h],
                [random.randint(-50, 50), random.randint(0, 100)],
                [w + random.randint(-50, 50), random.randint(0, 100)]
            ])
            M = cv2.getPerspectiveTransform(src, dst)
            img = cv2.warpPerspective(img, M, (w, h))

        # Saturation boost. Compute in float and clip back to [0, 255]:
        # multiplying the uint8 channel directly overflows and wraps around,
        # producing corrupted colours instead of a stronger saturation.
        if random.random() < 0.5:
            hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)
            hsv[..., 1] = np.clip(hsv[..., 1] * random.uniform(1.2, 1.5), 0, 255)
            img = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)

        return Image.fromarray(img)


# Training-time transform (image only: the label mask is rasterised separately
# in TuSimpleDataset._create_label, so only photometric augmentation is safe here).
train_transform = transforms.Compose([
    transforms.Resize(config['input_size']),
    LaneAugmentation(),
    transforms.ColorJitter(0.3, 0.3, 0.3, 0.1),
    # RandomHorizontalFlip removed: it flipped the image without flipping the
    # label mask, silently corrupting half of the training pairs.
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])


# -------------------- Dataset --------------------
class TuSimpleDataset(Dataset):
    """TuSimple lane-detection dataset yielding (image_tensor, binary_mask) pairs.

    Each annotation line is a JSON record with 'lanes' (per-lane x coordinates),
    'h_samples' (shared y coordinates) and 'raw_file' (image path relative to
    the dataset root).
    """

    def __init__(self, root, json_files, transform=None):
        self.root = root
        self.transform = transform
        self.annotations = []

        for json_file in json_files:
            json_path = os.path.join(root, 'labels', json_file)
            if not os.path.exists(json_path):
                continue

            with open(json_path, 'r') as f:
                for line in f:
                    # Skip malformed JSON lines only; the original bare
                    # `except:` also hid genuine programming errors.
                    try:
                        ann = json.loads(line.strip())
                    except json.JSONDecodeError:
                        continue
                    if self._validate_annotation(ann):
                        ann['raw_file'] = os.path.join(root, ann['raw_file'])
                        self.annotations.append(ann)

    @staticmethod
    def split_dataset(root, test_size=0.2):
        """Split the label files into (train, val) lists, tolerating missing files."""
        all_json = ['label_data_0313.json', 'label_data_0531.json', 'label_data_0601.json']
        valid_files = []
        labels_dir = os.path.join(root, 'labels')

        # 1. The labels directory must exist.
        if not os.path.exists(labels_dir):
            raise FileNotFoundError(f"标签目录 {labels_dir} 不存在。")

        # 2. Collect the label files that are actually present.
        for fname in all_json:
            file_path = os.path.join(labels_dir, fname)
            if os.path.isfile(file_path):
                valid_files.append(fname)
            else:
                print(f"警告：文件 {file_path} 不存在。")

        # 3. At least one file is required.
        if not valid_files:
            raise ValueError("没有找到任何有效的数据文件。")

        # 4. Choose the split strategy by how many files survived.
        n_files = len(valid_files)
        if n_files == 1:
            # Single file: everything goes to training, no validation set.
            return valid_files, []
        elif n_files == 2:
            # Two files: one for training, one for validation.
            return [valid_files[0]], [valid_files[1]]
        else:
            # Normal random split.
            return train_test_split(
                valid_files,
                test_size=test_size,
                random_state=42,
                shuffle=True
            )

    def _validate_annotation(self, ann):
        # A record is usable only if all three required keys are present.
        return all(k in ann for k in ['lanes', 'h_samples', 'raw_file'])

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, idx):
        """Return (image, label); on an unreadable sample, retry with a random
        other index (bounded, unlike the original unbounded recursion)."""
        for _ in range(10):
            ann = self.annotations[idx]
            try:
                image = Image.open(ann['raw_file']).convert('RGB')
                orig_w, orig_h = image.size

                if self.transform:
                    image = self.transform(image)

                label = self._create_label(ann, orig_h, orig_w)
                return image, label
            except Exception:
                # Corrupt or missing image: pick another sample at random.
                idx = np.random.randint(0, len(self))
        raise RuntimeError("Too many consecutive unreadable samples in dataset.")

    def _create_label(self, ann, img_h, img_w):
        """Rasterise the lane polylines into a binary mask resized to the
        network input resolution."""
        label = np.zeros((img_h, img_w), dtype=np.float32)
        y_samples = ann['h_samples']

        for lane in ann['lanes']:
            prev_point = None
            for x, y in zip(lane, y_samples):
                if x < 0 or y >= img_h:
                    # TuSimple marks missing points with x = -2; break the
                    # polyline so segments are not drawn across the gap.
                    prev_point = None
                    continue

                # Thicker strokes near the bottom of the image (closer lanes).
                thickness = max(4, int(12 * (1 - y / img_h)))
                cv2.circle(label, (int(x), int(y)), thickness, 1, -1)

                if prev_point is not None:
                    cv2.line(label, prev_point, (int(x), int(y)), 1, thickness)
                prev_point = (int(x), int(y))

        # cv2.resize takes (width, height); nearest-neighbour keeps the mask binary.
        label = cv2.resize(label, (config['input_size'][1], config['input_size'][0]),
                           interpolation=cv2.INTER_NEAREST)
        return torch.from_numpy(label).unsqueeze(0)


# -------------------- Model components --------------------
class PositionalEncoding2D(nn.Module):
    """Learned 2D positional encoding: projects normalized (x, y) coordinate
    grids to the feature dimension and adds them to the input feature map."""

    def __init__(self, channels):
        super().__init__()
        # 1x1 conv maps the 2-channel coordinate grid to `channels`.
        # bias=False replaces the original's fragile `self.proj.bias = None`
        # post-hoc attribute overwrite (same parameters in the state dict).
        self.proj = nn.Conv2d(2, channels, kernel_size=1, bias=False)
        nn.init.normal_(self.proj.weight, mean=0, std=0.02)

    def forward(self, x):
        b, c, h, w = x.size()

        # Normalized coordinate grids in [-1, 1], broadcast over the batch.
        y_coord = torch.linspace(-1, 1, h, device=x.device).view(1, 1, h, 1).expand(b, 1, h, w)
        x_coord = torch.linspace(-1, 1, w, device=x.device).view(1, 1, 1, w).expand(b, 1, h, w)

        coords = torch.cat([x_coord, y_coord], dim=1)  # [b, 2, h, w]
        pos_enc = self.proj(coords)                    # [b, c, h, w]

        return x + pos_enc


class EnhancedASPP(nn.Module):
    """ASPP block: four parallel dilated 3x3 branches whose concatenated
    output (4 * out_channels channels) is reweighted by a squeeze-excitation
    gate."""

    def __init__(self, in_channels, out_channels=256):
        super().__init__()
        dilation_rates = (1, 6, 12, 18)

        def make_branch(rate):
            # One dilated conv branch; padding == dilation keeps spatial size.
            return nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 3, padding=rate, dilation=rate, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU()
            )

        self.convs = nn.ModuleList(make_branch(r) for r in dilation_rates)

        fused_channels = out_channels * 4
        squeezed = out_channels // 4
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(fused_channels, squeezed, 1),
            nn.ReLU(),
            nn.Conv2d(squeezed, fused_channels, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        fused = torch.cat([branch(x) for branch in self.convs], dim=1)
        return fused * self.se(fused)


class SpatialGate(nn.Module):
    """Spatial attention: each location is gated by a sigmoid map computed
    from the channel-wise mean and max statistics."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(2, 1, kernel_size=7, padding=3),
            nn.Sigmoid()
        )

    def forward(self, x):
        mean_map = x.mean(dim=1, keepdim=True)
        max_map = x.max(dim=1, keepdim=True).values
        gate = self.conv(torch.cat([mean_map, max_map], dim=1))
        return x * gate


class CNNTransformer(nn.Module):
    """Encoder container: ResNet-34 feature pyramid, positional encoding and a
    Transformer over the deepest features, plus an ASPP head.

    Sub-modules are invoked individually by LaneDetectionNet.forward rather
    than through a forward() of this class.
    """

    def __init__(self):
        super().__init__()
        backbone = models.resnet34(pretrained=True)

        # Stem: reuse the pretrained conv1 (same 7x7/stride-2 configuration).
        # The original created a fresh Conv2d here, discarding the pretrained
        # stem weights while keeping the rest of the backbone pretrained.
        self.stem = nn.Sequential(
            backbone.conv1,
            backbone.bn1,
            backbone.relu,
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # overall stride 4
        )

        # Residual stages (output channel counts noted).
        self.layer1 = backbone.layer1  # 64
        self.layer2 = backbone.layer2  # 128
        self.layer3 = backbone.layer3  # 256
        self.layer4 = backbone.layer4  # 512

        # Positional encoding matching layer4's output channels.
        self.pos_enc = PositionalEncoding2D(512)

        # Transformer over the flattened deepest feature map
        # (sequence-first layout; see LaneDetectionNet.forward).
        self.transformer = nn.TransformerEncoder(
            encoder_layer=nn.TransformerEncoderLayer(
                d_model=512,
                nhead=16,
                dim_feedforward=2048,
                dropout=0.1,
                activation='gelu'
            ),
            num_layers=4
        )

        self.aspp = EnhancedASPP(512)
        # NOTE(review): `decoder` is not referenced by LaneDetectionNet.forward
        # in this file; kept for interface/state-dict compatibility.
        self.decoder = nn.Sequential(
            nn.Conv2d(1024, 512, 1),
            SpatialGate(),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(512, 256, 3, padding=1),
            nn.ReLU()
        )


class LaneDetectionNet(nn.Module):
    """Lane segmentation network.

    CNNTransformer encoder with multi-scale skip connections fused at 1/4 of
    the input resolution, then upsampled back to input size. Output is a
    single-channel logit map [b, 1, H, W].
    """

    def __init__(self, input_size=(288, 512)):
        super().__init__()
        self.input_size = input_size
        # Feature fusion happens at 1/4 of the input resolution.
        self.target_h = input_size[0] // 4
        self.target_w = input_size[1] // 4

        self.encoder = CNNTransformer()

        # Skip branch from layer3 features (256 -> 128 channels).
        self.skip_conv1 = nn.Sequential(
            nn.Conv2d(256, 128, 1),
            nn.Upsample(size=(self.target_h, self.target_w)),
            nn.Conv2d(128, 128, 3, padding=1)
        )

        # Skip branch from layer2 features (128 -> 64 channels).
        self.skip_conv2 = nn.Sequential(
            nn.Conv2d(128, 64, 1),
            nn.Upsample(size=(self.target_h, self.target_w)),
            nn.Conv2d(64, 64, 3, padding=1)
        )

        # Main branch: compress the ASPP output (1024 -> 256 channels).
        self.aspp_adapter = nn.Sequential(
            nn.Conv2d(1024, 256, 1),
            nn.Upsample(size=(self.target_h, self.target_w)),
            nn.Conv2d(256, 256, 3, padding=1)
        )

        # Fuse 256 + 128 + 64 channels and predict a 1-channel logit map.
        self.head = nn.Sequential(
            nn.Conv2d(256 + 128 + 64, 256, 3, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=4, mode='bilinear'),
            nn.Conv2d(256, 1, 1)
        )

    def forward(self, x):
        # Backbone feature pyramid.
        x0 = self.encoder.stem(x)      # [b, 64, h/4, w/4]
        x1 = self.encoder.layer1(x0)   # [b, 64, h/4, w/4]
        x2 = self.encoder.layer2(x1)   # [b, 128, h/8, w/8]
        x3 = self.encoder.layer3(x2)   # [b, 256, h/16, w/16]
        x4 = self.encoder.layer4(x3)   # [b, 512, h/32, w/32]

        # Transformer over the flattened deepest features (seq, batch, dim).
        b, c, h, w = x4.shape
        x4 = self.encoder.pos_enc(x4)
        tokens = x4.view(b, c, -1).permute(2, 0, 1)
        tokens = self.encoder.transformer(tokens)
        # .reshape instead of .view: the permuted tensor is non-contiguous,
        # so the original `.view(b, c, h, w)` raised a RuntimeError here.
        x_trans = tokens.permute(1, 2, 0).reshape(b, c, h, w)

        # Context aggregation.
        x_aspp = self.encoder.aspp(x_trans)  # [b, 1024, h/32, w/32]

        # Bring every branch to the common 1/4-resolution grid.
        x_main = self.aspp_adapter(x_aspp)   # [b, 256, target_h, target_w]
        s1 = self.skip_conv1(x3)             # [b, 128, target_h, target_w]
        s2 = self.skip_conv2(x2)             # [b, 64,  target_h, target_w]

        # Channel-wise fusion -> [b, 448, target_h, target_w].
        merged = torch.cat([x_main, s1, s2], dim=1)

        return self.head(merged)


# -------------------- Loss function --------------------
class EdgeAwareLoss(nn.Module):
    """支持混合精度训练的边缘感知损失函数"""

    def __init__(self, device='cuda'):
        super().__init__()
        # 初始化Sobel算子（显式指定dtype）
        self.sobel_x = nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False).to(device)
        self.sobel_y = nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False).to(device)

        # 初始化固定权重
        self._init_sobel_weights()

        # 锁定参数
        for param in self.parameters():
            param.requires_grad_(False)
            param.data = param.data.to(torch.float32)  # 显式保持float32

    def _init_sobel_weights(self):
        """初始化Sobel卷积核"""
        sobel_kernel_x = torch.tensor([
            [[[-1, 0, 1],
              [-2, 0, 2],
              [-1, 0, 1]]]
        ], dtype=torch.float32)  # 保持float32

        sobel_kernel_y = torch.tensor([
            [[[1, 2, 1],
              [0, 0, 0],
              [-1, -2, -1]]]
        ], dtype=torch.float32)

        self.sobel_x.weight.data = sobel_kernel_x.to(self.sobel_x.weight.device)
        self.sobel_y.weight.data = sobel_kernel_y.to(self.sobel_y.weight.device)

    def forward(self, pred, target):
        """
        前向传播（显式类型转换）
        参数:
            pred (Tensor): 模型预测值 [B,1,H,W] (可能是float16)
            target (Tensor): 真实标签 [B,1,H,W] (float32)
        """
        # 确保在float32下计算边缘检测
        with torch.cuda.amp.autocast(enabled=False):  # 禁用自动混合精度
            # 转换输入到float32
            pred_float32 = pred.float()
            target_float32 = target.float()

            # 基础交叉熵损失
            bce_loss = F.binary_cross_entropy_with_logits(pred_float32, target_float32)

            # 目标边缘检测
            edge_x = self.sobel_x(target_float32)
            edge_y = self.sobel_y(target_float32)
            target_edges = torch.sqrt(edge_x ** 2 + edge_y ** 2 + 1e-6)

            # 预测边缘检测
            pred_sigmoid = torch.sigmoid(pred_float32)
            pred_edge_x = self.sobel_x(pred_sigmoid)
            pred_edge_y = self.sobel_y(pred_sigmoid)
            pred_edges = torch.sqrt(pred_edge_x ** 2 + pred_edge_y ** 2 + 1e-6)

            # 边缘L1损失
            edge_loss = F.l1_loss(pred_edges, target_edges)

            # 组合损失
            total_loss = 0.7 * bce_loss + 0.3 * edge_loss

        return total_loss

# -------------------- Training loop --------------------
def train():
    """Train LaneDetectionNet on TuSimple with AMP, OneCycleLR scheduling,
    gradient clipping and early stopping; saves the best checkpoint to
    'best_model.pth'."""
    train_files, val_files = TuSimpleDataset.split_dataset(config['data_root'])

    # Validation must be deterministic: resize + normalize only. The original
    # reused train_transform, which applied random augmentation at validation
    # time and made the early-stopping signal noisy.
    val_transform = transforms.Compose([
        transforms.Resize(config['input_size']),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    train_set = TuSimpleDataset(config['data_root'], train_files, train_transform)
    val_set = TuSimpleDataset(config['data_root'], val_files, val_transform)

    train_loader = DataLoader(train_set, batch_size=config['batch_size'], shuffle=True,
                              num_workers=config['num_workers'], pin_memory=True)
    val_loader = DataLoader(val_set, batch_size=config['batch_size'],
                            num_workers=config['num_workers'], pin_memory=True)

    model = LaneDetectionNet().to(config['device'])
    criterion = EdgeAwareLoss(device=config['device'])
    optimizer = torch.optim.AdamW(model.parameters(), lr=config['lr'],
                                  weight_decay=config['weight_decay'])
    # OneCycleLR already ramps the LR up over the first pct_start fraction of
    # training; the original's manual per-epoch warmup was overwritten by
    # scheduler.step() on every batch and has been removed.
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=config['lr'] * 1.2,
        steps_per_epoch=len(train_loader),
        epochs=config['epochs'],
        pct_start=0.3
    )
    scaler = torch.cuda.amp.GradScaler()

    best_loss = float('inf')
    patience_counter = 0

    for epoch in range(config['epochs']):
        model.train()
        train_loss = 0
        train_bar = tqdm(train_loader, desc=f'Train Epoch {epoch + 1}')

        for images, labels in train_bar:
            images = images.to(config['device'])
            labels = labels.to(config['device'])

            # Label smoothing towards 0.5 for the binary mask.
            smooth_labels = labels * (1 - config['label_smooth']) + 0.5 * config['label_smooth']

            optimizer.zero_grad()

            with torch.cuda.amp.autocast():
                outputs = model(images)
                loss = criterion(outputs, smooth_labels)

            scaler.scale(loss).backward()
            # Gradients must be unscaled before clipping; otherwise the clip
            # threshold is compared against scaled gradients and the clip is
            # effectively a no-op.
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), config['grad_clip'])
            scaler.step(optimizer)
            scaler.update()
            scheduler.step()

            train_loss += loss.item() * images.size(0)
            train_bar.set_postfix({'loss': f"{loss.item():.4f}"})

        model.eval()
        val_loss = 0
        val_bar = tqdm(val_loader, desc='Validating')

        with torch.no_grad():
            for images, labels in val_bar:
                images = images.to(config['device'])
                labels = labels.to(config['device'])

                outputs = model(images)
                loss = criterion(outputs, labels)
                val_loss += loss.item() * images.size(0)
                val_bar.set_postfix({'val_loss': f"{loss.item():.4f}"})

        train_loss /= len(train_set)
        if len(val_set) > 0:
            val_loss /= len(val_set)
        else:
            # No validation data (single label file): fall back to the train
            # loss for checkpointing / early stopping instead of dividing by 0.
            val_loss = train_loss

        print(f"\nEpoch {epoch + 1}/{config['epochs']}")
        print(f"Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f}")

        # Keep the best checkpoint; stop after `patience` epochs without improvement.
        if val_loss < best_loss:
            best_loss = val_loss
            patience_counter = 0
            torch.save(model.state_dict(), 'best_model.pth')
        else:
            patience_counter += 1
            if patience_counter >= config['patience']:
                print("Early stopping!")
                break

# -------------------- Visualization --------------------
def visualize_results(model, image_path):
    """Run the model on one image and plot the original, the probability map,
    and a dynamically thresholded binary mask."""
    transform = transforms.Compose([
        transforms.Resize(config['input_size']),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    orig_img = Image.open(image_path).convert('RGB')
    input_tensor = transform(orig_img).unsqueeze(0).to(config['device'])

    # Inference mode: without eval(), BatchNorm would use batch statistics of
    # this single image and corrupt the prediction.
    model.eval()
    with torch.no_grad():
        output = model(input_tensor)
        pred = torch.sigmoid(output)
        prob_map = pred.squeeze().cpu().numpy()

    # Print key statistics of the probability map.
    print("概率图范围 - 最小值: {:.4f}, 最大值: {:.4f}".format(prob_map.min(), prob_map.max()))

    # Dynamic threshold: a fixed fraction of the peak probability.
    if prob_map.max() > 0:
        dynamic_thresh = 0.16 * prob_map.max()
    else:
        dynamic_thresh = 0.0

    plt.figure(figsize=(18, 6))

    # Original image.
    plt.subplot(1, 3, 1)
    plt.imshow(orig_img)
    plt.title("Original Image")

    # Probability map.
    plt.subplot(1, 3, 2)
    plt.imshow(prob_map, cmap='jet', vmin=0, vmax=1)
    plt.colorbar()
    plt.title("Probability Map")

    # Binarized mask.
    plt.subplot(1, 3, 3)
    binary_mask = prob_map > dynamic_thresh
    plt.imshow(binary_mask, cmap='gray')
    plt.title(f"Threshold={dynamic_thresh:.2f}")

    plt.show()




if __name__ == '__main__':
    train()

    # Reload the best checkpoint for inference; map_location lets the
    # checkpoint load even if it was saved on a different device.
    model = LaneDetectionNet().to(config['device'])
    model.load_state_dict(torch.load('best_model.pth', map_location=config['device']))
    test_img = os.path.join(config['data_root'], 'clips/0313-1/6040/20.jpg')
    visualize_results(model, test_img)