import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import os
import numpy as np
from tqdm import tqdm
import math
import random


# Network builder (DnCNN-style plain CNN).
def make_net(nplanes_in, kernels, features, bns, acts, dilats, bn_momentum=0.1, padding=1):
    """Build a sequential conv / batch-norm / activation stack.

    Args:
        nplanes_in: number of input channels for the first conv layer.
        kernels: per-layer kernel sizes.
        features: per-layer output channel counts (defines the depth).
        bns: per-layer booleans; True inserts BatchNorm2d after the conv
             (and drops the conv bias, since BN supplies its own shift).
        acts: per-layer activation names, 'relu' or 'linear' (no-op).
        dilats: per-layer dilation factors.
        bn_momentum: momentum passed to every BatchNorm2d.
        padding: padding applied to every conv layer.

    Returns:
        nn.Sequential containing the assembled layers.
    """
    modules = []

    for i, n_out in enumerate(features):
        n_in = nplanes_in if i == 0 else features[i - 1]

        conv = nn.Conv2d(n_in, n_out, kernel_size=kernels[i],
                         dilation=dilats[i], padding=padding, bias=not bns[i])
        # He-style init scaled by kernel area * output channels.
        fan = kernels[i] * kernels[i] * n_out
        conv.weight.data.normal_(0, math.sqrt(2. / fan))
        modules.append(conv)

        if bns[i]:
            bn = nn.BatchNorm2d(n_out, momentum=bn_momentum)
            # Same scaled-normal init is applied to the BN scale as well.
            bn.weight.data.normal_(0, math.sqrt(2. / fan))
            bn.bias.data.zero_()
            modules.append(bn)

        if acts[i] == 'relu':
            modules.append(nn.ReLU(inplace=True))
        elif acts[i] == 'linear':
            pass  # 'linear' means no activation module

    return nn.Sequential(*modules)


# InfoNCE contrastive loss.
class InfoNCELoss(nn.Module):
    """InfoNCE loss over a batch of paired features.

    Expects `features` of shape [2B, D] where row i (i < B) and row i + B
    are two augmented views of the same sample, as produced by `train()`
    via `torch.cat([feat1, feat2], dim=0)`.

    BUG FIX: the previous implementation used `labels = arange(2B)`, which
    made every sample its own positive — after L2 normalization the
    diagonal of the similarity matrix is always 1/temperature, so the loss
    never contrasted the two augmented views. We now mask self-similarity
    and pair each view with its counterpart in the other half of the batch.
    """

    def __init__(self, temperature=0.1):
        super().__init__()
        self.temperature = temperature
        self.cross_entropy = nn.CrossEntropyLoss()

    def forward(self, features):
        """Compute the loss; `features` must have an even number of rows."""
        n = features.shape[0]
        features = nn.functional.normalize(features, dim=1)

        # Pairwise cosine similarities, temperature-scaled.
        sim_matrix = torch.matmul(features, features.T) / self.temperature

        # Self-similarity must not be selectable as a logit.
        sim_matrix.fill_diagonal_(float('-inf'))

        # Positive of row i is its other view: i+B for i<B, i-B otherwise.
        half = n // 2
        labels = (torch.arange(n, device=features.device) + half) % n

        return self.cross_entropy(sim_matrix, labels)


# Dataset that loads every image found directly inside a directory.
class SimpleImageDataset(Dataset):
    """Yields two photometric augmentations of one shared random crop.

    Both views are cropped at the same location; they differ only in a
    randomly drawn brightness and contrast adjustment each.
    """

    def __init__(self, root_dir, patch_size=64):
        self.root_dir = root_dir
        self.patch_size = patch_size
        self.image_files = [os.path.join(root_dir, f) for f in os.listdir(root_dir)
                            if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp'))]
        self.to_tensor = transforms.ToTensor()

        if not self.image_files:
            raise RuntimeError(f"No images found in {root_dir}")

    def __len__(self):
        return len(self.image_files)

    def _jitter(self, patch):
        # Draw brightness first, then contrast, then apply both in that order.
        brightness = np.random.uniform(0.8, 1.2)
        contrast = np.random.uniform(0.8, 1.2)
        patch = transforms.functional.adjust_brightness(patch, brightness)
        return transforms.functional.adjust_contrast(patch, contrast)

    def __getitem__(self, idx):
        image = Image.open(self.image_files[idx]).convert('RGB')

        # One shared crop location for both views.
        top, left, height, width = transforms.RandomCrop.get_params(
            image, output_size=(self.patch_size, self.patch_size))

        view_a = transforms.functional.crop(image, top, left, height, width)
        view_b = transforms.functional.crop(image, top, left, height, width)

        # Each view gets its own independently drawn photometric jitter.
        return self.to_tensor(self._jitter(view_a)), self.to_tensor(self._jitter(view_b))


# One epoch of contrastive training.
def train(model, loader, optimizer, criterion, device, epoch):
    """Train `model` for a single epoch and return the mean batch loss.

    Each batch supplies two augmented views; their flattened features are
    concatenated and scored by the contrastive `criterion`.
    """
    model.train()
    running_loss = 0

    progress = tqdm(loader, desc=f'Epoch {epoch}')
    for view1, view2 in progress:
        view1 = view1.to(device)
        view2 = view2.to(device)

        optimizer.zero_grad()

        # Flatten the conv feature maps to vectors: [B, C*H*W].
        z1 = model(view1).flatten(1)
        z2 = model(view2).flatten(1)

        # Contrastive loss over both views stacked along the batch axis.
        loss = criterion(torch.cat([z1, z2], dim=0))

        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        progress.set_postfix({'loss': running_loss / (progress.n + 1)})

    return running_loss / len(loader)


def main():
    """Entry point: configure, build the model, and run contrastive pre-training."""
    # Hyper-parameters for the run.
    config = {
        'data_path': '/home/wc/disk1/DocTamper/DocTamperV1/DocTamperV1-TrainingSet/img',
        'batch_size': 64,
        'patch_size': 64,
        'epochs': 50,
        'lr': 1e-3,
        'temperature': 0.1,
        'num_levels': 15,  # network depth (conv layers)
        'out_channel': 1  # channels of the final conv layer
    }

    # Pick GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'Using device: {device}')

    # Fail fast on a missing data directory.
    if not os.path.exists(config['data_path']):
        raise FileNotFoundError(f"Data path not found: {config['data_path']}")

    depth = config['num_levels']
    # DnCNN layout: conv+relu head, conv+bn+relu body, linear conv tail.
    model = make_net(
        nplanes_in=3,
        kernels=[3] * depth,
        features=[64] * (depth - 1) + [config['out_channel']],
        bns=[False] + [True] * (depth - 2) + [False],
        acts=['relu'] * (depth - 1) + ['linear'],
        dilats=[1] * depth,
        bn_momentum=0.1,
        padding=1
    ).to(device)

    # Build the dataset; abort gracefully if the directory has no images.
    try:
        dataset = SimpleImageDataset(config['data_path'], config['patch_size'])
        print(f"Found {len(dataset)} images in dataset")
    except Exception as e:
        print(f"Error loading dataset: {e}")
        return

    loader = DataLoader(
        dataset,
        batch_size=config['batch_size'],
        shuffle=True,
        num_workers=4,
        pin_memory=True
    )

    # Optimizer, LR schedule (on plateau of the training loss), and loss.
    optimizer = optim.Adam(model.parameters(), lr=config['lr'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5)
    criterion = InfoNCELoss(config['temperature'])

    best_loss = float('inf')
    for epoch in range(1, config['epochs'] + 1):
        epoch_loss = train(model, loader, optimizer, criterion, device, epoch)
        scheduler.step(epoch_loss)

        # Keep the best-so-far checkpoint.
        if epoch_loss < best_loss:
            best_loss = epoch_loss
            torch.save(model.state_dict(), 'best_dncnn.pth')
            print(f'New best model saved with loss: {best_loss:.4f}')

        # Periodic checkpoint every 5 epochs.
        if epoch % 5 == 0:
            torch.save(model.state_dict(), f'dncnn_epoch_{epoch}.pth')

    print('Training completed')


# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()