import sys
import os
import time
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import torch.optim as optim
import torch.nn as nn
from witin_nn import GlobalConfigFactory, LayerConfigFactory
from witin_nn import WitinConv2d, WitinBatchNorm2d, WitinGELU, WitinLinear, HandleNegInType

# Training hyperparameters
train_batch_size = 64
test_batch_size = 64
num_workers = 2  # number of DataLoader worker processes
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
lr = 0.001
momentum = 0.9
num_epochs = 10  # total number of training epochs

# Load CIFAR-10; ToTensor scales to [0, 1], then Normalize(mean=0.5, std=0.5)
# maps each channel to [-1, 1] (relevant for any later clamping of inputs)
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
train_dataset = torchvision.datasets.CIFAR10('../data/CIFAR-10', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.CIFAR10('../data/CIFAR-10', train=False, transform=transform, download=True)
train_loader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=num_workers)
test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=False, num_workers=num_workers)

# Data visualization helper
def imshow(img):
    """Un-normalize a CHW image tensor and display it with matplotlib."""
    unnormalized = img / 2 + 0.5  # invert Normalize(mean=0.5, std=0.5)
    # matplotlib expects HWC ordering, so move channels last
    plt.imshow(unnormalized.numpy().transpose(1, 2, 0))
    plt.show()

# Preview one training batch as a single image grid (blocks until the window closes)
examples = enumerate(train_loader)
idx, (examples_data, examples_target) = next(examples)
imshow(torchvision.utils.make_grid(examples_data))

# Network definition (built from Witin quantization-aware operators)
class CNNNet(nn.Module):
    """CIFAR-10 CNN assembled from Witin quantization-aware layers.

    Architecture:
        conv(3->16, k5) -> BN -> GELU -> strided conv downsample (k2, s2)
        -> conv(16->36, k3) -> BN -> GELU -> strided conv downsample (k2, s2)
        -> flatten -> fc(36*6*6 -> 128) -> GELU -> fc(128 -> 10)

    The conv/BN/downsample/activation layers run quantized (8-bit,
    noise level 4); the fully connected layers run unquantized.
    """

    @staticmethod
    def _make_config(use_quantization, noise_level=None):
        """Build one layer config with the settings shared by every layer.

        The original code repeated this block nine times; the only
        differences were `use_quantization` and whether `noise_level`
        was set at all (the fully connected layers never set it), so
        `noise_level` is only assigned when given.
        """
        cfg = LayerConfigFactory.get_default_config()
        cfg.use_quantization = use_quantization
        if noise_level is not None:
            cfg.noise_level = noise_level
        cfg.x_quant_bit = 8
        cfg.y_quant_bit = 8
        cfg.scale_x = 16
        cfg.scale_y = 16
        cfg.scale_weight = 16
        cfg.use_auto_scale = True
        cfg.handle_neg_in = HandleNegInType.PN
        return cfg

    def __init__(self):
        super(CNNNet, self).__init__()

        # First conv stage: quantized, with simulated hardware noise.
        self.conv1 = WitinConv2d(3, 16, 5, stride=1,
                                 layer_config=self._make_config(True, 4))
        self.bn1 = WitinBatchNorm2d(16, layer_config=self._make_config(True, 4))
        # Stride-2 conv acts as a learned 2x downsampler (instead of pooling).
        self.down1 = WitinConv2d(16, 16, 2, stride=2,
                                 layer_config=self._make_config(True, 4))

        # Second conv stage.
        self.conv2 = WitinConv2d(16, 36, 3, stride=1,
                                 layer_config=self._make_config(True, 4))
        self.bn2 = WitinBatchNorm2d(36, layer_config=self._make_config(True, 4))
        self.down2 = WitinConv2d(36, 36, 2, stride=2,
                                 layer_config=self._make_config(True, 4))

        # Classifier head: quantization disabled (matches original configs).
        self.fc1 = WitinLinear(36 * 6 * 6, 128,
                               layer_config=self._make_config(False))
        self.fc2 = WitinLinear(128, 10,
                               layer_config=self._make_config(False))

        # Single GELU module shared by every activation site in forward().
        self.gelu = WitinGELU(layer_config=self._make_config(True, 4))

    def forward(self, x):
        """Compute class logits for a batch of 3x32x32 images."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.gelu(x)
        x = self.down1(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.gelu(x)
        x = self.down2(x)

        # Flatten to (batch, 36*6*6) for the fully connected head.
        x = x.view(-1, 36 * 6 * 6)
        x = self.fc1(x)
        x = self.gelu(x)
        x = self.fc2(x)
        return x

# Model initialization: prefer the first GPU when available
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = CNNNet().to(device)
print('--------------查看网络结构-----------')
print(model)

# Estimate the model's parameter footprint
def get_model_size(model, quantized=False):
    """Return the model's trainable-parameter size in megabytes.

    Assumes 1 byte per parameter when `quantized` (int8) and
    4 bytes otherwise (float32). Buffers and frozen parameters
    are not counted.
    """
    n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    bytes_per_param = 1 if quantized else 4
    return n_params * bytes_per_param / (1024 ** 2)

# Report model size before/after (assumed int8) quantization
print(f"量化前模型大小: {get_model_size(model, quantized=False):.2f} MB")
print(f"量化后模型大小: {get_model_size(model, quantized=True):.2f} MB")

# Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)

# Training / evaluation helpers
def train(model, train_loader, criterion, optimizer, device):
    """Run one training epoch.

    Returns:
        (mean batch loss, accuracy in percent) over the epoch.
    """
    model.train()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0
    for batch_x, batch_y in train_loader:
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        optimizer.zero_grad()
        logits = model(batch_x)
        batch_loss = criterion(logits, batch_y)
        batch_loss.backward()
        optimizer.step()

        loss_sum += batch_loss.item()
        preds = logits.argmax(dim=1)
        n_seen += batch_y.size(0)
        n_correct += (preds == batch_y).sum().item()

    return loss_sum / len(train_loader), 100. * n_correct / n_seen

def test(model, test_loader, criterion, device):
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            
            running_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
    
    test_loss = running_loss / len(test_loader)
    test_acc = 100. * correct / total
    return test_loss, test_acc

# Main train/evaluate loop: record per-epoch metrics for later inspection
train_losses, train_accs = [], []
test_losses, test_accs = [], []

for epoch in range(num_epochs):
    train_loss, train_acc = train(model, train_loader, criterion, optimizer, device)
    test_loss, test_acc = test(model, test_loader, criterion, device)
    
    train_losses.append(train_loss)
    train_accs.append(train_acc)
    test_losses.append(test_loss)
    test_accs.append(test_acc)
    
    print(f'Epoch {epoch + 1}/{num_epochs}: '
          f'Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%, '
          f'Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.2f}%')

# Persist the trained weights
torch.save(model.state_dict(), 'cifar10_cnn.pth')
print("模型已保存为 cifar10_cnn.pth")

# Inference latency benchmark
def measure_inference_time(model, test_loader, device, num_runs=100):
    """Measure the model's average per-batch inference latency.

    Args:
        model: network to benchmark (switched to eval mode).
        test_loader: iterable yielding (inputs, labels) batches.
        device: device the inputs are moved to before the forward pass.
        num_runs: maximum number of batches to time.

    Returns:
        Average latency per batch in milliseconds (0.0 for an empty loader).
    """
    model.eval()
    total_time = 0.0
    batches_timed = 0  # count of batches actually measured

    with torch.no_grad():
        for i, (inputs, _) in enumerate(test_loader):
            if i >= num_runs:  # only time the first num_runs batches
                break
            inputs = inputs.to(device)

            # Time only the forward pass (perf_counter is monotonic and
            # higher resolution than time.time, which can jump backwards).
            start_time = time.perf_counter()
            _ = model(inputs)
            end_time = time.perf_counter()

            total_time += (end_time - start_time)
            batches_timed += 1

    # BUG FIX: divide by the number of batches actually timed. The original
    # divided by num_runs, under-reporting latency whenever the loader held
    # fewer than num_runs batches.
    avg_time = total_time / max(batches_timed, 1) * 1000  # convert to ms
    print(f"平均推理延时: {avg_time:.2f} ms")
    return avg_time

# Noise-robustness helpers
def add_noise_to_data(data, noise_type="gaussian", noise_level=0.1):
    """Add synthetic noise to a batch of normalized images.

    Args:
        data: input tensor; the data pipeline normalizes images with
            Normalize(mean=0.5, std=0.5), so values lie in [-1, 1].
        noise_type: "gaussian" (additive zero-mean noise with std
            `noise_level`) or "salt_pepper" (+/- `noise_level` per pixel).
        noise_level: noise magnitude.

    Returns:
        Noisy tensor clamped back to the valid input range [-1, 1].

    Raises:
        ValueError: if `noise_type` is not supported.
    """
    if noise_type == "gaussian":
        noise = torch.randn_like(data) * noise_level
    elif noise_type == "salt_pepper":
        mask = torch.rand_like(data)
        # +noise_level where mask > 0.5, -noise_level where mask < 0.5
        noise = ((mask > 0.5).float() - (mask < 0.5).float()) * noise_level
    else:
        raise ValueError("Unsupported noise type")

    # BUG FIX: the original clamped to [0, 1], but the inputs are normalized
    # to [-1, 1] — that clamp zeroed every negative pixel and corrupted the
    # clean signal far more than the injected noise. Clamp to [-1, 1].
    return torch.clamp(data + noise, -1, 1)

def test_with_noise(model, test_loader, criterion, device, noise_type="gaussian", noise_level=0.1):
    """Evaluate the model on a noise-corrupted copy of the test set.

    Returns:
        (mean batch loss, accuracy in percent) on the noisy inputs.
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    with torch.no_grad():
        for batch_x, batch_y in test_loader:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)

            # Corrupt the inputs before the forward pass
            outputs = model(add_noise_to_data(batch_x, noise_type, noise_level))
            loss_sum += criterion(outputs, batch_y).item()
            preds = outputs.argmax(dim=1)
            n_seen += batch_y.size(0)
            n_correct += (preds == batch_y).sum().item()

    test_loss = loss_sum / len(test_loader)
    test_acc = 100. * n_correct / n_seen
    print(f"噪声类型: {noise_type}, 噪声强度: {noise_level}, 测试准确率: {test_acc:.2f}%")
    return test_loss, test_acc

import torch.nn.utils.prune as prune

# Weight pruning
def prune_model(model, pruning_amount=0.2):
    """L1-unstructured prune the Witin conv and linear layers, then bake in.

    Removes the fraction `pruning_amount` of smallest-magnitude weights,
    then folds the pruning masks back into the weight tensors so later
    code sees plain parameters again. Returns the (mutated) model.
    """
    prunable = (WitinConv2d, WitinLinear)

    # Phase 1: attach an L1 pruning mask to every prunable weight.
    for _, module in model.named_modules():
        if isinstance(module, prunable):
            prune.l1_unstructured(module, name="weight", amount=pruning_amount)

    # Phase 2: make the pruning permanent by removing the reparametrization.
    for _, module in model.named_modules():
        if isinstance(module, prunable):
            prune.remove(module, 'weight')

    return model

# NOTE(review): this re-defines measure_inference_time already declared above
# and shadows it; the two definitions were identical. Consider deleting one.
def measure_inference_time(model, test_loader, device, num_runs=100):
    """Measure the model's average per-batch inference latency.

    Args:
        model: network to benchmark (switched to eval mode).
        test_loader: iterable yielding (inputs, labels) batches.
        device: device the inputs are moved to before the forward pass.
        num_runs: maximum number of batches to time.

    Returns:
        Average latency per batch in milliseconds (0.0 for an empty loader).
    """
    model.eval()
    total_time = 0.0
    batches_timed = 0  # count of batches actually measured

    with torch.no_grad():
        for i, (inputs, _) in enumerate(test_loader):
            if i >= num_runs:  # only time the first num_runs batches
                break
            inputs = inputs.to(device)

            # Time only the forward pass (perf_counter is monotonic and
            # higher resolution than time.time, which can jump backwards).
            start_time = time.perf_counter()
            _ = model(inputs)
            end_time = time.perf_counter()

            total_time += (end_time - start_time)
            batches_timed += 1

    # BUG FIX: divide by the number of batches actually timed. The original
    # divided by num_runs, under-reporting latency whenever the loader held
    # fewer than num_runs batches.
    avg_time = total_time / max(batches_timed, 1) * 1000  # convert to ms
    print(f"平均推理延时: {avg_time:.2f} ms")
    return avg_time

# Post-training evaluation: latency, pruning, and noise robustness
print("----------推理延时测试----------")
measure_inference_time(model, test_loader, device)

print("----------剪枝前推理延时测试----------")
original_inference_time = measure_inference_time(model, test_loader, device)

# Prune 20% of conv/linear weights (mutates `model` in place; `pruned_model`
# is the same object)
pruned_model = prune_model(model, pruning_amount=0.2)

# Latency after pruning. NOTE(review): unstructured pruning only zeroes
# weights, it does not shrink tensors, so a speedup is not guaranteed.
print("----------剪枝后推理延时测试----------")
pruned_inference_time = measure_inference_time(pruned_model, test_loader, device)

# Report the latency comparison
print(f"剪枝前的推理延时: {original_inference_time:.2f} ms")
print(f"剪枝后的推理延时: {pruned_inference_time:.2f} ms")

print("----------抗噪性测试----------")
noise_types = ["gaussian", "salt_pepper"]
noise_levels = [0.05, 0.1, 0.2]

# Sweep both noise types over increasing noise strengths
for noise_type in noise_types:
    for noise_level in noise_levels:
        print(f"测试噪声类型: {noise_type}, 噪声强度: {noise_level}")
        test_with_noise(model, test_loader, criterion, device, noise_type, noise_level)
