# my_net: runs end to end; reaches ~97.6% test accuracy.

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import time
import torch.nn.functional as F

# Hyperparameters
input_size = 784  # 28x28 flattened MNIST image
hidden_size = 500  # width of the single hidden layer
num_classes = 10  # digits 0-9
num_epochs = 20
batch_size = 256
learning_rate = 0.02

class DivActivation(nn.Module):
    """Parameter-free layer that scales its input: forward(x) = x / divisor."""

    def __init__(self, divisor):
        super().__init__()
        self.divisor = divisor

    def forward(self, x):
        # Plain element-wise division; nothing learnable here.
        return x / self.divisor

def l2_regularization(model, l2_alpha=0.01):
    """Return the scaled L2 penalty over the weights of every nn.Linear in `model`.

    Each linear layer contributes sum(W**2) / 2; biases are deliberately
    excluded. Returns l2_alpha * total (0.0 when the model has no linear
    layers).

    Args:
        model: any nn.Module; traversed recursively via .modules().
        l2_alpha: scaling coefficient applied to the summed penalty.
    """
    # isinstance (rather than `type(...) is`) also catches nn.Linear subclasses.
    l2_terms = [
        (module.weight ** 2).sum() / 2.0
        for module in model.modules()
        if isinstance(module, nn.Linear)
    ]
    return l2_alpha * sum(l2_terms)


class Quantize(nn.Module):
    """Symmetric linear quantizer to signed `num_bits` integer levels.

    forward(x) computes scale = qmax / max(|x|), then returns
    round(x * scale) clamped to [-(2**(num_bits-1)), 2**(num_bits-1) - 1].
    The scale is stored in a buffer so quantize()/dequantize() can reuse
    the most recently computed value (and so it follows .to(device)).
    """

    def __init__(self, num_bits=8):
        super(Quantize, self).__init__()
        self.num_bits = num_bits
        # Last computed scale factor; 0.0 until forward()/quan_loss() runs.
        self.register_buffer('scale', torch.tensor(0.0))

    def _range(self):
        # Signed integer range for num_bits, e.g. (-128, 127) for 8 bits.
        qmin = -(2. ** (self.num_bits - 1))
        qmax = 2. ** (self.num_bits - 1) - 1
        return qmin, qmax

    def _update_scale(self, x, qmax):
        # Multiplicative scale into the integer range. clamp guards against
        # an all-zero tensor, which previously produced a divide-by-zero
        # (inf scale); behavior is unchanged for any nonzero input.
        max_val = x.abs().max().clamp(min=1e-12)
        self.scale = qmax / max_val

    def forward(self, x):
        """Quantize x, updating self.scale from x's max magnitude."""
        qmin, qmax = self._range()
        self._update_scale(x, qmax)
        return (x * self.scale).round().clamp(qmin, qmax)

    def quantize(self, x):
        """Quantize x with the last stored scale (does not update it)."""
        qmin, qmax = self._range()
        return (x * self.scale).round().clamp(qmin, qmax)

    def dequantize(self, q_x):
        """Map quantized integer levels back to the float range."""
        return q_x / self.scale

    def quan_loss(self, x):
        """MSE between x * scale and its rounded/clamped quantization.

        Side effect: refreshes self.scale from x, exactly like forward().
        """
        qmin, qmax = self._range()
        self._update_scale(x, qmax)
        q_x = (x * self.scale).round().clamp(qmin, qmax)
        # Mean-squared rounding error in the scaled (integer) domain.
        return nn.MSELoss()(x * self.scale, q_x)


# 检查是否可以使用 GPU
# Select GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# MNIST dataset.
# NOTE(review): ToTensor already scales pixels to [0, 1]; Normalize with
# mean = std = 1/255 then computes 255*x - 1, i.e. roughly restores the raw
# 0-255 integer range. This appears deliberate for the integer-quantization
# experiment, but confirm it is not a typo for the usual MNIST stats
# (0.1307, 0.3081).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((1.0 / 255,), (1.0 / 255,))
])

# Downloads to ./data on first run; shuffle only the training split.
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transform, download=True)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=transform, download=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)


# 定义仅使用全连接层和激活层的神经网络
class MLP(nn.Module):
    """Two-layer fully connected classifier with per-layer weight quantizers.

    Forward pass: flatten -> fc1 -> /10000 -> fc2 -> /10000. The
    DivActivation layers rescale the large activations produced by the
    ~0-255-range inputs and integer-quantized weights.
    """

    def __init__(self, input_size, hidden_size, num_classes, num_bits=8):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.quantize1 = Quantize(num_bits)  # quantizer for fc1 weights
        self.div1 = DivActivation(10000)     # rescale after fc1
        self.fc2 = nn.Linear(hidden_size, num_classes)
        self.quantize2 = Quantize(num_bits)  # quantizer for fc2 weights
        self.div2 = DivActivation(10000)     # rescale after fc2
        self.num_bits = num_bits
        self.relu = nn.ReLU()  # unused in forward; kept for interface compatibility

    def forward(self, x):
        # Flatten to (batch_size, input_size).
        x = x.view(x.size(0), -1)

        x = self.fc1(x)
        x = self.div1(x)

        x = self.fc2(x)
        # Fix: the second stage previously reused div1, leaving div2 dead.
        # Numerically identical (both divide by 10000), now consistent.
        x = self.div2(x)

        return x

    def quantize(self):
        """Quantize both layers' weights in place (non-differentiable)."""
        # no_grad: this is a side-effecting weight rewrite; avoid building
        # an autograd graph just to assign into .data.
        with torch.no_grad():
            self.fc1.weight.data = self.quantize1(self.fc1.weight)
            self.fc2.weight.data = self.quantize2(self.fc2.weight)

    def quan_loss(self):
        """Sum of the quantization MSE losses of both layers' weights."""
        loss1 = self.quantize1.quan_loss(self.fc1.weight)
        loss2 = self.quantize2.quan_loss(self.fc2.weight)
        return loss1 + loss2


model = MLP(input_size, hidden_size, num_classes).to(device)

# Loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Train the model.
# Report the pre-training quantization loss, then quantize the freshly
# initialized weights once before the first epoch.
print(model.quan_loss() )
model.quantize()
for epoch in range(num_epochs):
    start_time = time.time()  # record epoch start time
    model.train()
    correct = 0
    total = 0
    l2loss = 0
    for i, (images, labels) in enumerate(train_loader):
        # NOTE(review): every 5th epoch (except epoch 0) this rebuilds the
        # optimizer with an UNCHANGED learning rate ("learning_rate =
        # learning_rate" is a no-op), so the only effect is resetting Adam's
        # moment estimates. Confirm whether an actual LR decay was intended.
        if (epoch % 5 == 0 and i==0 and epoch != 0):
            learning_rate = learning_rate
            optimizer = optim.Adam(model.parameters(), lr=learning_rate)

        # if (epoch % 6 == 0 and i == 0 ):
        #     weight1 = model.fc1.weight.data
        #     weight2 = model.fc2.weight.data
        #     ss = 0
        images, labels = images.to(device), labels.to(device)  # move batch to device
        # print(i)
        outputs = model(images)
        quan_loss = model.quan_loss()  # computed for monitoring only; not added to the loss
        loss = criterion(outputs, labels) #+ quan_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate running training accuracy for this epoch.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    # Re-quantize the weights every other epoch (skipping epoch 0), so the
    # float weights drift between quantizations.
    if (epoch % 2 == 0  and epoch != 0):
        model.quantize()
        # weight1 = model.fc1.weight.data
        # weight2 = model.fc2.weight.data
        # ss = 100

    train_accuracy = 100 * correct / total
    print(f'Epoch [{epoch + 1}/{num_epochs}], Training Accuracy: {train_accuracy:.2f}% loss:{loss}')
    # print(f"Epoch [{epoch + 1}/{num_epochs}], L2 Loss: {l2loss.item() if l2loss != 0 else 0}")
    print(f"Epoch [{epoch + 1}/{num_epochs}], model.quan_loss(): {model.quan_loss()}")

    # Evaluate on the test set.
    # On the final epoch (epoch == num_epochs - 1) quantize once more so the
    # last reported accuracy reflects quantized weights.
    if (epoch % (num_epochs - 1) == 0  and epoch != 0):
        print("sss")
        model.quantize()
    # model.quantize()
    model.eval()
    # model.quantize()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)  # move batch to device
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    test_accuracy = 100 * correct / total
    end_time = time.time()  # record epoch end time
    epoch_time = end_time - start_time  # elapsed seconds for this epoch
    print(f'Epoch [{epoch + 1}/{num_epochs}], Test Accuracy: {test_accuracy:.2f}%, Time: {epoch_time:.2f} seconds')

print("Training Complete")
print(model.quan_loss())

# Keep the final weights bound to module-level names for interactive inspection.
weight1 = model.fc1.weight.data
weight2 = model.fc2.weight.data
# print(weight1,weight2)
ss = 100
