import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import copy, time
import util

# Training hyperparameters.
epoch=10  # number of training epochs (consumed by launch_train -> train)
batch_size=64  # mini-batch size for every DataLoader below
# Wall-clock durations in seconds, assigned by train() and test().
# NOTE: only annotated here, never initialized — reading them before
# training/testing has run raises NameError (see report_time()).
train_time:float
test_time:float

class Task3DataLoader(torch.utils.data.Dataset):
    """In-memory map-style dataset pairing samples with their labels.

    BUG FIX: the original subclassed ``DataLoader`` (without ever calling
    ``super().__init__()``), yet every call site wraps instances of this
    class in a ``DataLoader`` — i.e. it is used as a *dataset*.  The
    correct base class is ``torch.utils.data.Dataset``.

    Args:
        data: indexable collection of samples.
        labels: indexable collection of labels, same length as ``data``.
    """

    def __init__(self, data, labels):
        self.data = data
        self.labels = labels

    def __len__(self):
        # Number of samples; assumes data and labels have equal length.
        return len(self.data)

    def __getitem__(self, idx):
        # One (sample, label) pair for the given index.
        return self.data[idx], self.labels[idx]


class CNN(nn.Module):
    """Small convolutional classifier for 32x32 single-channel images.

    Two conv -> ReLU -> max-pool stages extract features, followed by a
    two-layer fully connected head that emits 10 class logits.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor:
        # [1, 32, 32] -> [32, 30, 30] -> [32, 15, 15] -> [64, 13, 13] -> [64, 6, 6]
        self.conv_layers = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(32, 64, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # Classifier head: flattened 64*6*6 features -> 128 -> 10 logits.
        self.fc_layers = nn.Sequential(
            nn.Linear(64 * 6 * 6, 128),
            nn.ReLU(),
            nn.Linear(128, 10),
        )

    def forward(self, x):
        features = self.conv_layers(x)
        flat = features.flatten(1)  # equivalent to view(batch, -1)
        return self.fc_layers(flat)
    
def train(model:nn.Module,train_loader:DataLoader,validate_loader:DataLoader,optimizer,criterion,epoch:int,device):
    """Train ``model`` for ``epoch`` epochs and return the best snapshot.

    After each epoch the model is evaluated on ``validate_loader`` and a
    deep copy of the weights with the highest accuracy so far is kept.
    Total wall-clock training time is stored in the module-level
    ``train_time``.

    Args:
        model: network to train (moved to ``device``).
        train_loader: yields (data, labels) training batches.
        validate_loader: yields (data, labels) validation batches.
        optimizer: optimizer stepping ``model``'s parameters.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss``.
        epoch: number of epochs to run.
        device: torch device to run on.

    Returns:
        A deep copy of the model that achieved the best validation accuracy
        (a copy of the initial model if accuracy never exceeded 0).
    """
    model = model.to(device)
    # BUG FIX: start from a copy of the model instead of None, so the
    # function never returns None even if accuracy stays at 0.0 every
    # epoch (the original `acc > best_acc` with best_acc=0.0 would then
    # never fire and callers would receive None).
    best: nn.Module = copy.deepcopy(model)
    best_acc = 0.0
    start_time = time.time()
    # BUG FIX: loop variable renamed so it no longer shadows the `epoch`
    # parameter it iterates over.
    for ep in range(epoch):
        model.train()
        for data, labels in train_loader:
            # Reshape flat samples into NCHW; target size comes from util.
            data = data.reshape((-1, 1, *util.resize_size))
            data, labels = data.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(data)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

        # Evaluate on the validation loader.
        model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for data, labels in validate_loader:
                data = data.reshape((-1, 1, *util.resize_size))
                data, labels = data.to(device), labels.to(device)
                outputs = model(data)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        # Guard against an empty validation loader (division by zero).
        acc = 100 * correct / total if total else 0.0
        print(f"Epoch {ep+1}: Test Accuracy {acc:.2f}%")
        if acc > best_acc:
            best_acc = acc
            best = copy.deepcopy(model)

    global train_time
    train_time = time.time() - start_time

    return best

def test(model:nn.Module,test_loader:DataLoader,device):
    """Run inference over ``test_loader`` and report the results.

    Collects predictions and ground-truth labels batch by batch, records
    the inference wall-clock time in the module-level ``test_time``, and
    hands both arrays to ``util.show_result``.

    Args:
        model: trained network (moved to ``device``).
        test_loader: yields (data, labels) batches.
        device: torch device to run inference on.
    """
    model = model.to(device)
    model.eval()
    predicted_batches = []
    label_batches = []
    start_time = time.time()
    with torch.no_grad():
        for batch, batch_labels in test_loader:
            # Reshape flat samples into NCHW; target size comes from util.
            batch = batch.reshape((-1, 1, *util.resize_size)).to(device)
            logits = model(batch)
            _, batch_pred = torch.max(logits.data, 1)
            predicted_batches.append(batch_pred.cpu().numpy())
            label_batches.append(batch_labels.cpu().numpy())

    global test_time
    test_time = time.time() - start_time
    util.show_result(np.concatenate(label_batches), np.concatenate(predicted_batches))

def save(model:nn.Module,path:str):
    """Serialize the entire model object (not just its state_dict) to ``path``."""
    torch.save(model, path)

def load(path:str):
    """Deserialize a model previously written by ``save``.

    BUG FIX: ``save`` pickles the whole ``nn.Module``, but since torch 2.6
    ``torch.load`` defaults to ``weights_only=True`` and refuses full-module
    pickles. Pass ``weights_only=False`` explicitly to keep loading the
    format ``save`` produces. NOTE(review): only safe for trusted files —
    this executes arbitrary pickle data.
    """
    return torch.load(path, weights_only=False)

def launch_train():
    """Build the data pipeline, model and optimizer, then run training.

    Returns:
        The best model found by ``train`` (selected on the test split).
    """
    # Load tensors from disk.
    train_data, train_labels = util.load_data_tensor("number_data/training/")
    test_data, test_labels = util.load_data_tensor("number_data/testing/")

    # Wrap them in datasets and batching loaders.
    train_loader = DataLoader(
        Task3DataLoader(train_data, train_labels),
        batch_size=batch_size,
        shuffle=True,
    )
    test_loader = DataLoader(
        Task3DataLoader(test_data, test_labels),
        batch_size=batch_size,
    )

    # Model, loss and optimizer on the best available device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = CNN()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    return train(model, train_loader, test_loader, optimizer, criterion, epoch, device)

def launch_test(model:nn.Module):
    """Evaluate ``model`` on the test split and print the results."""
    # Load the test data and wrap it in a batching loader.
    data, labels = util.load_data_tensor("number_data/testing/")
    loader = DataLoader(Task3DataLoader(data, labels), batch_size=batch_size)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    test(model, loader, device)

def report_time():
    """Print the wall-clock durations recorded by train() and test().

    NOTE: reads the module-level ``train_time``/``test_time``, which are
    only assigned once training and testing have run; calling this earlier
    raises NameError.
    """
    # BUG FIX: corrected the "trainning" typo in the printed message.
    print(f"task3 training cost {train_time}s")
    print(f"task3 testing cost {test_time}s")

def main():
    """Train, evaluate and persist the best model, then report timings."""
    best_model = launch_train()
    launch_test(best_model)
    save(best_model, "result/task3/task3.pt")
    report_time()


# Script entry point: run the full train/test/save/report pipeline.
if __name__ == "__main__":
    main()