import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import ToTensor, Resize, Compose, Normalize
from torch.utils.tensorboard import SummaryWriter
from PIL import Image
import os
import matplotlib.pyplot as plt

class config:
    """Hyper-parameters and constants shared by training / evaluation / inference."""
    # Fall back to CPU when CUDA is unavailable so the script runs anywhere.
    # Training needs only about 1 GB of GPU memory for this model.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    img_size = (128, 128, 3)      # (H, W, C) fed to the network; images are Resize()d to (H, W) first
    batch_size = 32               # number of images per forward/backward pass
    num_workers = 0               # DataLoader worker processes; must stay 0 on Windows
    learning_rate = 0.005         # SGD learning rate
    epochs = 30                   # number of training epochs
    # Index -> class-name mapping: the list index is the integer label in the *.txt files.
    label2idx = ["bulbasaur", "charmander", "mewtwo", "pikachu", "squirtle"]

class PokemonDataset(Dataset):
    """Pokemon image-classification dataset.

    Reads ``<dataset_path>/<mode>.txt``, where each line is
    ``<relative_image_path> <integer_label>``, and lazily loads and
    transforms images in ``__getitem__``.
    """

    def __init__(self, dataset_path, mode="train"):
        """
        Args:
            dataset_path: root directory containing train.txt / val.txt / test.txt
                and the image files they reference.
            mode: one of "train", "val", "test" — selects the index file.
        """
        assert mode in ["train", "val", "test"], "The dataset has no such mode."

        self.transform = Compose([
            Resize(config.img_size[:-1]),  # resize to (H, W)
            ToTensor(),                    # uint8 [0, 255] -> float Tensor in [0, 1]
            # ImageNet channel statistics; standardized inputs converge faster.
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.dataset = dataset_path
        self.img_path_ls = []   # relative image paths, parallel to img_label_ls
        self.img_label_ls = []  # integer class labels
        path = os.path.join(dataset_path, mode + ".txt")
        with open(path, "r", encoding="utf-8") as fp:
            for line in fp:
                img_path, label = line.strip().split()
                self.img_path_ls.append(img_path)
                # int() instead of eval(): the label is a plain integer, and
                # eval() would execute arbitrary code from the index file.
                self.img_label_ls.append(int(label))

    def __len__(self):
        return len(self.img_label_ls)

    def __getitem__(self, idx):
        # convert("RGB") guards against grayscale/RGBA files (e.g. PNGs with an
        # alpha channel), which would otherwise break the 3-channel Normalize.
        img = Image.open(os.path.join(self.dataset, self.img_path_ls[idx])).convert("RGB")
        img = self.transform(img)
        label = torch.tensor(self.img_label_ls[idx])
        return img, label

class ClassifyNetWork(nn.Module):
    """Small CNN classifier for the 5 Pokemon classes.

    Input:  (N, 3, 128, 128) normalized image batch.
    Output: (N, 5) raw class logits — no Softmax layer (see note in __init__).
    """

    def __init__(self):
        super().__init__()
        # (3, 128, 128) -> (32, 128, 128)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(1, 1), stride=1, padding=0),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.2)
        )
        # (32, 128, 128) -> (64, 64, 64)
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=1, padding=1),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2)
        )
        # (64, 64, 64) -> (128, 32, 32)
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=1, padding=1),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2)
        )
        # (128, 32, 32) -> (256, 16, 16)
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), stride=1, padding=1),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2)
        )
        # (256, 16, 16) -> (512, 4, 4)
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3), stride=1, padding=1),
            nn.MaxPool2d(4),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2)
        )
        # (512, 4, 4) -> (64, 4, 4)
        self.conv6 = nn.Sequential(
            nn.Conv2d(in_channels=512, out_channels=64, kernel_size=(3, 3), stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2)
        )

        # BUGFIX: the trailing nn.Softmax was removed. The training loop uses
        # nn.CrossEntropyLoss, which applies log-softmax internally; feeding it
        # softmax probabilities double-squashes the outputs and starves the
        # gradients, badly hurting training. argmax over logits equals argmax
        # over softmax probabilities, so inference/test predictions are unchanged.
        self.fc = nn.Sequential(
            nn.Linear(1024, 512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, 5)
        )

    def forward(self, x):
        """Return (N, 5) class logits for an (N, 3, 128, 128) input batch."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        # Flatten keeping the batch dimension explicit: (N, 64, 4, 4) -> (N, 1024).
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x

def inference(model_path, img_path):
    """Classify a single image and display it with the predicted label.

    Args:
        model_path: path to a state_dict checkpoint saved by train().
        img_path: path to the image to classify.
    """
    model = ClassifyNetWork().to(config.device)
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine.
    model.load_state_dict(torch.load(model_path, map_location=config.device))
    model.eval()
    transform = Compose([
            Resize(config.img_size[:-1]),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    src = Image.open(img_path)
    # Reuse the already-opened image instead of opening the file twice;
    # convert("RGB") guards against RGBA/grayscale inputs.
    img = transform(src.convert("RGB")).to(config.device)
    img = img.unsqueeze(0)  # add batch dimension: (C, H, W) -> (1, C, H, W)
    with torch.no_grad():   # no gradients needed at inference time
        res = torch.argmax(model(img)).item()
    label_name = config.label2idx[res]
    plt.imshow(src)
    plt.text(x=10, y=50, s="predict: " + label_name)
    plt.show()
    

def test(dataset_path, model_path):
    """Evaluate a saved model on the held-out test split and print its accuracy.

    Args:
        dataset_path: dataset root (must contain test.txt — see PokemonDataset).
        model_path: path to a state_dict checkpoint saved by train().
    """
    test_dataset = PokemonDataset(dataset_path, "test")
    # Order does not affect accuracy, so shuffling is unnecessary here.
    test_dataloader = DataLoader(dataset=test_dataset, num_workers=config.num_workers,
                                 batch_size=config.batch_size, shuffle=False)

    model = ClassifyNetWork().to(config.device)
    # map_location lets a GPU-saved checkpoint load on a CPU-only machine.
    model.load_state_dict(torch.load(model_path, map_location=config.device))
    model.eval()
    total_test_acc = 0.0
    with torch.no_grad():  # evaluation only — skip autograd bookkeeping
        for imgs, labels in test_dataloader:
            imgs = imgs.to(config.device)
            labels = labels.to(config.device)
            outputs = model(imgs)
            total_test_acc += (outputs.argmax(1) == labels).sum().item()
    print("test accuracy:\t", total_test_acc / len(test_dataset))



def train(dataset_path):
    """Train the classifier, log metrics to TensorBoard, and save the weights.

    Builds disjoint train/val splits, trains for config.epochs epochs with SGD,
    logs per-epoch loss/accuracy under ./exp, and writes ./exp/model.pt.

    Args:
        dataset_path: dataset root containing train.txt / val.txt and the images.
    """
    # -------------------------------------------------------------------------- #
    # train is used for optimization; val monitors generalization during training;
    # test (separate function) measures final performance. The splits are disjoint.
    # -------------------------------------------------------------------------- #
    train_dataset = PokemonDataset(dataset_path, "train")
    val_dataset = PokemonDataset(dataset_path, "val")

    # --------------------------------------------------------------- #
    # batch_size: images fed to the network per step
    # num_workers: loader worker processes (must be 0 on Windows)
    # shuffle: randomize sample order each epoch (only needed for train)
    # --------------------------------------------------------------- #
    train_dataloader = DataLoader(dataset=train_dataset, num_workers=config.num_workers,
                                  batch_size=config.batch_size, shuffle=True)
    val_dataloader = DataLoader(dataset=val_dataset, num_workers=config.num_workers,
                                batch_size=config.batch_size, shuffle=False)

    # Model instance, moved to the configured device.
    model = ClassifyNetWork().to(config.device)

    # CrossEntropyLoss expects raw logits — it applies log-softmax internally.
    loss_fn = nn.CrossEntropyLoss().to(config.device)

    # Plain SGD optimizer.
    optimizer = torch.optim.SGD(model.parameters(), lr=config.learning_rate)

    # TensorBoard logging.
    writer = SummaryWriter("./exp")
    for epoch in range(config.epochs):
        # ---- training phase ----
        model.train()
        total_train_loss = 0.0
        total_train_acc = 0.0
        for imgs, labels in train_dataloader:
            imgs = imgs.to(config.device)
            labels = labels.to(config.device)
            outputs = model(imgs)
            loss = loss_fn(outputs, labels)

            # .item() detaches from the graph — accumulating the loss tensor
            # itself would keep every batch's computation graph alive all epoch
            # (a memory leak). Weighting by batch size makes the epoch average
            # a true per-sample mean even when the last batch is smaller.
            total_train_loss += loss.item() * imgs.size(0)
            total_train_acc += (outputs.argmax(1) == labels).sum().item()

            # backpropagation
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        writer.add_scalar("train/loss", total_train_loss / len(train_dataset), epoch)
        writer.add_scalar("train/accuracy", total_train_acc / len(train_dataset), epoch)

        # ---- validation phase ----
        model.eval()
        total_val_acc = 0.0
        total_val_loss = 0.0
        with torch.no_grad():  # no gradients needed for validation
            for imgs, labels in val_dataloader:
                imgs = imgs.to(config.device)
                labels = labels.to(config.device)
                outputs = model(imgs)
                loss = loss_fn(outputs, labels)
                total_val_acc += (outputs.argmax(1) == labels).sum().item()
                total_val_loss += loss.item() * imgs.size(0)
        # Tag names kept as "test/..." for continuity with existing logs.
        writer.add_scalar("test/loss", total_val_loss / len(val_dataset), epoch)
        writer.add_scalar("test/accuracy", total_val_acc / len(val_dataset), epoch)

        print("{}th epoch:\ttrain_loss:{:.4f}\t val_loss:{:.4f}\t val_acc:{:.4f}"
              .format(epoch, total_train_loss / len(train_dataset),
                      total_val_loss / len(val_dataset),
                      total_val_acc / len(val_dataset)))
    writer.close()  # flush any buffered events to disk
    torch.save(model.state_dict(), "./exp/model.pt")

if __name__ == "__main__":
    # Typical workflow: train first, then evaluate on the test split,
    # then run single-image inference with the saved checkpoint.
    # train(r'E:\Data\pokemon')
    # test(r'E:\Data\pokemon', "./exp/model.pt")  # NOTE(review): test() takes (dataset_path, model_path)
    inference("./exp/model.pt","test.jpg")

