import csv
import os

import matplotlib.pyplot as plt
import pandas as pd
import torch
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.transforms as transforms
import yaml
from PIL import Image
from tqdm import tqdm

from dataset.dataset import get_dataloader
from model.ResNet import ResNet50, ResNet101, ResNet152
from model.VGG16 import Vgg16

# ImageNet normalization statistics (mean/std per RGB channel).
_EVAL_MEAN = [0.485, 0.456, 0.406]
_EVAL_STD = [0.229, 0.224, 0.225]

# Deterministic evaluation/inference preprocessing: resize to the 224x224
# input the models expect, convert to a tensor, normalize with ImageNet stats.
test_transform = transforms.Compose(
    [
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(_EVAL_MEAN, _EVAL_STD),
    ]
)


class main():
    """Training / inference driver for the image-classification models.

    Reads hyper-parameters from a YAML config file, builds the model, loss,
    optimizer, scheduler and dataloaders, and exposes ``train()`` and
    ``predict()`` entry points.
    """

    def __init__(self, config, model_name, mode=None, ckpt=None):
        """
        Args:
            config: path to the YAML parameter file.
            model_name: one of 'Vgg16', 'ResNet50', 'ResNet101', 'ResNet152'.
            mode: 'train' or 'predict'; falls back to the config's ``mode``.
            ckpt: optional explicit checkpoint path, overrides the config.
        """
        # `config` is the path to a YAML parameter file.
        with open(config, 'r', encoding='utf-8') as file:
            self.arg = yaml.safe_load(file)
        self.model_name = model_name
        self.mode = mode if mode is not None else self.arg['mode']
        self.ckpt = ckpt
        self.epochs = self.arg['epochs']
        self.save_fre = self.arg['save_fre']
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        print(f'\n{self.device}')
        # Instantiate the model and optionally restore weights.
        self.model = self.get_model()
        self.get_ckpt()
        # Loss function, optimizer and LR scheduler.
        self.criterion, self.optim, self.scheduler = self.get_train_instance(self.arg)
        # Dataloaders and the index -> character label mapping.
        self.trainloader, self.valloader, self.testloader, self.int2char = get_dataloader(self.arg['batchsize'])
        # Inference-time preprocessing; reuse the module-level pipeline
        # instead of duplicating an identical Compose here.
        self.transform = test_transform

    def get_ckpt(self):
        """Load model weights from a checkpoint if one is configured.

        Priority: explicit ``ckpt`` argument, then the mode-specific entry
        in the config ('pre_ckpt' for training, 'ckpt' for prediction).
        """
        ckpt = self.ckpt
        if not ckpt:
            if self.mode == 'train':
                ckpt = self.arg['pre_ckpt']
            elif self.mode == 'predict':
                ckpt = self.arg['ckpt']
        if ckpt:
            self.model.load_state_dict(torch.load(ckpt, map_location=self.device))

    def get_model(self):
        """Build the model named by ``self.model_name`` on ``self.device``.

        Raises:
            ValueError: if the name matches no known model. (Previously this
                returned None, which crashed later with a confusing
                AttributeError in ``get_ckpt``.)
        """
        name = self.model_name
        if name == 'Vgg16':
            model = Vgg16()
        elif name == 'ResNet50':
            model = ResNet50()
        elif name == 'ResNet101':
            model = ResNet101()
        elif name == 'ResNet152':
            model = ResNet152()
        else:
            raise ValueError(
                f"unknown model name {name!r}, expected one of "
                "'Vgg16', 'ResNet50', 'ResNet101', 'ResNet152'")
        return model.to(self.device)

    def get_train_instance(self, arg):
        """Build (criterion, optimizer, scheduler) from the config dict.

        Returns:
            tuple of (loss function, optimizer, scheduler-or-None).

        Raises:
            ValueError: if ``arg['optim']`` is neither 'Adam' nor 'SGD'.
        """
        # Loss function.
        criterion = torch.nn.CrossEntropyLoss().to(self.device)
        # Optimizer.
        if arg['optim'] == 'Adam':
            optimizer = torch.optim.Adam(self.model.parameters(), lr=float(arg['lr']))
        elif arg['optim'] == 'SGD':
            optimizer = torch.optim.SGD(self.model.parameters(), lr=float(arg['lr']), momentum=arg['momentum'],
                                        weight_decay=arg['weight_decay'])
        else:
            raise ValueError("请检查配置文件中optim是否正确，你的输入为{}，应为Adam or SGD".format(arg['optim']))
        # Optional step LR scheduler.
        if arg['scheduler']:
            scheduler = lr_scheduler.StepLR(optimizer, step_size=arg['step_size'], gamma=arg['gamma'])
        else:
            scheduler = None
        return (criterion, optimizer, scheduler)

    def train(self):
        """Run the full training loop: log metrics, save checkpoints, plot."""
        self.savedir = self.creat_savedir(self.arg['savedir'])
        # (best test accuracy, copied state_dict, checkpoint filename).
        best = (0, None, None)
        self.creat_log()
        self.metrics_list = []
        try:
            for epoch in tqdm(range(self.epochs), desc='training epoch'):
                # Train.
                train_loss, train_acc = self.train_step(self.trainloader)
                print(f"\ntrain_step:\nLoss[{train_loss:4f}]\tacc[{train_acc:4f}]------")
                # Validate.
                val_loss, val_acc = self.val_step(self.valloader)
                print(f"val_step:\nLoss[{val_loss:4f}]\tacc[{val_acc:4f}]------")
                # Test.
                test_loss, test_acc = self.val_step(self.testloader)
                print(f"test_step:\nLoss[{test_loss:4f}]\tacc[{test_acc:4f}]------")
                self.metrics = {'epoch': epoch,
                                'train_loss': train_loss, 'val_loss': val_loss, 'test_loss': test_loss,
                                'train_acc': train_acc, 'val_acc': val_acc, 'test_acc': test_acc}
                self.metrics_list.append(self.metrics)
                # Update the CSV log.
                self.updata_log()

                # Checkpoint saving rules.
                # Periodic save.
                if epoch % self.save_fre == 0:
                    name = f'epoch{epoch}-loss{test_loss:.2f}-acc{test_acc:.2f}.pth'
                    torch.save(self.model.state_dict(), os.path.join(self.savedir, name))
                # Track the best weights. state_dict() returns references to
                # the live parameter tensors, so they must be copied — a bare
                # reference would silently decay into the last epoch's weights.
                if test_acc > best[0]:
                    best_name = f'best-epoch{epoch}-loss{test_loss:.2f}-acc{test_acc:.2f}.pth'
                    best_state = {k: v.detach().clone() for k, v in self.model.state_dict().items()}
                    best = (test_acc, best_state, best_name)
                # On the final epoch, save the last and best weights.
                if epoch == self.epochs - 1:
                    # Last weights.
                    name = f'last-epoch{epoch}-loss{test_loss:.2f}-acc{test_acc:.2f}.pth'
                    torch.save(self.model.state_dict(), os.path.join(self.savedir, name))
                    # Best weights.
                    print(f'best_acc:{best[0]}')
                    if best[1] is not None:
                        torch.save(best[1], os.path.join(self.savedir, best[2]))
                        self.arg['ckpt'] = best[2]
                    # Persist the (possibly updated) config next to this run
                    # so predict mode can find the best checkpoint later.
                    with open(os.path.join(self.savedir, 'config.yaml'), 'w', encoding='utf-8') as f:
                        yaml.safe_dump(self.arg, f, allow_unicode=True)
        finally:
            # Always release the CSV log handle, even if training crashes.
            self.log_file.close()

        self.draw(show=False, save=True)

    def creat_log(self):
        """Open ``log.csv`` in the save dir and write the metric header."""
        # Keep the handle on self so rows can be flushed and the file closed
        # in train(); previously the handle was leaked and never flushed.
        self.log_file = open(os.path.join(self.savedir, 'log.csv'), 'w', newline='')
        fieldnames = ['epoch', 'train_loss', 'val_loss', 'test_loss', 'train_acc', 'val_acc', 'test_acc']
        self.log = csv.DictWriter(self.log_file, fieldnames=fieldnames)
        self.log.writeheader()

    def updata_log(self):
        """Append the current epoch's metrics to the CSV log."""
        self.log.writerow(self.metrics)
        # Flush per row so the log survives an interrupted run.
        self.log_file.flush()

    def creat_savedir(self, savedir):
        '''Create and return ``./savedir/model_name/<run_index>/``.'''
        savedir = os.path.join(savedir, self.model_name)
        os.makedirs(savedir, exist_ok=True)
        # Only numeric entries are run indices; skip stray files/dirs
        # instead of crashing on int().
        runs = sorted(int(d) for d in os.listdir(savedir) if d.isdigit())
        step = runs[-1] + 1 if runs else 0
        savedir = os.path.join(savedir, str(step))
        os.makedirs(savedir, exist_ok=True)
        return savedir

    def train_step(self, loader):
        """Train for one epoch over ``loader``.

        Returns:
            (mean loss per sample, accuracy) for the epoch.
        """
        self.model.train()
        total_loss = 0
        preds = []
        reals = []
        for datas, labels in loader:
            datas = datas.to(self.device)
            labels = labels.to(self.device)

            pred = self.model(datas)

            loss = self.criterion(pred, labels)

            self.optim.zero_grad()
            loss.backward()
            self.optim.step()
            # Collect hard predictions for the accuracy computation.
            _, pred = torch.max(pred, dim=1)
            total_loss += loss.item()
            preds.extend(pred.tolist())
            reals.extend(labels.tolist())
        # Scheduler steps once per epoch, not per batch.
        if self.scheduler is not None:
            self.scheduler.step()
        acc, loss = self.get_acc_loss(preds, reals, total_loss)

        return loss, acc

    def val_step(self, loader):
        """Evaluate the model over ``loader`` without gradient tracking.

        Returns:
            (mean loss per sample, accuracy).
        """
        self.model.eval()
        total_loss = 0
        preds = []
        reals = []
        for datas, labels in loader:
            datas = datas.to(self.device)
            labels = labels.to(self.device)
            with torch.no_grad():
                pred = self.model(datas)
                loss = self.criterion(pred, labels)
                #
                _, pred = torch.max(pred, dim=1)
                total_loss += loss.item()
                preds.extend(pred.tolist())
                reals.extend(labels.tolist())
        acc, loss = self.get_acc_loss(preds, reals, total_loss)
        return loss, acc

    def predict(self, path):
        """Classify a single image file and return its character label."""
        img = Image.open(path).convert('RGB')
        img = self.transform(img)
        # Add the batch dimension: (C, H, W) -> (1, C, H, W).
        img = img[None, :]
        print(img.shape)

        self.model.eval()
        img = img.to(self.device)
        with torch.no_grad():
            pred = self.model(img)
            _, pred = torch.max(pred, dim=1)
            pred = pred.item()
            char_pred = self.int2char[pred]
        return char_pred

    def get_acc_loss(self, preds, reals, loss):
        """Return (accuracy, summed-batch-loss / sample count).

        Guards against an empty prediction list (empty loader), which
        previously raised ZeroDivisionError.
        """
        total = len(preds)
        if total == 0:
            return 0.0, 0.0
        correct = sum(1 for p, r in zip(preds, reals) if p == r)
        return correct / total, loss / total

    def draw(self, show=False, save=True):
        """Plot loss and accuracy curves for all logged epochs."""
        df = pd.DataFrame(self.metrics_list)
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
        ax1.plot(df['epoch'], df['train_loss'], 'r', label='Training Loss')
        ax1.plot(df['epoch'], df['val_loss'], 'g', label='Validation Loss')
        ax1.plot(df['epoch'], df['test_loss'], 'b', label='Test Loss')
        ax1.set_title('Loss')
        ax1.set_xlabel('Epochs')
        ax1.set_ylabel('Loss')
        ax1.legend()

        ax2.plot(df['epoch'], df['train_acc'], 'r', label='Training Accuracy')
        ax2.plot(df['epoch'], df['val_acc'], 'g', label='Validation Accuracy')
        ax2.plot(df['epoch'], df['test_acc'], 'b', label='Test Accuracy')
        ax2.set_title('Accuracy')
        ax2.set_xlabel('Epochs')
        ax2.set_ylabel('Accuracy')
        ax2.legend()
        if save:
            fig.savefig(os.path.join(self.savedir, "metrics_plot.png"))
        if show:
            plt.show()
        # Release the figure so repeated calls don't accumulate memory.
        plt.close(fig)


if __name__ == '__main__':
    # Path to the YAML hyper-parameter file.
    config = r'./default.yaml'
    # NOTE(review): this entry point looks unfinished — nothing is
    # constructed or run. Expected usage is presumably something like
    #   trainer = main(config, 'ResNet50')
    #   trainer.train()
    # — confirm intended model name and mode before wiring this up.
