# -*- coding: utf-8 -*-
from flyai.data_helper import DataHelper
from flyai.framework import FlyAI
from flyai.utils.log_helper import train_log

from path import *
from config import args, parser_info
from model import get_models, unfreeze_model
from confusion_matrix import confusion_matrix
from warm_scheduler import get_scheduler
from timer import Timer
from logger import logger
from losses import mse_loss as criterion_mse, FocalLoss
from ModelContainer import *

import torch
from torch.utils.data import DataLoader
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR

import numpy as np
import pandas as pd
import shutil
import os

# Pin all CUDA operations in this process to GPU 0 (single-GPU training setup).
torch.cuda.set_device(0)
# Record the parsed command-line configuration so every run's settings are logged.
logger.logger.debug(parser_info)


'''
此项目为FlyAI2.0新版本框架，数据读取，评估方式与之前不同
2.0框架不再限制数据如何读取
样例代码仅供参考学习，可以自己修改实现逻辑。
模版项目下载支持 PyTorch、Tensorflow、Keras、MXNET、scikit-learn等机器学习框架
第一次使用请看项目中的：FlyAI2.0竞赛框架使用说明.html
使用FlyAI提供的预训练模型可查看：https://www.flyai.com/models
学习资料可查看文档中心：https://doc.flyai.com/
常见问题：https://doc.flyai.com/question.html
遇到问题不要着急，添加小姐姐微信，扫描项目里面的：FlyAI小助手二维码-小姐姐在线解答您的问题.png
'''
# Ensure the checkpoint directory exists. exist_ok=True makes this a single
# atomic-enough call, avoiding the check-then-create race (and a crash if the
# directory appears between the exists() test and makedirs()) of the previous
# two-step version.
os.makedirs(MODEL_PATH, exist_ok=True)


class Main(FlyAI):
    """Training entry point for the FlyAI bald-classification task.

    Orchestrates data download, model/dataset/loss construction, and the
    epoch loop over train/val(/test) containers, supporting either a single
    model or a two-model ensemble (``args.multi_model``).
    """

    def __init__(self):
        # All configuration is read from the module-level `args`; nothing to set up.
        pass

    def download_data(self):
        """Download the competition dataset through the FlyAI data helper."""
        data_helper = DataHelper()
        data_helper.download_from_ids("BaldClassification")

    def train(self):
        """Run the full training procedure.

        Builds the model(s), datasets, dataloaders and loss, wires them into
        the train/val/test containers, iterates ``args.EPOCHS`` epochs
        (optionally unfreezing backbones at ``args.unfreeze_epoch``), and —
        in multi-model mode — reloads the per-model best checkpoints at the
        end for a final ensemble evaluation.
        """
        # Imported locally to defer dataset-module side effects until training starts.
        from dataset import BaldClassificationDataset
        if args.multi_model:
            net1, net2 = get_models()
        else:
            net1 = get_models()

        dataset_train = BaldClassificationDataset(split='train')
        dataloader_train = DataLoader(dataset_train, batch_size=args.BATCH, shuffle=args.shuffle)
        if args.train_ratio < 1:
            dataset_val = BaldClassificationDataset(split='val')
            dataloader_val = DataLoader(dataset_val, batch_size=args.BATCH)
        else:
            # BUGFIX: the validation containers below are built unconditionally,
            # so with train_ratio >= 1 (no held-out split) `dataset_val` /
            # `dataloader_val` were undefined and training crashed with a
            # NameError. Fall back to validating on the training split instead.
            dataset_val, dataloader_val = dataset_train, dataloader_train
        if not args.train_flyai:
            dataset_test = BaldClassificationDataset(split='test')
            dataloader_test = DataLoader(dataset_test, batch_size=args.BATCH)
        # Classification loss: focal loss for class imbalance, else plain BCE.
        criterion_ce = FocalLoss() if args.use_focal else nn.BCELoss()
        criterion_ce = criterion_ce.cuda()

        writer = SummaryWriter(LOG_PATH)

        # --- container wiring: one Single* container per model per phase,
        # --- plus a Multi* container combining both models in ensemble mode.
        single_train_1 = SingleTrainContainer(model_name=args.model1,
                                              model=net1,
                                              dataset=dataset_train,
                                              dataloader=dataloader_train,
                                              criteria=[criterion_ce],
                                              writer=writer)
        if args.multi_model:
            single_train_2 = SingleTrainContainer(model_name=args.model2,
                                                  model=net2,
                                                  dataset=dataset_train,
                                                  dataloader=dataloader_train,
                                                  criteria=[criterion_ce],
                                                  writer=writer)
            multi_train = MultiTrainContainer(containers=[single_train_1, single_train_2],
                                              dataset=dataset_train,
                                              dataloader=dataloader_train,
                                              criteria=[criterion_ce],
                                              writer=writer)

        single_val_1 = SingleValContainer(model_name=args.model1,
                                          model=net1,
                                          dataset=dataset_val,
                                          dataloader=dataloader_val,
                                          criteria=[criterion_ce],
                                          writer=writer)
        if args.multi_model:
            single_val_2 = SingleValContainer(model_name=args.model2,
                                              model=net2,
                                              dataset=dataset_val,
                                              dataloader=dataloader_val,
                                              criteria=[criterion_ce],
                                              writer=writer)
            multi_val = MultiValContainer(containers=[single_val_1, single_val_2],
                                          dataset=dataset_val,
                                          dataloader=dataloader_val,
                                          criteria=[criterion_ce],
                                          writer=writer)

        # Test containers only exist for local runs; on the FlyAI platform
        # there is no local test split.
        if not args.train_flyai:
            single_test_1 = SingleTestContainer(model_name=args.model1,
                                                model=net1,
                                                dataset=dataset_test,
                                                dataloader=dataloader_test,
                                                criteria=[criterion_ce],
                                                writer=writer)
            if args.multi_model:
                single_test_2 = SingleTestContainer(model_name=args.model2,
                                                    model=net2,
                                                    dataset=dataset_test,
                                                    dataloader=dataloader_test,
                                                    criteria=[criterion_ce],
                                                    writer=writer)
                multi_test = MultiTestContainer(containers=[single_test_1, single_test_2],
                                                dataset=dataset_test,
                                                dataloader=dataloader_test,
                                                criteria=[criterion_ce],
                                                writer=writer)
        timer = Timer('global')
        for epoch in range(args.EPOCHS):
            # Staged fine-tuning: at `unfreeze_epoch`, unfreeze the backbone and
            # refresh the optimizer's parameter list so the newly-trainable
            # parameters actually receive gradients/updates.
            if (epoch + 1) == args.unfreeze_epoch:
                unfreeze_model(net1)
                single_train_1.optimizer.param_groups[0]['params'] = list(filter(lambda p: p.requires_grad, net1.parameters()))
                if args.multi_model:
                    unfreeze_model(net2)
                    single_train_2.optimizer.param_groups[0]['params'] = list(filter(lambda p: p.requires_grad, net2.parameters()))

            # One full pass per phase; Multi* containers drive both models.
            if args.multi_model:
                multi_train.pass_data_epoch()
                multi_val.pass_data_epoch()
                if not args.train_flyai:
                    multi_test.pass_data_epoch()
            else:
                single_train_1.pass_data_epoch()
                single_val_1.pass_data_epoch()
                if not args.train_flyai:
                    single_test_1.pass_data_epoch()

            # Report per-epoch metrics to the FlyAI platform leaderboard/UI.
            if args.train_flyai:
                if args.multi_model:
                    train_log(train_loss=multi_train.losses_epoch[0] / multi_train.n_dataloader,
                              train_acc=multi_train.n_correct_epoch / multi_train.n_dataset,
                              val_loss=multi_val.losses_epoch[0] / multi_val.n_dataloader,
                              val_acc=multi_val.n_correct_epoch / multi_val.n_dataset)
                else:
                    train_log(train_loss=single_train_1.losses_epoch[0] / single_train_1.n_dataloader,
                              train_acc=single_train_1.n_correct_epoch / single_train_1.n_dataset,
                              val_loss=single_val_1.losses_epoch[0] / single_val_1.n_dataloader,
                              val_acc=single_val_1.n_correct_epoch / single_val_1.n_dataset)

            timer.clock(event=f'epoch {epoch + 1}')
        writer.close()

        # Final ensemble evaluation: reload each model's individually-best
        # checkpoint (paths come from path.py via the star import) and score
        # the two-model ensemble on val (and, locally, on test).
        if args.multi_model:
            net1.load_state_dict(torch.load(MODEL_FILE_PATH12))
            net2.load_state_dict(torch.load(MODEL_FILE_PATH22))
            multi_val.pass_data_epoch()
            logger.logger.info(f'\n[final acc ensemble by separate best: '
                               f'{args.prefix2}{multi_val.n_correct_epoch / multi_val.n_dataset}{args.suffix}')

            if not args.train_flyai:
                net1.load_state_dict(torch.load(MODEL_FILE_PATH14))
                net2.load_state_dict(torch.load(MODEL_FILE_PATH24))
                multi_test.pass_data_epoch()
                logger.logger.info(f'\n[final ensemble acc on test dataset by separate best: '
                                   f'{args.prefix2}{multi_test.n_correct_epoch / multi_test.n_dataset}{args.suffix}')


if __name__ == '__main__':
    # Start every run with a clean TensorBoard log directory.
    # ignore_errors=True removes the exists()/rmtree() check-then-act race
    # and is a no-op when the directory is already absent.
    shutil.rmtree(LOG_PATH, ignore_errors=True)
    main = Main()
    if args.train_flyai:
        # On the FlyAI platform the dataset must be fetched before training.
        main.download_data()
    main.train()