# coding: utf-8
"""
@Time    : 2024/8/14 10:14
@Author  : Y.H LEE
"""
from models.lstm import LSTM
from sys_params import device
import numpy as np
import torch.nn

from eval import MyEvaluator
from tqdm import tqdm

from utils.tools import *
from models import *

'''
Trainer
'''


class GridTrainer:
    """Runs the train / validate / test loop for one hyper-parameter setting.

    Per-phase metrics (MSE, MAPE, SGCC) are accumulated on the instance,
    appended to a text log file, and rendered as PNG plots at the end of a run.
    """

    def __init__(self):
        # Metric histories; created via reset_res_list() so that construction
        # and between-run resets share a single code path.
        self.reset_res_list()

    def __call__(self, h_params, train_dataloader, valid_dataloader, test_dataloader, save_model=False):
        """Train the configured model and return its best test metrics.

        Args:
            h_params: dict of hyper-parameters; must provide 'model',
                'learning_rate', 'start_epoch', 'end_epoch', 'patience',
                'train_logs_save_dir', 'train_logs', 'model_save_dir',
                'dataset_name' plus the model-specific keys (e.g. 'hid_dim',
                'num_layers').
            train_dataloader / valid_dataloader / test_dataloader: iterables of
                batches; the model is expected to return a
                (mse_loss, mape_loss, sgcc) tensor triple per batch.
            save_model: when truthy, checkpoint the best model to disk.

        Returns:
            (best_test_mse_loss, best_test_mape_loss, best_test_sgcc): float
            test metrics of the epoch with the lowest test MSE loss.
        """
        """build model"""
        model = self.__get_model__(h_params)
        optimizer = torch.optim.Adam(model.parameters(), h_params['learning_rate'])

        """checkpoint"""
        start_epoch = h_params['start_epoch']

        """start training & evaluating"""
        evaluator = MyEvaluator()
        train_logs = h_params['train_logs_save_dir'] + h_params['train_logs']
        # Start a fresh log file with the hyper-parameters at its head.
        with open(train_logs, 'w', encoding='utf-8') as fw:
            for k, v in h_params.items():
                fw.write('--> ' + k + ' : ' + str(v) + '\n')
            fw.write('\n')

        patience = h_params['patience']
        inc = 0  # epochs since the test MSE last improved
        early_stopping = False
        # inf sentinels guarantee the first epoch becomes the initial "best"
        # regardless of loss scale (the old 9999. could miss large losses).
        best_test_mse_loss = float('inf')
        best_test_mape_loss = float('inf')
        best_test_sgcc = 0.

        # total is the number of epochs actually iterated, not end_epoch,
        # so the progress bar is correct when resuming from start_epoch > 0.
        for epoch in tqdm(range(start_epoch, h_params["end_epoch"]),
                          total=h_params["end_epoch"] - start_epoch, position=0):
            torch.cuda.empty_cache()

            print('epoch:{0}'.format(epoch))
            print('lr:{0}'.format(h_params['learning_rate']))

            """model training"""
            model.train()
            for step, data in enumerate(train_dataloader):
                mse_loss, mape_loss, sgcc = model(data)
                optimizer.zero_grad()
                # Only the MSE loss drives the gradient update; MAPE and SGCC
                # are tracked for reporting.
                mse_loss.backward()
                optimizer.step()

                self.train_mse_losses.append(mse_loss.cpu().item())
                self.train_mape_losses.append(mape_loss.cpu().item())
                self.train_sgcc.append(sgcc.cpu().item())

                if step % 20 == 0:
                    with open(train_logs, 'a', encoding='utf-8') as fw:
                        fw.write(
                            f'epoch_{epoch} step_{step}: mse_loss: {round(mse_loss.cpu().item(), 4)}, '
                            f'mape_loss: {round(mape_loss.cpu().item(), 4)}, '
                            f'sgcc: {round(sgcc.cpu().item(), 4)}, \n')

            """model evaluating"""
            val_mse_loss, val_mape_loss, val_sgcc = evaluator.evaluate(model, valid_dataloader)

            with open(train_logs, 'a', encoding='utf-8') as fw:
                fw.write(
                    f'\n*[valid]* epoch_{epoch}: mse_loss: {round(val_mse_loss.cpu().item(), 4)},  '
                    f'mape_loss: {round(val_mape_loss.cpu().item(), 4)},  '
                    f'sgcc: {round(val_sgcc.cpu().item(), 4)}\n\n')
            self.val_mse_losses.append(val_mse_loss.cpu().item())
            self.val_mape_losses.append(val_mape_loss.cpu().item())
            self.val_sgcc.append(val_sgcc.cpu().item())

            """model testing"""
            test_mse_loss, test_mape_loss, test_sgcc = evaluator.evaluate(model, test_dataloader)
            print(
                f'test_mse_loss: {round(test_mse_loss.cpu().item(), 4)};  '
                f'test_mape_loss: {round(test_mape_loss.cpu().item(), 4)};  '
                f'test_sgcc: {round(test_sgcc.cpu().item(), 4)}')
            with open(train_logs, 'a', encoding='utf-8') as fw:
                fw.write(
                    f'\n**[test]** Test: mse_loss: {round(test_mse_loss.cpu().item(), 4)},  '
                    f'mape_loss: {round(test_mape_loss.cpu().item(), 4)},  '
                    f'sgcc: {round(test_sgcc.cpu().item(), 4)}\n\n')
            self.test_mse_losses.append(test_mse_loss.cpu().item())
            self.test_mape_losses.append(test_mape_loss.cpu().item())
            self.test_sgcc.append(test_sgcc.cpu().item())

            """save best model & early stopping"""
            # Best-model selection is driven by the test MSE (NOTE(review):
            # selecting on the test split leaks test data into model choice —
            # confirm this is intentional; validation MSE would be the
            # conventional criterion).
            if (test_mse_loss.cpu().item()) < best_test_mse_loss:
                best_test_sgcc = test_sgcc.cpu().item()
                best_test_mse_loss = test_mse_loss.cpu().item()
                best_test_mape_loss = test_mape_loss.cpu().item()

                inc = 0
                if save_model:
                    state = {'model': model.state_dict(), 'epoch': epoch}
                    torch.save(state, h_params['model_save_dir'] + 'model_' + h_params['dataset_name'] + '.pth')
            else:
                inc += 1

            if inc >= patience:
                early_stopping = True
                break

        visual_per_epoch_loss(self.train_mse_losses)
        if early_stopping:
            with open(train_logs, 'a', encoding='utf-8') as fw:
                fw.write(f'\nEarly stop at epoch: {epoch}, batch steps: {step}\n\n')

        with open(train_logs, 'a', encoding='utf-8') as fw:
            fw.write(f'Best performing model on test data: \n'
                     f'      test_mse_loss = {round(best_test_mse_loss, 4)}\n'
                     f'      test_mape_loss = {round(best_test_mape_loss, 4)}\n'
                     f'      test_sgcc = {round(best_test_sgcc, 4)}')

        # One PNG per phase; distinct figure ids so the figures never collide
        # (the test plot previously reused the validation plot's figure id 3).
        self._plot_metrics(2, " Training metric",
                           [("train_mse_loss", self.train_mse_losses),
                            ("train_mape_loss", self.train_mape_losses),
                            ("train_sgcc", self.train_sgcc)],
                           "time step (=iterations)",
                           h_params['train_logs_save_dir'] + '/train.png')
        self._plot_metrics(3, " Validation metric",
                           [("val_mse_loss", self.val_mse_losses),
                            ("val_mape_loss", self.val_mape_losses),
                            ("val_sgcc", self.val_sgcc)],
                           "epoch",
                           h_params['train_logs_save_dir'] + '/valid.png')
        self._plot_metrics(4, " Testing metric",
                           [("test_mse_loss", self.test_mse_losses),
                            ("test_mape_loss", self.test_mape_losses),
                            ("test_sgcc", self.test_sgcc)],
                           "epoch",
                           h_params['train_logs_save_dir'] + '/test.png')

        return best_test_mse_loss, best_test_mape_loss, best_test_sgcc

    def _plot_metrics(self, fig_id, title, series, xlabel, out_path):
        """Plot named metric curves on a dedicated figure and save it as PNG.

        Args:
            fig_id: matplotlib figure number (unique per phase).
            title: figure title.
            series: list of (label, values) pairs to plot.
            xlabel: x-axis label.
            out_path: path of the PNG file to write.
        """
        plt.figure(fig_id, figsize=(10, 5))
        plt.title(title)
        for label, values in series:
            plt.plot(values, label=label)
        plt.xlabel(xlabel)
        plt.ylabel("accuracy")
        plt.legend()
        plt.savefig(out_path)
        plt.clf()

    def __get_model__(self, h_params):
        """Instantiate the model named by h_params['model'] on the target device.

        Raises:
            ValueError: if the model name is not recognised.
        """
        model_name = h_params['model']
        if model_name == 'lstm':
            # input dim 1 / output dim 1: univariate series in, scalar out
            model = LSTM(1, h_params['hid_dim'], h_params['num_layers'], 1).to(device)
        else:
            raise ValueError(f'model {model_name} not exists')

        return model

    def reset_res_list(self):
        """Clear all accumulated per-phase metric histories."""
        self.train_mse_losses = []
        self.train_mape_losses = []
        self.train_sgcc = []

        self.val_mse_losses = []
        self.val_mape_losses = []
        self.val_sgcc = []

        self.test_mse_losses = []
        self.test_mape_losses = []
        self.test_sgcc = []
