# coding: utf-8
"""
@Time    : 2024/8/14 10:15
@Author  : Y.H LEE
"""

import copy
import itertools

from torch.utils.data import DataLoader

from utils.tools import *
from sys_params import device, train_logs_save_dir, model_save_dir, h_params, search_params
from data_loader.simple_dataset import SimpleDataset
from grid_trainer import GridTrainer
import os

'''
grid search
    假设某些超参数之间的影响是独立的
'''


class GridSearch:
    """Exhaustive hyper-parameter grid search.

    Assumes the searched hyper-parameters influence the model independently,
    so a plain Cartesian product over every candidate value is explored and
    the configuration with the lowest validation MSE is kept.
    """

    def __init__(self, h_params, s_params):
        # Only search over entries whose candidate list is not the literal
        # string 'None' (config convention for "keep this parameter fixed").
        self.exp_params = [(k, v) for k, v in s_params.items() if v != 'None']
        self.all_params = list(h_params.keys())

        self.trainer = GridTrainer()
        self.h_params = h_params
        self.h_params['device'] = device
        self.h_params['model_save_dir'] = model_save_dir

        self.best_params = {}
        # BUG FIX: key was misspelled 'mes_loss'; the loop updates 'mse_loss',
        # so save_best() used to dump both the stale 'mes_loss' sentinel and
        # the real 'mse_loss' value.
        self.best_results = {'mse_loss': 9999., 'mape_loss': 9999., 'sgcc': 0.}

    def train_with_hyperparams_grid_search(self, save_model=False):
        """Run one training per point of the hyper-parameter grid.

        The searched parameters do not affect the Dataset, so the three
        dataloaders are built once up front instead of per grid point.

        :param save_model: forwarded to the trainer; whether to persist
            each trained model to ``model_save_dir``.
        """
        train_dataset = SimpleDataset(data_path=self.h_params['data_path'], seq_window=self.h_params['seq_window'])
        train_dataloader = DataLoader(dataset=train_dataset,
                                      batch_size=self.h_params['batch_size'],
                                      drop_last=True)

        valid_dataset = SimpleDataset(data_path=self.h_params['data_path'], seq_window=self.h_params['seq_window'],
                                      dataset_type='valid')
        valid_dataloader = DataLoader(dataset=valid_dataset,
                                      batch_size=self.h_params['batch_size'],
                                      drop_last=True)

        test_dataset = SimpleDataset(data_path=self.h_params['data_path'], seq_window=self.h_params['seq_window'],
                                     dataset_type='test')
        test_dataloader = DataLoader(dataset=test_dataset,
                                     batch_size=self.h_params['batch_size'],
                                     drop_last=True)

        best_mse_loss = 9999.
        # itertools.product over the candidate lists = Cartesian grid.
        hp_list = list(itertools.product(*[p[1] for p in self.exp_params]))

        for i, var_p in enumerate(hp_list):
            grid_id = f'{i + 1}'
            # Overwrite the searched entries of h_params for this grid point.
            for (p_name, _), value in zip(self.exp_params, var_p):
                self.h_params[p_name] = value

            self.h_params['train_logs_save_dir'] = train_logs_save_dir + 'grid_search/' + self.h_params['model'] + '_' + \
                                                   self.h_params['dataset_name']
            # exist_ok avoids the check-then-create race of the old code.
            os.makedirs(self.h_params['train_logs_save_dir'], exist_ok=True)
            self.h_params['train_logs'] = '/' + grid_id + '.txt'

            print(f'exp: {i + 1}/{len(hp_list)}')
            self.trainer.reset_res_list()
            mse_loss, mape_loss, sgcc = self.trainer(self.h_params, train_dataloader, valid_dataloader, test_dataloader,
                                                     save_model)

            if mse_loss < best_mse_loss:
                best_mse_loss = mse_loss

                # Deep copy: h_params is mutated on every iteration.
                self.best_params = copy.deepcopy(self.h_params)

                self.best_results['mse_loss'] = mse_loss
                self.best_results['mape_loss'] = mape_loss
                self.best_results['sgcc'] = sgcc

        self.save_best(name='grid_search/grid_search_res')

    def train_with_hyperparams(self, save_model=False):
        """Placeholder for a single (non-grid) training run."""
        pass

    def save_best(self, name):
        """Write the best hyper-parameters and metrics to ``<name>.txt``.

        :param name: path relative to ``train_logs_save_dir`` (no extension).
        """
        out_path = train_logs_save_dir + name + '.txt'
        # Robustness: the containing directory may not exist if the grid
        # loop never ran (e.g. empty search space).
        dir_name = os.path.dirname(out_path)
        if dir_name:
            os.makedirs(dir_name, exist_ok=True)
        with open(out_path, 'w', encoding='utf-8') as fw:
            fw.write('Best hyperparams are: \n')
            for k, v in self.best_params.items():
                fw.write(f'{k} : {v}\n')
            fw.write('\n')
            fw.write('Best results are: \n')
            for k, v in self.best_results.items():
                fw.write(f'{k} : {v}\n')


if __name__ == '__main__':
    # Load the base hyper-parameters and the grid-search candidate lists,
    # then smoke-test the training dataloader by printing every batch.
    base_params = load_json(h_params)
    grid_params = load_json(search_params)
    searcher = GridSearch(base_params, grid_params)

    smoke_dataset = SimpleDataset(
        data_path=searcher.h_params['data_path'],
        seq_window=searcher.h_params['seq_window'],
    )
    smoke_loader = DataLoader(
        dataset=smoke_dataset,
        batch_size=searcher.h_params['batch_size'],
        drop_last=True,
    )
    for _, batch in enumerate(smoke_loader):
        print(batch)
    print(0)
