# -*- coding: utf-8 -*-
"""
Created on Sun Jan 17 10:37:19 2021
多线程运行
@author: 59567
"""
import pickle
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from copy import deepcopy
from tjd_slicer.tjd_slicer import Slicer
from loader.loader import Loader
from constructor.rnns_pytorch import RNNs
from constructor import darnn
from constructor.tcn import TCN
from constructor.linear_models import LinearModel
from trainer.trainer import Trainer
from trainer.trainer_pytorch import TorchTrainer
from tjd_config.config import SubEdges
from saver.saver import Saver
from tjdutils.utils import current_time, load_pickle
from selector.filter import Filter, CorrelationFilter
from selector.embedded import Embedded
from selector.distance import Distance
from collector.collector import run_collector
from multiprocessing import Pool


def basic_selector(x, y, vs, space_time, i_batch):
    """Run feature selection for one batch of configurations and pickle the result.

    Each element of ``vs`` is a parameter dict describing how to slice the data
    and which selector to apply; the chosen feature list is written back into
    the dict under ``'features'``. The enriched batch is then pickled to
    ``../output/selector/selector_<space_time>/<i_batch>.pkl``.

    Returns the path of the pickle file that was written.
    """
    for idx in tqdm(range(len(vs)), desc='run selector with batch>>>' + str(i_batch)):
        cfg = deepcopy(vs[idx])
        print('i_batch', '>>>', i_batch, '   ', 'i', '>>>', idx)
        slicer = Slicer(x=x, y=y, freq=cfg['freq'], end_datetime=cfg['end_datetime'],
                        periods=cfg['periods'], test_len=cfg['test_len'],
                        y_name=cfg['y_name'], features=[])
        sliced_x, sliced_y = slicer.x, slicer.y

        kind = cfg['selector']
        if kind == 'Filter':
            picked = Filter(sliced_x, sliced_y, n_selected_features=1000).select_features(
                score_func=cfg['score_func'], mode=cfg['mode'])
        elif kind == 'Correlation_filter':
            picked = CorrelationFilter(sliced_x, sliced_y, n_selected_features=1000).select_features(
                method=cfg['method'])
        elif kind == 'Embedded':
            picked = Embedded(sliced_x, sliced_y, n_selected_features=1000).select_features(
                estimator=cfg['estimator'])
        elif kind == 'Distance':
            picked = Distance(sliced_x, sliced_y, n_selected_features=1000).select_features(
                metric=cfg['metric'])
        else:
            raise NameError('No this feature selector.')

        vs[idx]['features'] = picked

    dir_path = ''.join(['../output/selector/selector_', space_time, '/'])
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    path = dir_path + str(i_batch) + '.pkl'
    with open(path, 'wb') as f:
        pickle.dump(vs, f)
        print('保存成功', '>>', path)
    return path


def run_selector(x, y, selector_subspace, space_time, n_cores=6, num_split=50):
    """Split the selector search space into ``num_split`` batches and run them
    concurrently on a pool of ``n_cores`` worker processes.

    Batches ``0 .. num_split-2`` each take ``cell`` items; the final batch takes
    everything that remains. Returns the list of pickle paths produced by
    ``basic_selector``, in batch order.
    """
    total = len(selector_subspace)
    cell = round(total / num_split)
    task_args = []
    for batch in range(num_split):
        lo = batch * cell
        # Last batch absorbs any remainder left over by the rounding of `cell`.
        hi = None if batch == num_split - 1 else lo + cell
        chunk = selector_subspace[lo:hi]
        marker = (batch + 1) * cell
        print('i_start', '>>>', marker, '  ', 'i_end', '>>>', marker)
        task_args.append((x, y, chunk, space_time, batch))

    pool = Pool(n_cores)
    pending = [pool.apply_async(basic_selector, args) for args in task_args]
    pool.close()
    pool.join()
    return [task.get() for task in pending]


def basic_trainer(x, y, selector_path, trainer_subspace):
    """Train every configuration in ``trainer_subspace`` against one selector batch.

    Parameters
    ----------
    x, y : feature matrix and target series, passed straight to ``Slicer``.
    selector_path : path of a pickle written by ``basic_selector``; each entry
        holds the slicing parameters plus the selected ``'features'`` list.
    trainer_subspace : list of trainer parameter dicts (learner, model, epochs, ...).

    Returns
    -------
    str
        Path of the pickle holding the accumulated trainer results
        (``../output/trainer/<run-name>/<batch>.pkl``).
    """
    # Derive the trainer output folder name and the batch id from the selector path.
    name_from_selector = selector_path.split('/')[-2].replace('selector', 'trainer')
    i_batch = selector_path.split('/')[-1].split('.')[0]
    selector_result = load_pickle(selector_path)
    trainer_result = []

    for i_selector in tqdm(range(len(selector_result)), desc='run trainer with batch>>>' + str(i_batch)):
        vs = deepcopy(selector_result[i_selector])
        constructor_path = ''.join(
            ['../output/constructor/', vs['y_name'], '/', selector_path.split('/')[-2].replace('selector', 'model'),
             '/', str(i_batch)])
        # ':' (e.g. from timestamps in the run name) is not a legal path
        # character on Windows, so map it to '~'.
        constructor_path = constructor_path.replace(":", "~")

        if not os.path.exists(constructor_path):
            os.makedirs(constructor_path)

        # ks: the selector config without its (potentially large) feature list;
        # its repr is used as a result key below.
        ks = deepcopy(vs)
        ks.pop("features")
        for vt in trainer_subspace:
            if vt['learner'] == 'machine_learning':
                dataset_ = Slicer(x=x, y=y, freq=vs['freq'], end_datetime=vs['end_datetime'], periods=vs['periods'],
                                  features=list(vs['features']), y_name=vs['y_name'], test_len=vs['test_len'],
                                  n_features=vt['n_features'], x_time_step=vt['x_time_step'],
                                  y_time_step=vt['y_time_step'], prediction=vt['prediction'], valid_pct=vt['valid_pct'])

                (x_train, y_train, x_valid, y_valid, x_test) = dataset_.dataset

                loader_ = Loader(dataset_.dataset, tensor_loader=False)
                model_ = LinearModel(vt['model'])
                trainer_ = Trainer(loader_.loader, model_.model)
                (y_train_p, y_train, y_valid_p, y_valid, y_test_p) = trainer_.output
                y_test = np.squeeze(dataset_.y_test)
                # Reshape 0-d/1-d targets to a column vector. The original test
                # `ndim == 0 or 1` was always truthy (operator precedence bug)
                # and therefore also force-reshaped 2-d arrays.
                y_test = y_test.reshape(-1, 1) if y_test.ndim <= 1 else y_test
                # np.nan placeholders stand in for the (absent) loss curves.
                # (np.NAN was removed in NumPy 2.0.)
                output = (y_train_p, y_train, y_valid_p, y_valid, y_test_p, y_test, np.nan, np.nan)

                saver_ = Saver(key1=str(ks), value1=vs, key2=str(vt), value2=vt, model=model_, trainer=trainer_,
                               loader=loader_,
                               output=output, constructor_path=constructor_path, deep_learning=False)
                trainer_result.append(saver_.trainer_space)

            elif vt['learner'] == 'deep_learning':

                dataset_ = Slicer(x=x, y=y, freq=vs['freq'], end_datetime=vs['end_datetime'], periods=vs['periods'],
                                  features=list(vs['features']), y_name=vs['y_name'], test_len=vs['test_len'],
                                  n_features=vt['n_features'], x_time_step=vt['x_time_step'],
                                  y_time_step=vt['y_time_step'], prediction=vt['prediction'], valid_pct=vt['valid_pct'])

                (x_train, y_train, x_valid, y_valid, x_test) = dataset_.dataset
                y_test = dataset_.y_test

                if vt['model'] == 'DARNN':
                    # Training window: drop the test tail. In non-prediction mode
                    # the first x row is also dropped — presumably to offset x
                    # against next-step y; TODO confirm against Slicer/darnn.
                    if vt["prediction"]:
                        X = dataset_.xx[:-vs['test_len']].to_numpy()
                    else:
                        X = dataset_.xx[1:-vs['test_len']].to_numpy()
                    Y = np.array(dataset_.yy[:-vs['test_len']].to_numpy())
                    # Initialize model
                    print("==> Initialize DA-RNN model ...")
                    model = darnn.DA_RNN(X=X, y=Y, T=vt['x_time_step'] + 1, encoder_num_hidden=vt['nhidden_encoder'],
                                         decoder_num_hidden=vt['nhidden_decoder'],
                                         batch_size=vt['batch_size'], learning_rate=vt['learning_rate'],
                                         epochs=vt['epochs'], valid_pct=vt['valid_pct'])
                    # Train
                    print("==> Start training ...")
                    y_train_pred, y_valid_pred, iter_losses, epoch_losses, encoder_optimizer, decoder_optimizer, Encoder, Decoder = darnn.train(
                        train_timesteps=model.train_timesteps, batch_size=model.batch_size, epochs=model.epochs,
                        shuffle=model.shuffle, T=model.T, input_size=model.input_size, y=model.y, X=model.X,
                        encoder_optimizer=model.encoder_optimizer, decoder_optimizer=model.decoder_optimizer,
                        Encoder=model.Encoder, device=model.device, Decoder=model.Decoder, criterion=model.criterion)
                    # Rebuild X/Y for the test window (context rows + test rows).
                    if vt["prediction"]:
                        X = dataset_.xx[-model.T - vs['test_len'] + 2:].to_numpy()
                        Y = np.array(dataset_.yy[
                                     -model.T - vs['test_len'] - vt['y_time_step'] + 2:-vt['y_time_step']].to_numpy())
                    else:
                        if vt['y_time_step'] == 1:
                            X = dataset_.xx[-model.T - vs['test_len'] - vt['y_time_step'] + 3:].to_numpy()
                        else:
                            X = dataset_.xx[
                                -model.T - vs['test_len'] - vt['y_time_step'] + 3:-vt['y_time_step'] + 1].to_numpy()
                        # Pad one zero row so X keeps the expected length after the
                        # shift. (np.vstack replaces the deprecated np.row_stack.)
                        X = np.vstack((X, np.zeros((1, X.shape[1]))))
                        Y = np.array(dataset_.yy[-model.T - vs['test_len'] - vt['y_time_step'] + 2:].to_numpy())

                    # Prediction
                    result_dict = {}
                    y_test_pred = darnn.test(test_len=vs['test_len'], T=model.T, X=X, batch_size=model.batch_size, y=Y,
                                             device=model.device, Encoder=Encoder, Decoder=Decoder)
                    result_dict["y_train_predictions"] = y_train_pred
                    result_dict["y_valid_predictions"] = y_valid_pred
                    result_dict["y_test_predictions"] = y_test_pred
                    # Train/valid targets were identical in both prediction modes;
                    # the non-prediction branch originally dropped the slice colon
                    # in y_test_targets (`yy[-test_len]`, a scalar) — a slice is
                    # clearly intended in both modes.
                    result_dict["y_train_targets"] = dataset_.yy[model.T - 1:model.T + len(y_train_pred) - 1]
                    result_dict["y_valid_targets"] = dataset_.yy[model.T + len(y_train_pred) - 1:model.T + len(
                        y_train_pred) + len(y_valid_pred) - 1]
                    result_dict["y_test_targets"] = dataset_.yy[-vs['test_len']:]

                    result_dict["train_loss"] = iter_losses
                    # NOTE(review): valid_loss duplicates train_loss here;
                    # darnn.train also returns epoch_losses, which may be what
                    # was intended — confirm before changing.
                    result_dict["valid_loss"] = iter_losses
                    for item in ["y_train_predictions", "y_train_targets", "y_valid_predictions", "y_valid_targets",
                                 "y_test_predictions", "y_test_targets", "train_loss", "valid_loss"]:
                        # Fix of `ndim == 0 or 1` (always truthy): only 0-d/1-d
                        # values are reshaped to a column vector.
                        if np.array(result_dict[item]).ndim <= 1:
                            result_dict[item] = np.array(result_dict[item]).reshape(-1, 1)
                        else:
                            result_dict[item] = np.array(result_dict[item])
                    result_dict["encoder_optimizer"] = encoder_optimizer
                    result_dict["decoder_optimizer"] = decoder_optimizer
                    result_dict["Encoder"] = Encoder
                    result_dict["Decoder"] = Decoder
                    result_dict["model_path"] = ''.join(
                        ['..//output//trainer//', name_from_selector, '//', str(i_batch), '.pkl'])
                    result_dict["trainer_key"] = ''.join([str(ks), str(vt)])
                    trainer_result.append({**vs, **vt, **result_dict})
                else:
                    # Torch-based sequence models trained via the generic TorchTrainer.
                    if vt['model'] in ('RNN', 'GRU', 'LSTM'):
                        model_ = RNNs(model_name=vt['model'], input_size=vt['n_features'], hidden_size=10,
                                      batch_first=True, time_step=vt['x_time_step'], output_size=vt['y_time_step'])
                    elif vt['model'] == 'TCN':
                        model_ = TCN(input_size=vt['n_features'], output_size=vt['y_time_step'], num_channels=[30] * 8,
                                     kernel_size=2, dropout=0)
                    else:
                        model_ = None

                    loader_ = Loader(dataset_.dataset, batch_size=vt['batch_size'])
                    trainer_ = TorchTrainer(loader_.loader, model_, loss_func=vt['loss_func'],
                                            optimizer=vt['optimizer'],
                                            learning_rate=vt['learning_rate'], epochs=vt['epochs'])
                    trainer_.train()

                    y_train_predictions = trainer_.train_predictions
                    train_loss = trainer_.train_loss
                    y_valid_predictions = trainer_.valid_predictions
                    valid_loss = trainer_.valid_loss
                    output = (y_train_predictions, y_train, y_valid_predictions, y_valid, y_test,
                              train_loss, valid_loss)
                    saver_ = Saver(key1=str(ks), value1=vs, key2=str(vt), value2=vt, model=model_, trainer=trainer_,
                                   loader=loader_,
                                   output=output, constructor_path=constructor_path, deep_learning=True)

                    trainer_result.append(saver_.trainer_space)

    dir_path = ''.join(['../output/trainer/', name_from_selector, '/'])
    print('checking exists')
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    path = dir_path + str(i_batch) + '.pkl'
    with open(path, 'wb') as f:
        pickle.dump(trainer_result, f)
        print('保存成功', '>>', path)
    return path


def run_trainer(x, y, selector_addresses, trainer_subspace, n_cores=6):
    """Fan the training work out over a process pool, one task per selector pickle.

    With an 8-physical-core CPU, 6 workers is the suggested default. Returns
    the trainer-result pickle paths in the order of ``selector_addresses``.
    """
    pool = Pool(n_cores)
    pending = [pool.apply_async(basic_trainer, (x, y, addr, trainer_subspace))
               for addr in selector_addresses]
    pool.close()
    pool.join()
    return [task.get() for task in pending]


def run_start(x, y, edges):
    """Persist the search spaces, then run the selector and trainer stages.

    Raises ValueError when the combined search space exceeds 5,000,000
    combinations. Returns a 6-tuple:
    (all_path, selector_space_path, trainer_space_path, selector_dir,
     trainer_dir, trainer_final_time).
    """
    selector_subspace = SubEdges(edges, vertex='engineer')()
    space_time = current_time(date_format="us")

    selector_space_path = f'../output/space/selector_subspace_{space_time}.pkl'
    trainer_space_path = f'../output/space/trainer_subspace_{space_time}.pkl'

    with open(selector_space_path, "wb") as f:
        pickle.dump(selector_subspace, f)
    trainer_subspace = SubEdges(edges, vertex="trainer")()
    with open(trainer_space_path, "wb") as f:
        pickle.dump(trainer_subspace, f)

    n_combinations = len(selector_subspace) * len(trainer_subspace)
    print(n_combinations)
    if n_combinations > 5000000:
        raise ValueError('减少参数搜索空间，降低遍历参数组合')

    selector_dir = run_selector(x, y, selector_subspace, space_time)
    trainer_dir = run_trainer(x, y, selector_dir, trainer_subspace)
    all_path = {
        "selector_space_path": selector_space_path,
        "trainer_space_path": trainer_space_path,
        "selector_result_path": selector_dir,
        "trainer_result_path": trainer_dir,
    }
    trainer_final_time = current_time("us")
    all_path_str = f'../output/path/{trainer_final_time}.pkl'
    with open(all_path_str, "wb") as f:
        pickle.dump(all_path, f)
    return all_path, selector_space_path, trainer_space_path, selector_dir, trainer_dir, trainer_final_time


def run_a_y(y_name):
    """Run the full pipeline (selector → trainer → collector) for one target.

    Loads the edge configuration for ``y_name``, normalizes the target index to
    datetimes, executes ``run_start``, then runs the collector and pickles the
    final path map to ``../output/path/<time>_collector.pkl``.
    """
    from tjdutils.utils import output_dir
    from tjd_config.config import get_edges
    import pandas as pd

    edges = get_edges(y_name)
    output_dir()
    x = edges['x']
    y = edges['y']
    edges["Slicer1"]["y_name"] = [y_name]
    y.index = pd.to_datetime(y.index)

    (all_path, selector_space_path, trainer_space_path,
     selector_dir, trainer_dir, trainer_final_time) = run_start(x, y, edges)

    s_dir = '/'.join(selector_dir[0].split('/')[:-1])
    collector_path, _ = run_collector(selector_space_path, trainer_space_path,
                                      selector_dir, trainer_dir, s_dir)
    all_path["collector_result_path"] = collector_path
    all_path_str = "../output/path/" + trainer_final_time + '_collector.pkl'
    with open(all_path_str, "wb") as f:
        pickle.dump(all_path, f)


if __name__ == '__main__':
    # Placeholder entry point: only prints a sentinel value. The pipeline itself
    # is presumably launched via run_a_y from another script — confirm caller.
    print(666)

