from torch.utils.data import TensorDataset, DataLoader
import pandas as pd
import os.path as op
import numpy as np
from utils.models import LSTMNet
from utils.pytorchtools import EarlyStopping
from utils.mySummary import SummaryLogger
import torch
import os
import torch.nn as nn
# from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from utils.train_utils import evaluate_model, epoch_trainer, epoch_validation
import argparse
import time
import yaml
import shutil
import tsaug as ts
from utils.augmentation import * 
from torch.utils.tensorboard import SummaryWriter


# Registry of data-augmentation transforms selectable via --da_method.
# The tsaug-based entries expose an .augment() method; the remaining
# entries are plain callables from utils.augmentation.
da_methods_mapping = dict(
    convolve=ts.Convolve(window="hann"),
    pool=ts.Pool(size=3),
    jitter=ts.AddNoise(scale=0.05),
    quantize=ts.Quantize(n_levels=17),
    reverse=ts.Reverse(),
    timewarp=ts.TimeWarp(n_speed_change=4, max_speed_ratio=1.5),
    spawner=spawner,
    scaling=scaling,
    magnitude_warp=magnitude_warp,
    window_warp=window_warp,
)

from torch.utils.data import DataLoader
from prefetch_generator import BackgroundGenerator

class DataLoaderX(DataLoader):
    """DataLoader whose batches are produced ahead of time in a background
    thread (via prefetch_generator), hiding data-loading latency."""

    def __iter__(self):
        base_iterator = super().__iter__()
        return BackgroundGenerator(base_iterator)

class DataPrefetcher(object):
    """Iterate a DataLoader while asynchronously copying the *next* batch to
    ``device`` on a dedicated CUDA stream, overlapping host-to-device
    transfers with compute. Requires a CUDA-capable device.
    """

    def __init__(self, loader, device):
        self.loader = loader
        self.dataset = loader.dataset
        # Side stream used exclusively for the async H2D copies.
        self.stream = torch.cuda.Stream()
        self.next_input = None
        self.next_target = None
        self.device = device

    def __len__(self):
        return len(self.loader)

    def preload(self):
        """Fetch the next (input, target) pair and start its async device copy.

        Sets both buffers to None when the underlying iterator is exhausted.
        """
        try:
            self.next_input, self.next_target = next(self.loaditer)
        except StopIteration:
            self.next_input = None
            self.next_target = None
            return
        with torch.cuda.stream(self.stream):
            # non_blocking=True only overlaps if the loader pins its memory
            # (build_dataloader sets pin_memory=True).
            self.next_input = self.next_input.cuda(device=self.device, non_blocking=True)
            self.next_target = self.next_target.cuda(device=self.device, non_blocking=True)

    def __iter__(self):
        self.loaditer = iter(self.loader)
        self.preload()
        while self.next_input is not None:
            # Make the copy issued on the side stream visible to the compute
            # stream before the batch is consumed.
            torch.cuda.current_stream().wait_stream(self.stream)
            batch_input = self.next_input
            batch_target = self.next_target
            # NOTE(review): batches handed out here are consumed on the current
            # stream while the side stream may later reuse their memory;
            # consider batch_input.record_stream(torch.cuda.current_stream())
            # — verify against PyTorch stream-semantics docs.
            self.preload()
            yield batch_input, batch_target

    
class DaFinance:
    """Train and evaluate an LSTM classifier on financial study periods,
    optionally applying time-series data augmentation to the training split."""

    @staticmethod
    def main():
        """Parse CLI arguments, select the device, prepare the run directory,
        snapshot this script and its parameters, then run all experiments."""
        torch.backends.cudnn.benchmark = True
        args = DaFinance.parse_args()

        from datetime import datetime
        timestamp = datetime.now().strftime("%m-%d-%Y-%H-%M-%S")
        run_path = f"./results/{timestamp}/{args.run_path}"

        is_cuda = torch.cuda.is_available()
        if is_cuda:
            device = torch.device("cuda")
            print(f"current_device():{torch.cuda.current_device()}")
        else:
            device = torch.device("cpu")
        print(f"device: {device}")

        # The run directory is timestamped, so it will not exist yet; create
        # it before copying files / writing the parameter snapshot into it.
        DaFinance.create_directory(run_path)
        copied_script_name = op.basename(__file__)
        if (run_path != './'):
            # Copy auxiliary project files into the run dir for reproducibility.
            # NOTE(review): run_path is built from a CLI argument and passed to
            # a shell here — keep it free of shell metacharacters.
            os.popen('./cpfiles.sh ' + run_path).read()
            shutil.copy(__file__, op.join(run_path, copied_script_name))
        date_ = time.strftime("%Y-%m-%d_%H%M")
        # Persist the exact hyper-parameters used for this run.
        with open(op.join(run_path, date_ + '_parameters.yml'), 'w') as outfile:
            yaml.dump(vars(args), outfile, default_flow_style=False)
        DaFinance.run_experiments(args, device, run_path)

    @staticmethod
    def parse_args():
        """Define and parse the command-line interface.

        Returns:
            argparse.Namespace holding all experiment hyper-parameters.
        """
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument('--run_path', default='./results', help='experiment directory')
        parser.add_argument('--batch_size', type=int, default=128, help='batch size')
        parser.add_argument('--hidden_dim', type=int, default=32, help='hidden dimension of LSTM')
        parser.add_argument('--n_layers', type=int, default=4, help='number of layers in the LSTM')
        parser.add_argument('--n_epochs', type=int, default=600, help='number of epochs for training')
        parser.add_argument('--dropout_prob', type=float, default=0.0, help='dropout probability')
        parser.add_argument('--init_sp', type=int, default=0, help='initial data split')
        parser.add_argument('--end_sp', type=int, default=1, help='final data split')
        parser.add_argument('--gpu_number', type=int, default=1, help=' ')
        parser.add_argument('--patience', type=int, default=10, help='patience for early stopping')
        parser.add_argument('--da_method', choices=list(da_methods_mapping.keys()) + ['None'],
                            default='None', help='augmentation methods')
        parser.add_argument('--data_parallel', action='store_true')
        parser.add_argument('--profiling', action='store_true')
        parser.add_argument('--weight_decay', type=float, default=0.0)
        args = parser.parse_args()
        return args

    @staticmethod
    def create_directory(logdir):
        """Create ``logdir`` (including parents); an existing directory is OK."""
        os.makedirs(logdir, exist_ok=True)

    @staticmethod
    def run_experiments(args, device, run_path):
        """Train and evaluate one model per study period in [init_sp, end_sp)."""
        DaFinance.create_directory(op.join(run_path, 'output'))
        for i in range(args.init_sp, args.end_sp):
            path = op.join(run_path, 'output/study_period_' + str(i).zfill(2))
            DaFinance.create_directory(path)
            train_loader, valid_loader = DaFinance.augment_dataset(
                i, batch_size=args.batch_size, da_method=args.da_method)
            model = LSTMNet(1, hidden_dim=args.hidden_dim, output_dim=2,
                            n_layers=args.n_layers, device=device,
                            dropout_prob=args.dropout_prob)
            model.to(device)

            if args.data_parallel:
                model = nn.DataParallel(model)
            metrics = DaFinance.train_eval_single_model(
                model, train_loader, valid_loader, args.n_epochs, path, i, device,
                args)
            print(metrics)

    @staticmethod
    def augment_dataset(i_sp, batch_size, da_method, augment_times=1):
        """Load study period ``i_sp``, split off the last 20% of samples as a
        validation set, and optionally augment the training portion.

        Args:
            i_sp: study-period index (selects the .npy files to load).
            batch_size: batch size for both returned loaders.
            da_method: key of ``da_methods_mapping``, or 'None' for no
                augmentation.
            augment_times: number of augmented copies appended to the train set.

        Returns:
            (train_loader, valid_loader) tuple of DataLoaderX instances.
        """
        data_dir = 'data'
        train_x = np.load(op.join(data_dir, 'study_period_X_' + str(i_sp) + '_train.npy'))
        train_y = np.load(op.join(data_dir, 'study_period_Y_' + str(i_sp) + '_train.npy'))

        validation_split = 0.2
        dataset_size = train_x.shape[0]
        # Chronological split: the tail of the arrays becomes validation data.
        split = dataset_size - int(np.floor(validation_split * dataset_size))

        trainX, trainY = train_x[:split], train_y[:split]
        if da_method in ['convolve', 'pool', 'jitter', 'quantize', 'reverse', 'timewarp']:
            # tsaug transforms expose an .augment() method.
            trainX = np.concatenate(
                [trainX, *[da_methods_mapping[da_method].augment(trainX) for _ in range(augment_times)]])
            trainY = np.concatenate([trainY, *[trainY for _ in range(augment_times)]])
        elif da_method in ['magnitude_warp', 'window_warp', 'scaling']:
            # utils.augmentation transforms are plain callables.
            trainX = np.concatenate(
                [trainX, *[da_methods_mapping[da_method](trainX) for _ in range(augment_times)]])
            trainY = np.concatenate([trainY, *[trainY for _ in range(augment_times)]])
        train_loader = DaFinance.build_dataloader(trainX, trainY, batch_size=batch_size)
        valid_loader = DaFinance.build_dataloader(train_x[split:], train_y[split:], batch_size=batch_size)
        return train_loader, valid_loader

    @staticmethod
    def build_dataloader(x_data, y_data, batch_size, shuffle=True):
        """Wrap numpy arrays in a background-prefetching, pinned-memory loader."""
        train_data = TensorDataset(torch.from_numpy(x_data).float(), torch.from_numpy(y_data))
        train_loader = DataLoaderX(
            train_data, shuffle=shuffle, batch_size=batch_size, drop_last=False,
            num_workers=16, pin_memory=True)
        return train_loader

    @staticmethod
    def train_eval_single_model(
            model, train_loader, valid_loader, n_epochs, path, i_sp, device, args):
        """Train ``model`` with early stopping, reload the best checkpoint, and
        evaluate it on study period ``i_sp``.

        Returns:
            metrics produced by ``evaluate_model`` on the best checkpoint.
        """
        logger = SummaryLogger(path)
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.RMSprop(
            model.parameters(), lr=0.001, weight_decay=args.weight_decay)
        # Honour the --patience CLI setting (was hard-coded to n_epochs, which
        # effectively disabled early stopping).
        early_stopping = EarlyStopping(patience=args.patience, verbose=True, path=path)
        prefetcher = DataPrefetcher(train_loader, device)
        print('Start training')

        writer = SummaryWriter(log_dir=path)

        if args.profiling:
            prof = torch.profiler.profile(
                schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
                on_trace_ready=torch.profiler.tensorboard_trace_handler(f'{path}/profiler'),
                record_shapes=True,
                with_stack=True)
            prof.start()
        else:
            prof = None

        for epoch in range(n_epochs):
            loss, acc = epoch_trainer(
                model, prefetcher, optimizer, criterion, logger, device, prof)

            writer.add_scalar('Train/Loss', loss, epoch)
            writer.add_scalar('Train/Acc', acc, epoch)
            valid_loss, valid_acc = epoch_validation(model, valid_loader, logger, device)
            writer.add_scalar('Valid/loss', valid_loss, epoch)
            writer.add_scalar('Valid/Acc', valid_acc, epoch)
            print(epoch, loss, acc, valid_loss, valid_acc)
            early_stopping(valid_loss, model)
            if early_stopping.early_stop:
                print("Early stopping")
                break

        logger.close()
        # Reload the best (lowest validation loss) weights saved by EarlyStopping.
        model_file_name = op.join(path, 'checkpoint.pt')
        model.load_state_dict(torch.load(model_file_name))
        metrics = evaluate_model(model, path, i_sp, device)
        writer.close()

        # Bug fix: original read ``args.sprofiling`` (nonexistent attribute ->
        # AttributeError after every run); also guard against prof being None.
        if prof is not None:
            prof.stop()
        return metrics


# Script entry point: run the full experiment pipeline.
if __name__ == "__main__":
    DaFinance.main()