from typing import Any, Union, Callable, Tuple, Dict, List

import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateFinder, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import DataLoader
from torcheeg import transforms, trainers
from torcheeg.datasets import BaseDataset
from torcheeg.model_selection import KFold, train_test_split
from torcheeg.utils import get_random_dir_path
from torchinfo import summary
import optuna

from src.myintegration import PyTorchLightningPruningCallback
from src.norm import *
from src.const import *
from src.preprocess_v2 import *
from models import *

class MyModelFactory(ModelFactory):
    """Optuna search-space factory for ``MyModel``."""

    def model_class(self):
        """Return the model class this factory builds."""
        return MyModel

    def objective(self, trial: optuna.Trial) -> nn.Module:
        """Sample ``MyModel`` hyper-parameters from *trial* and build the model.

        Fixes the original ``slef`` misspelling of ``self`` (harmless at the
        call sites seen here, which pass ``trial`` by keyword, but wrong).
        """
        hid_chs = trial.suggest_int('hid_chs', 16, 64)
        depth = trial.suggest_int('depth', 2, 8)
        heads = trial.suggest_int('heads', 4, 16)
        head_chs = trial.suggest_int('head_chs', 32, 96)
        mlp_chs = trial.suggest_int('mlp_chs', 32, 96)
        dropout = trial.suggest_float('dropout', 0, 1)
        return MyModel(hid_chs=hid_chs, depth=depth, heads=heads, head_chs=head_chs,
                       mlp_chs=mlp_chs, dropout=dropout)

class MyViTFactory(ModelFactory):
    """Optuna search-space factory for ``MyViT``."""

    def model_class(self):
        """Return the model class this factory builds."""
        return MyViT

    def objective(self, trial: optuna.Trial) -> nn.Module:
        """Sample ``MyViT`` hyper-parameters from *trial* and build the model.

        Fixes the original ``slef`` misspelling of ``self``.
        """
        kwargs = dict(
            hid_channels=trial.suggest_int('hid_channels', 16, 64),
            depth=trial.suggest_int('depth', 2, 8),
            heads=trial.suggest_int('heads', 2, 8),
            head_channels=trial.suggest_int('head_channels', 32, 96),
            mlp_channels=trial.suggest_int('mlp_channels', 32, 96),
            embed_dropout=trial.suggest_float('embed_dropout', 0, 1),
            dropout=trial.suggest_float('dropout', 0, 1),
            pool_func=trial.suggest_categorical('pool_func', ['mean', 'cls'])
        )
        return MyViT(**kwargs)

class MyConformerFactory(ModelFactory):
    """Factory for ``MyConformer``.

    No ``objective`` override here — presumably the base ``ModelFactory``
    supplies a default search space; confirm against ``models``.
    """

    def model_class(self):
        """Return the model class this factory builds."""
        return MyConformer
    
class MyLSTMFactory(ModelFactory):
    """Optuna search-space factory for ``MyLSTM``."""

    def model_class(self):
        """Return the model class this factory builds."""
        return MyLSTM

    def objective(self, trial: optuna.Trial) -> nn.Module:
        """Sample ``MyLSTM`` hyper-parameters from *trial* and build the model.

        Fixes the original ``slef`` misspelling of ``self``.
        """
        kwargs = dict(
            hid_channels=trial.suggest_int('hid_channels', 32, 96),
        )
        return MyLSTM(**kwargs)
    
class MyEEGNetFactory(ModelFactory):
    """Optuna search-space factory for ``MyEEGNet``."""

    def model_class(self):
        """Return the model class this factory builds."""
        return MyEEGNet

    def objective(self, trial: optuna.Trial) -> nn.Module:
        """Sample ``MyEEGNet`` hyper-parameters from *trial* and build the model.

        Fixes the original ``slef`` misspelling of ``self``.
        """
        kwargs = dict(
            F1=trial.suggest_int('F1', 4, 16),
            F2=trial.suggest_int('F2', 8, 32),
            D=trial.suggest_int('D', 2, 5),
            kernel_1=trial.suggest_int('kernel_1', 32, 96),
            kernel_2=trial.suggest_int('kernel_2', 8, 32),
            dropout=trial.suggest_float('dropout', 0, 1)
        )
        return MyEEGNet(**kwargs)

class MyDataset(BaseDataset):
    """EEG dataset built on torcheeg's ``BaseDataset``.

    Raw files under ``root_path`` are turned into fixed windows by
    ``eeg_preprocess`` (see ``process_record``) and cached in an IO store
    (LMDB by default) under ``io_path``.
    """

    def __init__(self,
                 root_path: str = './folder',
                 structure: str = 'subject_in_label',
                 read_fn: Union[None, Callable] = None,  # default_read_fn
                 online_transform: Union[None, Callable] = None,
                 offline_transform: Union[None, Callable] = None,
                 label_transform: Union[None, Callable] = None,
                 io_path: Union[None, str] = None,
                 io_size: int = 1048576,
                 io_mode: str = 'lmdb',
                 num_worker: int = 0,
                 verbose: bool = True,
                 **kwargs):
        """Forward all parameters (plus extra ``kwargs``, e.g. ``files``,
        ``mode``, ``split_mode`` consumed by ``set_records``/``process_record``)
        to torcheeg's ``BaseDataset`` unchanged.
        """
        if io_path is None:
            # No cache directory given: generate a fresh random one.
            io_path = get_random_dir_path(dir_prefix='datasets')

        # pass all arguments to super class
        params = {
            'root_path': root_path,
            'structure': structure,
            'read_fn': read_fn,
            'online_transform': online_transform,
            'offline_transform': offline_transform,
            'label_transform': label_transform,
            'io_path': io_path,
            'io_size': io_size,
            'io_mode': io_mode,
            'num_worker': num_worker,
            'verbose': verbose
        }
        params.update(kwargs)
        super().__init__(**params)
        # save all arguments to __dict__ (repr_body reads them back as attributes)
        self.__dict__.update(params)

    @staticmethod
    def process_record(file: Any = None,
                       offline_transform: Union[None, Callable] = None,
                       read_fn: Union[None, Callable] = None,
                       **kwargs):
        """Yield one dict per EEG window of *file*.

        Windows come from ``eeg_preprocess(file, ...)``; each yielded dict has
        keys ``eeg`` (the window, offline-transformed if a transform is given),
        ``key`` (unique clip id) and ``info`` (metadata row).
        NOTE(review): ``read_fn`` is accepted but unused here — presumably
        reading happens inside ``eeg_preprocess``; confirm.
        """
        mode = kwargs.get('mode')
        split_mode = kwargs.get('split_mode')
        for name, SRR, generator in eeg_preprocess(file, mode=mode, split_mode=split_mode, verbose=False):
            for t_eeg, start, end, label, wid in generator:
                # Clip ids must be unique across the record: name + SRR + window id.
                clip_id = f'{name}_{SRR}_{wid}'
                record_info = {
                    'subject_id': file,
                    'start_at': start,
                    'end_at': end,
                    'clip_id': clip_id,
                    'label': label
                }
                if offline_transform is not None:
                    t_eeg = offline_transform(eeg=t_eeg)['eeg']
                yield {'eeg': t_eeg, 'key': clip_id, 'info': record_info}

    def set_records(self,
                    root_path: str = './folder',
                    structure: str = 'subject_in_label',
                    **kwargs):
        """Return the list of records to process.

        NOTE(review): ``root_path``/``structure`` are ignored here — the record
        list is taken verbatim from ``kwargs['files']``; verify callers always
        supply ``files``.
        """
        files = kwargs.get('files')
        return files

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Return ``(signal, label)`` for the clip at *index*.

        ``signal`` is the (optionally online-transformed) EEG window; ``label``
        is the (optionally label-transformed) metadata value.
        """
        info = self.read_info(index)

        eeg_index = str(info['clip_id'])
        eeg_record = str(info['_record_id'])
        eeg = self.read_eeg(eeg_record, eeg_index)

        signal = eeg
        label = info

        if self.online_transform:
            signal = self.online_transform(eeg=eeg)['eeg']

        if self.label_transform:
            label = self.label_transform(y=info)['y']

        # Patch: coerce float labels to int (presumably for loss/metric
        # functions that expect class indices).
        if isinstance(label, float):
            label = int(label)

        return signal, label

    @property
    def repr_body(self) -> Dict:
        """Attributes shown in the dataset's repr (extends the base repr)."""
        return dict(
            super().repr_body, **{
                'root_path': self.root_path,
                # 'chunk_size': self.chunk_size,
                # 'overlap': self.overlap,
                # 'num_channel': self.num_channel,
                'online_transform': self.online_transform,
                'offline_transform': self.offline_transform,
                'label_transform': self.label_transform,
                # 'before_trial': self.before_trial,
                # 'after_trial': self.after_trial,
                'num_worker': self.num_worker,
                'verbose': self.verbose,
                'io_size': self.io_size
            })

def hook_model(model:nn.Module):
    """Debug hook: print a torchinfo summary of *model*, then stop the run."""
    # NOTE(review): input_size assumes (batch=64, 16 channels, 100 samples) — confirm against the dataset.
    shape = (64, 16, 100)
    summary(model, input_size=shape)
    exit(0)

def hook_loader(loader:DataLoader):
    """Debug hook: inspect one batch (shapes and label counts), then stop the run."""
    batch_data, batch_label = next(iter(loader))
    print(batch_data.shape)
    print(batch_label.shape)
    print(torch.unique(batch_label, return_counts=True))
    exit(0)

def sampling(*datasets, n_sample=64):
    """Take (lazily, per dataset) the first ``n_sample`` items of each dataset.

    Returns a map over *datasets*; each element is a list of at most
    ``n_sample`` items. Unlike the original ``enumerate``-and-filter version,
    ``zip(range(n_sample), ...)`` stops iterating each dataset as soon as
    ``n_sample`` items have been taken, instead of walking the whole dataset.
    """
    return map(lambda ds: [item for _, item in zip(range(n_sample), ds)], datasets)

def create_callbacks(monitor:str='val_loss', mode:str="min",
                     ckpt_dirpath:Optional[str]=None,
                     ckpt_filename:Optional[str]=None,
                     verbose:bool=False):
    """Build the default Lightning callback list.

    Always includes early stopping (patience 5) on *monitor* and an automatic
    LR finder. When both *ckpt_dirpath* and *ckpt_filename* are given, also
    adds a weights-only ModelCheckpoint saving ``.pt`` files.
    """
    result = [
        EarlyStopping(monitor=monitor, mode=mode, patience=5, verbose=verbose),
        LearningRateFinder()
    ]
    # Checkpointing only when a full target path is specified.
    if ckpt_dirpath is None or ckpt_filename is None:
        return result
    create_path(ckpt_dirpath)
    checkpoint = ModelCheckpoint(dirpath=ckpt_dirpath, filename=ckpt_filename,
                                 monitor=monitor, mode=mode,
                                 save_weights_only=True, verbose=verbose)
    checkpoint.FILE_EXTENSION = ".pt"  # save as .pt instead of Lightning's default .ckpt
    result.append(checkpoint)
    return result

def get_dataset(io_path:Optional[str]=None, select_label=True,
                offline_transform:transforms.EEGTransform=None, **kwargs):
    """Construct a MyDataset rooted at './' with standard transforms.

    Online transform is always ToTensor; offline transform defaults to
    per-axis mean/std normalization. When *select_label* is true, the label
    transform extracts the 'label' key from the metadata row.
    """
    if offline_transform is None:
        offline_transform = transforms.MeanStdNormalize(axis=1)
    preset = {
        'online_transform': transforms.ToTensor(),
        'offline_transform': offline_transform,
    }
    if select_label:
        preset['label_transform'] = transforms.Select(key='label')
    mydataset:MyDataset = partial_class_factory(MyDataset, **preset)
    return mydataset(root_path='./', io_path=io_path, **kwargs)

def train(dataset:BaseDataset, split_path:Optional[str],
          model:nn.Module, model_name:str,
          device:str='cuda', n_classes:int=3,
          max_epochs:Optional[int]=None,
          callbacks:Any=None, verbose:bool=True,
          optimize:bool=False, **hparams):
    """Train *model* on a train/val split of *dataset* with a focal-loss trainer.

    When ``verbose``, appends a classification report on the validation set to
    the file ``temp1``. When ``optimize`` (Optuna mode), logs ``hparams`` and
    returns the final ``train_accuracy`` metric after the first (only) split;
    otherwise returns ``None``.
    """
    metric = 'accuracy'
    # cv = KFold(n_splits=5, shuffle=True, random_state=42, split_path=split_path)
    # dataset_gen = cv.split(dataset)
    dataset_gen = train_test_split(dataset=dataset, shuffle=True, random_state=12, split_path=split_path)
    dataset_gen = [dataset_gen]  # wrap in a list so the structure matches a CV generator of (train, val) pairs
    logger = TensorBoardLogger(save_dir='./', version=model_name)
    if optimize: logger.log_hyperparams(hparams)
    for i, (train_dataset, val_dataset) in enumerate(dataset_gen):
        # train_dataset, val_dataset = sampling(train_dataset, val_dataset)
        train_loader = DataLoader(train_dataset, batch_size=64)
        val_loader = DataLoader(val_dataset, batch_size=64)
        # hook_loader(train_loader)
        model.to(device)
        # trainer = trainers.EQLossTrainer(model, n_classes, val_loader, metrics=metrics)
        trainer = trainers.FocalLossTrainer(model, n_classes, val_loader, metrics=[metric],
                                            accelerator=device)
        trainer.fit(train_loader, val_loader, max_epochs=max_epochs, callbacks=callbacks, logger=logger)
        # score = trainer.test(val_loader)[0]  # number of test-set loaders: 1
        # del model  # free the model
        if verbose:
            # Collect per-sample predictions on the validation set for a report.
            with torch.no_grad():
                # NOTE: both names start from the same empty array object, but
                # np.append returns fresh arrays, so there is no aliasing issue.
                y_pred, y_true = [np.array([])]*2
                for batch in val_loader:
                    y_hat:torch.Tensor = trainer.predict_step(batch, None)
                    # argmax over class logits -> predicted class indices
                    y_pred = np.append(y_pred, y_hat.max(dim=1)[1].cpu().numpy())

                    y:torch.Tensor = batch[1]
                    y_true = np.append(y_true, y.cpu().numpy())
            # print(f'Fold {i}', '\n', classification_report(y_true, y_pred))
            # print(trainer.test(val_loader, logger=False)[0])  # [f'test_{metric}']
            with open('temp1', 'a+') as f:
                print(f'<{model_name}>\n', classification_report(y_true, y_pred), file=f)

        if optimize:
            # Optuna objective value: final train accuracy of this run.
            return trainer.trainer.callback_metrics[f'train_{metric}'].item()

def main_train(dataset:BaseDataset, split_path:Optional[str],
               model_factory:ModelFactory, model_name:Optional[str]=None,
               device:str='cuda', n_classes:int=3,
               max_epochs:Optional[int]=None,
               callbacks:Optional[List]=None, verbose:bool=True,
               n_trials:int=3, timeout:Optional[int]=None):
    """Run an Optuna hyper-parameter search for the factory's model.

    Each trial builds a model via ``model_factory.objective`` and trains it
    with :func:`train` (which returns the train-accuracy objective value);
    median pruning monitors ``train_loss``. A summary of the best trial is
    appended to the file ``temp2``.

    Fixes vs. original: removed the no-op ``study.best_params`` statement and
    corrected the ``model_name``/``callbacks`` annotations to ``Optional``.
    """
    pruner = optuna.pruners.MedianPruner()
    study = optuna.create_study(direction='maximize', pruner=pruner)

    model_name = model_name or model_factory.model_name
    callbacks = callbacks or []

    def objective(trial: optuna.Trial) -> float:
        # One full training run per trial; the pruning callback reports
        # intermediate train_loss values back to the study.
        return train(
            dataset=dataset,
            split_path=split_path,
            model=model_factory.objective(trial=trial),
            model_name=model_name,
            callbacks=callbacks + [
                PyTorchLightningPruningCallback(trial, monitor="train_loss")
            ],
            device=device,
            n_classes=n_classes,
            max_epochs=max_epochs,
            verbose=verbose,
            optimize=True
        )

    study.optimize(objective, n_trials=n_trials, timeout=timeout)
    trial = study.best_trial
    with open('temp2', 'a+') as f:
        print(f'model_name: {model_name}', file=f)
        print("Number of finished trials: {}".format(len(study.trials)), file=f)
        print("Best trial:", file=f)
        print(f'  Value: {trial.value}', file=f)
        print(f'  Params: ', file=f)
        for key, value in trial.params.items():
            print(f'    {key}: {value}', file=f)
        print('\n', file=f)

if __name__ == '__main__':
    # Dataset/split version identifier under TRAINPATH / SPLITPATH.
    vid = 'v3'
    dataset = get_dataset(TRAINPATH.joinpath(vid))
    split_path = SPLITPATH.joinpath(vid)
    # MyModelFactory() intentionally left out of the sweep.
    factories = (MyViTFactory(), MyEEGNetFactory(), MyLSTMFactory())
    for factory in factories:
        # Fresh callbacks per factory: EarlyStopping/LR finder carry state.
        main_train(dataset=dataset, split_path=split_path, model_factory=factory,
                   callbacks=create_callbacks(monitor='train_loss', mode='min'),
                   n_trials=100)
