import io
import os
import glob
import json
import time
import tqdm
import signal
import argparse
import numpy as np
import pandas as pd

import torch
import torch.utils.data
import torch.nn.functional

import torchvision as tv
from scipy.special import softmax

import ignite.engine as ieng
import ignite.metrics as imet
import ignite.handlers as ihan

from typing import Any
from typing import Dict
from typing import List
from typing import Type
from typing import Union
from typing import Optional

from termcolor import colored

from collections import defaultdict
from collections.abc import Iterable

from ignite_trainer import _utils
from ignite_trainer import _visdom
from ignite_trainer import _interfaces

# Fallback defaults: consumed by _utils.arg_selector when neither the CLI
# arguments nor the JSON config provide a value (see test() below).
# The VISDOM_* / EPOCHS / LOG_INTERVAL constants are not read in this file;
# presumably they mirror the training entry point's defaults — TODO confirm.
VISDOM_HOST = 'localhost'
VISDOM_PORT = 8097
VISDOM_ENV_PATH = 'visdom_env'
BATCH_TRAIN = 128
BATCH_TEST = 1024
WORKERS_TRAIN = 0
WORKERS_TEST = 0
EPOCHS = 100
LOG_INTERVAL = 50
# NOTE(review): hard-coded, machine-specific checkpoint path used as the default
# model location; the portable default is the commented-out expression below.
SAVED_MODELS_PATH = '/media/yaokun/Fast SSD/AI/Competition/iflytek/audio_cls/ESResNet/saved_models/IflyData_MFCC_ESRNA-CV1/IflyData_MFCC_ESRNA-CV1_ESRNA-CV1_performance=0.9355.pt'
#os.path.join(os.path.expanduser('~'), 'saved_models')


def testrun(experiment_name: str,
            model_class: str,
            model_args: Dict[str, Any],
            dataset_class: str,
            dataset_args: Dict[str, Any],
            batch_train: int,
            batch_test: int,
            workers_train: int,
            workers_test: int,
            transforms: List[Dict[str, Union[str, Dict[str, Any]]]],
            saved_models_path: str,
            orig_stdout: Optional[io.TextIOBase] = None):
    """Run inference with a saved checkpoint and write a submission CSV.

    Loads the dataset's test split, applies the test-time transforms,
    softmaxes the model outputs and writes ``(id, label)`` rows, with labels
    shifted to be 1-based.

    Args:
        experiment_name: name of the experiment (kept for interface parity
            with the training runner; unused here).
        model_class: dotted path of the network class to instantiate.
        model_args: keyword arguments for the network constructor.
        dataset_class: dotted path of the dataset class.
        dataset_args: keyword arguments for the dataset constructor.
        batch_train: training batch size (unused here; interface parity).
        batch_test: batch size for the evaluation loader.
        workers_train: training loader workers (unused here; interface parity).
        workers_test: number of workers for the evaluation loader.
        transforms: transform specs; entries with ``'test': True`` (the
            default) are applied at evaluation time.
        saved_models_path: path of the ``.pt`` state-dict checkpoint to load.
        orig_stdout: original stdout stream (unused here; interface parity).
    """
    print(dataset_class, batch_train, batch_test, saved_models_path)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Instantiate only the test-time transforms; the 'test' flag is written
    # back into the spec for consistency with the training runner.
    transforms_test = []
    for idx, transform in enumerate(transforms):
        use_test = transform.get('test', True)

        instantiated = _utils.load_class(transform['class'])(**transform['args'])
        if use_test:
            transforms_test.append(instantiated)

        transforms[idx]['test'] = use_test

    transforms_test = tv.transforms.Compose(transforms_test)

    Dataset: Type = _utils.load_class(dataset_class)

    eval_loader = _utils.get_test_loaders(
        Dataset,
        dataset_args,
        batch_test,
        workers_test,
        transforms_test
    )

    Network: Type = _utils.load_class(model_class)
    model: _interfaces.AbstractNet = Network(**model_args)
    # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
    model.load_state_dict(state_dict=torch.load(saved_models_path, map_location=device), strict=True)
    model = model.to(device)
    model.eval()

    test_preds = []
    paths = []
    with torch.no_grad():
        for x, label, path in eval_loader:
            # Bug fix: was hard-coded x.to("cuda"), which crashed on CPU-only hosts.
            test_preds.append(model(x.to(device)))
            paths.extend(np.array(path))

        logits = torch.cat(test_preds)
        print(logits.size())

        # Single-checkpoint "ensemble": average of softmaxed class scores.
        submission = [logits.cpu().numpy()]
        submission_ensembled = 0
        for sub in submission:
            submission_ensembled += softmax(sub, axis=1) / len(submission)

        # Competition labels are 1-based, argmax is 0-based.
        result = np.argmax(np.array(submission_ensembled), axis=1) + 1

        pd_csv = pd.concat(
            [pd.DataFrame(paths, columns=['id']),
             pd.DataFrame(result, columns=['label'])],
            axis=1
        )
        print(pd_csv)
        # NOTE(review): output path is hard-coded; consider deriving it from
        # saved_models_path so other experiments don't overwrite this file.
        pd_csv.to_csv("./saved_models/IflyData_MFCC_ESRNA-CV1/submission.csv", index=False)
        

def _apply_config_overrides(config: Dict[str, Any], unknown_args: List[str]) -> None:
    """Override nested config values in-place from extra ``--a.b.c value`` CLI args.

    Each unrecognized ``--dotted.key`` argument is resolved against the nested
    ``config`` dict; when the key path exists, the value is re-parsed with the
    type inferred from the current config value and assigned at the leaf.
    Unmatched arguments are reported in red.
    """
    if unknown_args:
        tqdm.tqdm.write('\nParsing additional arguments...')

    args_not_found = list()
    for arg in unknown_args:
        if not arg.startswith('--'):
            continue

        keys = arg.strip('-').split('.')

        # Walk the nested config to locate the value being overridden.
        section = config
        found = True
        for key in keys:
            if key in section:
                section = section[key]
            else:
                found = False
                break

        if not found:
            args_not_found.append(arg)
            continue

        override_parser = argparse.ArgumentParser()

        section_nargs = None
        section_type = type(section) if section is not None else str

        if section_type is bool:
            # argparse's bool('False') is truthy; parse truthiness explicitly.
            # (Fix: the original nested this test inside an identical test.)
            def infer_bool(x: str) -> bool:
                return x.lower() not in ('0', 'false', 'no')

            section_type = infer_bool

        if isinstance(section, Iterable) and section_type is not str:
            # List-valued setting: accept multiple values; fall back to str
            # when the existing elements have mixed types.
            section_nargs = '+'
            element_types = {type(value) for value in section}
            section_type = element_types.pop() if len(element_types) == 1 else str

        override_parser.add_argument(arg, nargs=section_nargs, type=section_type)
        overridden_args, _ = override_parser.parse_known_args(unknown_args)
        overridden_args = vars(overridden_args)

        overridden_key = arg.strip('-')
        overriding_value = overridden_args[overridden_key]

        # Walk again and assign the re-typed value at the leaf.
        section = config
        old_value = None
        for i, key in enumerate(keys, 1):
            if i == len(keys):
                old_value = section[key]
                section[key] = overriding_value
            else:
                section = section[key]

        tqdm.tqdm.write(
            colored(f'Overriding "{overridden_key}": {old_value} -> {overriding_value}', 'magenta')
        )

    if args_not_found:
        tqdm.tqdm.write(
            colored(
                '\nThere are unrecognized arguments to override: {}'.format(
                    ', '.join(args_not_found)
                ),
                'red'
            )
        )


def test():
    """CLI entry point: parse arguments, load JSON config(s), run inference.

    ``--config`` may be a glob; each matching config is run through
    :func:`testrun` in sorted order. Extra ``--dotted.key value`` arguments
    override matching entries inside each config.
    """
    with _utils.tqdm_stdout() as orig_stdout:
        parser = argparse.ArgumentParser()

        parser.add_argument('-c', '--config', type=str, required=True)
        parser.add_argument('-H', '--visdom-host', type=str, required=False)
        parser.add_argument('-P', '--visdom-port', type=int, required=False)
        parser.add_argument('-E', '--visdom-env-path', type=str, required=False)
        parser.add_argument('-b', '--batch-train', type=int, required=False)
        parser.add_argument('-B', '--batch-test', type=int, required=False)
        parser.add_argument('-w', '--workers-train', type=int, required=False)
        parser.add_argument('-W', '--workers-test', type=int, required=False)
        parser.add_argument('-e', '--epochs', type=int, required=False)
        parser.add_argument('-L', '--log-interval', type=int, required=False)
        parser.add_argument('-M', '--saved-models-path', type=str, required=False)
        parser.add_argument('-R', '--random-seed', type=int, required=False)
        parser.add_argument('-s', '--suffix', type=str, required=False)

        args, unknown_args = parser.parse_known_args()

        if args.batch_test is None:
            args.batch_test = args.batch_train

        if args.random_seed is not None:
            # Tag the suffix with the seed and make the run reproducible.
            args.suffix = '{}r-{}'.format(
                '{}_'.format(args.suffix) if args.suffix is not None else '',
                args.random_seed
            )

            np.random.seed(args.random_seed)
            torch.random.manual_seed(args.random_seed)
            if torch.cuda.is_available():
                torch.cuda.manual_seed(args.random_seed)
                torch.backends.cudnn.deterministic = True
                torch.backends.cudnn.benchmark = False

        configs_found = list(sorted(glob.glob(os.path.expanduser(args.config))))
        print(configs_found)
        prog_bar_exps = tqdm.tqdm(
            configs_found,
            desc='Experiments',
            unit='setup',
            file=orig_stdout,
            dynamic_ncols=True
        )

        for config_path in prog_bar_exps:
            # Fix: context manager closes the config file (was json.load(open(...))).
            with open(config_path) as config_file:
                config = json.load(config_file)

            _apply_config_overrides(config, unknown_args)

            config = defaultdict(None, config)

            experiment_name = config['Setup']['name']

            batch_train = int(_utils.arg_selector(
                args.batch_train, config['Setup']['batch_train'], BATCH_TRAIN
            ))
            batch_test = int(_utils.arg_selector(
                args.batch_test, config['Setup']['batch_test'], BATCH_TEST
            ))
            workers_train = _utils.arg_selector(
                args.workers_train, config['Setup']['workers_train'], WORKERS_TRAIN
            )
            workers_test = _utils.arg_selector(
                args.workers_test, config['Setup']['workers_test'], WORKERS_TEST
            )

            saved_models_path = _utils.arg_selector(
                args.saved_models_path, config['Setup']['saved_models_path'], SAVED_MODELS_PATH
            )

            model_class = config['Model']['class']
            model_args = config['Model']['args']

            dataset_class = config['Dataset']['class']
            dataset_args = config['Dataset']['args']

            transforms = config['Transforms']

            tqdm.tqdm.write(f'\nStarting Test "{experiment_name}"\n')

            testrun(
                experiment_name=experiment_name,
                model_class=model_class,
                model_args=model_args,
                dataset_class=dataset_class,
                dataset_args=dataset_args,
                batch_train=batch_train,
                batch_test=batch_test,
                workers_train=workers_train,
                workers_test=workers_test,
                transforms=transforms,
                saved_models_path=saved_models_path,
                orig_stdout=orig_stdout
            )

        prog_bar_exps.close()

    tqdm.tqdm.write('\n')
