import os
import re
import shutil
from datetime import datetime
from enum import Enum
from collections import OrderedDict

import numpy as np
import pandas as pd

from util import *
from src.const import *
from src.norm import *
from src.mytrain import *

def search_model_file(folder:str, project_name:str, model_name:str):
    '''Locate a trained model checkpoint (.pth) and copy it to ./model.pth.

    Args:
        folder: root directory to search under.
        project_name: substring that must appear in the file's directory path.
        model_name: substring that must appear in the file name.

    Raises:
        FileNotFoundError: when no matching .pth file is found.
    '''
    pth_paths = search_file(folder, detectors=[
        (Detector.dirpath, lambda x:project_name in x),
        (Detector.filename, lambda x:model_name in x)
    ], exts=['pth'])
    # next(iter(...)) avoids materializing the whole result and lets us raise
    # a descriptive error instead of a bare IndexError when nothing matches.
    pth_path = next(iter(pth_paths), None)
    if pth_path is None:
        raise FileNotFoundError(
            f'No .pth file matching project={project_name!r}, '
            f'model={model_name!r} under {folder!r}')
    shutil.copy(pth_path, './model.pth')

def get_collection_time(x:Any):
    '''Extract the collection timestamp embedded in an EEG raw-data path.

    Paths are expected to contain a ``<digits>_EEG_Raw`` directory component;
    the leading digits are the collection time.

    Args:
        x: a single path string, or a ``pd.Series`` of path strings.

    Returns:
        The extracted digit string (or a Series of them).  A non-matching
        string yields ``None``, mirroring the NaN produced for Series input
        (previously a non-matching string raised AttributeError).
    '''
    pattern = r'.*?/(\d+)_EEG_Raw/.*'
    if isinstance(x, pd.Series):
        # Series.str.extract returns a DataFrame; column 0 is capture group 1
        x = x.str.extract(pattern)[0]
    elif isinstance(x, str):
        m = re.match(pattern, x)
        # guard: re.match returns None when the path has no <digits>_EEG_Raw part
        x = m.group(1) if m is not None else None
    return x

def _make_test_subset(df:pd.DataFrame, num_marker:str, size:int, 
                      upper_limit:int=1100, lower_limit:int=800):
    '''Interactively sample a test subset whose event total lies in a range.

    Repeatedly draws ``size`` rows without replacement (rows with a zero
    event count excluded) until the summed count falls inside
    ``[lower_limit, upper_limit]``, then asks the user to confirm the pick.

    Args:
        df: candidate files, one row per file.
        num_marker: column holding the per-file event count.
        size: number of rows to sample.
        upper_limit / lower_limit: accepted band for the subset's event sum.

    Returns:
        The confirmed subset as a DataFrame.
    '''
    grand_total = df[num_marker].sum()
    candidates = df.drop(df[df[num_marker]==0].index)
    answer = 'No'
    while answer and answer.lower() not in ['yes', 'y']:
        subtotal = 0
        # resample until the subset's event total lands inside the band
        while not (lower_limit <= subtotal <= upper_limit):
            picked = np.random.choice(candidates.index, size=size, replace=False)
            subset = candidates.loc[picked]
            subtotal = subset[num_marker].sum()
        print(subset)
        print(f'Selection ratio: {subtotal}/{grand_total}')
        answer = input('confirm?yes/no[y/n]: ')
    return subset

def make_info(root:str, event:str='尖慢波', size:int=5, 
              file_desc_path:Optional[str]=None,              
              train_info_path:Optional[str]=None, 
              test_info_path:Optional[str]=None, 
              test_edf_paths:Optional[list[str]]=None) -> None:
    '''Build info files describing the EDF corpus and split it into train/test.

    Scans ``root`` for EDF files that have a companion tags file, collects
    per-file metadata (size, modify/collection time, event count, channel
    type, md5), optionally writes an Excel description, then writes
    train/test info CSVs (deduplicated by md5).

    Args:
        root: directory to scan for EDF files.
        event: event name counted in each tags file.
        size: number of files in the interactively-sampled test subset.
        file_desc_path: if given, write the full description table (xlsx) here.
        train_info_path / test_info_path: if both given, write the split CSVs.
        test_edf_paths: explicit test-file list; skips interactive sampling.
    '''
    class Info(Enum):
        # enum values double as report column names (kept verbatim: they are
        # runtime strings consumed by downstream readers)
        edf_path = '文件(edf)'
        file_size = '大小(MB)'
        modify_time = '修改时间'
        collection_time = '收集时间'
        event_num = f'{event}数量'
        ch_type = '通道类型'
        md5 = 'md5'

    df = pd.DataFrame(columns=[m.value for m in Info])
    for edf_path in find_edf_paths(root):
        tags_path = find_tags_path(edf_path)
        if tags_path is None:
            continue  # no annotations -> nothing to count, skip the file
        event_data = loadJSON(tags_path)['EventData']
        event_num = len([data for data in event_data if data['EventName']==event])
        sample:dict = event_data[0]
        ch_type = '多通道' if sample.get('IsFull') else '单通道'
        collection_time = get_collection_time(edf_path)

        file_size = round(os.path.getsize(edf_path)/(1024*1024), 2)
        timestamp = os.path.getmtime(edf_path)
        modify_time = datetime.fromtimestamp(timestamp)
        # BUG FIX: was '%H:%S' (hour:second); '%H:%M' is the intended hour:minute
        modify_time = modify_time.strftime('%Y/%m/%d, %H:%M')
        md5 = calculate_md5(edf_path)

        df.loc[len(df.index)] = [
            edf_path, file_size, modify_time, collection_time, event_num, ch_type, md5
        ]

    paths = df[Info.edf_path.value].to_list()
    df[Info.edf_path.value] = path_shortening(paths, DATAPATH)
    # BUG FIX: sort_values returns a new frame; the result was being discarded
    df.sort_values(by=Info.collection_time.value, inplace=True)

    if file_desc_path is not None:
        df.to_excel(file_desc_path, index=None)
    if train_info_path is None or test_info_path is None:
        return
    # Drop duplicate files (same md5) and report-only columns
    df.drop_duplicates(subset=Info.md5.value, keep='last', inplace=True)
    df.drop([Info.md5.value, Info.modify_time.value], axis=1, inplace=True)

    if test_edf_paths is None:
        test_subset = _make_test_subset(df, Info.event_num.value, size)
    else:
        test_subset = df[df[Info.edf_path.value].isin(test_edf_paths)]
    train_subset = df[~df.index.isin(test_subset.index)]

    train_subset.to_csv(train_info_path, header=None, index=None)
    test_subset.to_csv(test_info_path, header=None, index=None)

def load_info(root:str, commonpath:str, vid:str) -> list[str]:
    '''Load the info CSV whose name contains ``vid`` and return full EDF paths.

    Args:
        root: directory searched for csv info files.
        commonpath: prefix joined onto each relative path read from the csv.
        vid: version id; the first csv whose path contains it is used.

    Returns:
        Absolute-ish EDF paths (column 0 of the csv, prefixed by commonpath).

    Raises:
        FileNotFoundError: when no csv matching ``vid`` exists under ``root``.
    '''
    search_lis = list(search_file(root, exts=['csv']))
    info_path = next(filter(lambda x:vid in x, search_lis), None)
    if info_path is None:
        # fail loudly instead of leaking a bare StopIteration to the caller
        raise FileNotFoundError(f'No info csv containing {vid!r} under {root!r}')
    df = pd.read_csv(info_path, header=None)
    return list(map(lambda x:os.path.join(commonpath, x), df.loc[:, 0]))

# @lru_cache
def get_predict_ratio(y_pred:Union[np.ndarray, Iterable[int]], label:int=1) -> float:
    '''Return the fraction of predictions equal to ``label``.

    Args:
        y_pred: predictions as an ndarray or any iterable of ints.
            Generators are materialized first, so single-pass inputs work
            (the previous version called ``len()`` on them and failed).
        label: the class label whose prevalence is measured.

    Returns:
        Ratio in [0, 1]; 0.0 for empty input (instead of ZeroDivisionError).
    '''
    if isinstance(y_pred, np.ndarray):
        return float((y_pred == label).mean()) if y_pred.size else 0.0
    # materialize so generators (no len(), single pass) are handled correctly
    y_pred = list(y_pred)
    if not y_pred:
        return 0.0
    return sum(1 for i in y_pred if i == label) / len(y_pred)
    
def _record_manage(rman:RecordManager, **kwargs):
    '''Push one file's test metrics into the record manager and emit a report.

    Expected kwargs: ``support``, ``model_name``, ``file_path``,
    ``predict_ratio``, ``score``.  Call order matters: the extension
    methods configure the record before the report is made.
    '''
    # 'support' is stored so the weighted average below can read it back
    rman.extend_general('support', kwargs['support'], storage=True)
    rman.extend_fixed('model_name', kwargs['model_name'])
    rman['file_path'] = kwargs['file_path']
    rman.extend_mean('predict_ratio', kwargs['predict_ratio'])
    # rman.extend_mean('refd_predict_ratio', kwargs['refd_predict_ratio'])
    # aggregate 'score' as a support-weighted average across files
    rman.extend_replenish(TestAlgorithm.weighted_average, 'score', 'support')
    rman._make_report(kwargs['score'])  # must run after the extension methods

def test(dman:DataManager, model:nn.Module, 
         model_name:str, model_path:str, record_path:str, 
         label:int=1, device:str='cuda', 
         verbose:Optional[bool]=True):
    '''Evaluate ``model`` on the test dataset and record per-file metrics.

    Loads a checkpoint whose weights live under ``state_dict`` with a
    ``model.`` prefix (training-wrapper convention), runs batched inference,
    then groups predictions by ``subject_id`` and scores each file's
    predicted spans against the raw tag spans.

    Args:
        dman: data manager exposing ``test_dataset`` and ``raw_dataset``.
        model: freshly created model instance; weights are loaded here.
        model_name: identifier written into the record.
        model_path: checkpoint path loadable by ``torch.load``.
        record_path: where RecordManager saves its report.
        label: positive class label used for span comparison.
        device: inference device.
        verbose: forwarded to RecordManager.
    '''
    # Load the state dict, stripping the 'model.' prefix from each key
    src_state_dict:dict[str, torch.Tensor] = torch.load(model_path, map_location=device)
    state_dict = OrderedDict()
    for key, value in src_state_dict['state_dict'].items():
        if key.startswith('model.'):
            state_dict[key.replace('model.', '')] = value
    
    model.to(device)
    model.load_state_dict(state_dict)
    # hook_model(model)
    with torch.no_grad():
        model.eval()
        test_dataset = dman.test_dataset
        # print(test_dataset)
        raw_dataset = dman.raw_dataset

        test_loader = DataLoader(test_dataset, batch_size=64)
        y_pred = np.array([], dtype=int)
        for batch_x, _ in test_loader:
            batch_y_pred:torch.Tensor = model(batch_x.to(device))
            # argmax over the class dimension -> predicted labels
            batch_y_pred = batch_y_pred.max(dim=1)[1]
            y_pred = np.append(y_pred, batch_y_pred.cpu().numpy())

        with RecordManager(save_path=record_path, verbose=verbose) as rman:
            start = 0
            # data[1] is each sample's y-dict; assumes dataset iteration order
            # matches the (unshuffled) DataLoader order -- TODO confirm
            extractor = lambda x: (data[1][x] for data in test_dataset)
            # y_true = list(extractor('label'))
            for key, group in itertools.groupby(extractor('subject_id')):
                # consecutive samples sharing a subject_id form one file's group
                group_len = len(list(group))
                pos = slice(start, start+group_len)
                group_y_pred = y_pred[pos]
                start += group_len

                dst = tagscmp_dst(label, group_y_pred)
                src = tagscmp_src(label, raw_dataset, key)
                kwargs = dict(
                    score = TestAlgorithm.virtual_tagscmp(src, dst),
                    support = len(src),
                    model_name = model_name,
                    file_path = key.replace(f'{DATAPATH}/', ''),
                    predict_ratio = get_predict_ratio(group_y_pred, label),
                    # refd_predict_ratio = get_predict_ratio(y_true[pos], label)
                )
                _record_manage(rman, **kwargs)

def initialize_dataset(dman:DataManager, ssman:SearchSpaceManager, size:int=5):
    '''Initialize and register the train/test/raw datasets on the data manager.

    When ``dman.update`` is set, regenerates the train/test info CSVs first.
    Dataset configs and offline transforms are loaded (or interactively
    created) through the search-space manager's presets.

    Args:
        dman: data manager; gains ``train_dataset``/``test_dataset``/``raw_dataset``.
        ssman: search-space manager used for preset load/make retries.
        size: test-subset size forwarded to ``make_info``.
    '''
    namespace = dman.namespace
    train_io_path = dman.path_mark(TRAINPATH)
    test_io_path = dman.path_mark(TESTPATH)
    raw_io_path = dman.path_mark(RAWPATH)
    
    if dman.update:
        train_info_path = dman.path_mark(INFOPATH, namespace.train_id, '.csv')
        test_info_path = dman.path_mark(INFOPATH, namespace.test_id, '.csv')
        make_info(DATAPATH, size=size, train_info_path=train_info_path, test_info_path=test_info_path)
    
    # NOTE(review): main() passes skip_selections=[1, 2] which appears to index
    # interactive choices by position -- do not reorder these retry() calls
    train_config = ssman.retry(
        lambda :ssman.load_preset('train'),
        lambda :ssman.make_preset(main_key='train', subkeys=['mode', 'split_mode', 'pad_mode'], 
                                multi_subvalues=[['train', 'bfill', 'ffill'], 
                                                ['train', 'mlack', 'interp']])
    )
    test_config = ssman.retry(
        lambda :ssman.load_preset('test'),
        lambda :ssman.make_preset(main_key='test', subkeys=['mode', 'split_mode'],
                                multi_subvalues=[['test', 'max']])
    )
        
    test_edf_paths = load_info(INFOPATH, DATAPATH, namespace.test_id)
    train_edf_paths = load_info(INFOPATH, DATAPATH, namespace.train_id)

    offline_transform = ssman.retry(
        lambda x:ssman.load_preset('offline_transform', values=x),
        lambda x:ssman.make_preset('offline_transform', values=x), 
        x = [
            transforms.MinMaxNormalize(axis=1),
            transforms.MeanStdNormalize(axis=1)
        ]
    )
    # the raw dataset keeps untransformed windows for tag-span comparison
    train_dataset = get_dataset(train_io_path, offline_transform=offline_transform, 
                                files=train_edf_paths, **train_config)
    test_dataset = get_dataset(test_io_path, select_label=False, files=test_edf_paths, **test_config)
    raw_dataset = get_dataset(raw_io_path, select_label=False, files=test_edf_paths, mode='raw')
    dman.register('train_dataset', train_dataset)
    dman.register('test_dataset', test_dataset)
    dman.register('raw_dataset', raw_dataset)

def hook_pred(save_name:str, y_pred:List[int], starts:List[int], ends:List[int]):
    '''Debug hook: dump (pred, start, end) rows to ``<save_name>.txt`` and abort.'''
    table = np.column_stack((
        np.asarray(y_pred, dtype=int),
        np.asarray(starts, dtype=int),
        np.asarray(ends, dtype=int),
    ))
    np.savetxt(f'{save_name}.txt', table, fmt='%d')
    exit()

def read_pred(edf_path:str, tags_path:str, step:int=50, window:int=100, unknown:int=-1) -> List[int]:
    '''Reconstruct per-window predictions from a tags file.

    Walks the tagged windows in order, filling untagged gaps (and the tail
    up to the recording length) with ``unknown``.

    Args:
        edf_path: source EDF, used to get the sample-rate ratio and length.
        tags_path: tags file written by the predictor.
        step: hop size in samples between consecutive windows.
        window: window length in samples.
        unknown: label used for windows with no tag.

    Returns:
        Per-window labels; empty list when the tags file has no entries.
    '''
    label_map = get_label_map()
    y_pred, starts, ends = [], [], []
    SRR, n_times = get_SRR(edf_path, return_n_times=True)
    start, wi = 0, None
    for wi in read_tags(tags_path, SRR=SRR):
        # fill the gap before this tagged window with 'unknown' windows
        while wi.start > start:
            y_pred.append(unknown)
            starts.append(start)
            ends.append(start + window)
            start += step

        y_pred.append(label_map[wi.label])
        starts.append(wi.start)
        ends.append(wi.end)
        start += step
    
    # no tags at all -> nothing to reconstruct
    if wi is None:
        return []
    
    # pad 'unknown' windows from the last tag to the end of the recording.
    # NOTE(review): here 'end' advances before starts/ends are appended,
    # unlike the gap loop above -- confirm the asymmetry is intended
    end = wi.end
    while end < n_times:
        y_pred.append(unknown)
        end += step
        starts.append(end - window)
        ends.append(end)
    # hook_pred('temp1', y_pred, starts, ends)
    return y_pred
        
def tagscmp_src_by_files(label:int, edf_path:str, tags_path:str) -> Sequence[Tuple[int, int]]:
    '''Read (start, end) spans of ``label`` events directly from a tags file.

    Not recommended: prefer ``tagscmp_src``, which reads from the dataset.
    NOTE(review): ``get_SRR(...)`` is passed positionally here but by keyword
    (``SRR=``) in ``read_pred`` -- confirm ``read_tags``'s second parameter.
    '''
    return [(wi.start, wi.end) for wi in read_tags(tags_path, get_SRR(edf_path)) if wi.label == label]

def tagscmp_dst(label:int, y_pred:Sequence[int], step:int=50, window:int=100) -> List[Tuple[int, int]]:
    '''Return (start, end) sample spans of each window predicted as ``label``.

    Window ``n`` covers samples ``[n*step, n*step + window)``.  The return
    annotation previously said ``Sequence[int]`` but the function has always
    returned a list of 2-tuples.

    Args:
        label: target class label.
        y_pred: per-window predictions, ordered by window index.
        step: hop size in samples between consecutive windows.
        window: window length in samples.
    '''
    return [(n*step, n*step+window) for n, i in enumerate(y_pred) if i==label]

def extract_y(dataset:Iterable[Tuple[Any, dict]], *keys:str, **limits:Any) -> List[Union[Tuple[Any, ...], Any]]:
    '''Extract ``y``-field values from an ``(x, y)`` dataset.

    Accepts any iterable of ``(x, y)`` pairs (``BaseDataset`` included),
    where ``y`` is a mapping.  The old ``**limits:Dict[str, Any]`` annotation
    wrongly typed each filter value as a dict; each value is arbitrary.

    Args:
        dataset: iterable of ``(x, y)`` pairs.
        *keys: field names to extract; one key yields scalars, several
            yield tuples.  At least one key is required.
        **limits: equality filters -- only rows where ``y[k] == v`` holds
            for every filter are kept.

    Returns:
        One entry per matching row, in dataset order.
    '''
    return [
        (
            tuple(y[key] for key in keys) if len(keys) > 1 else y[keys[0]]
        ) for _, y in dataset 
        if all(y[k] == v for k, v in limits.items())
    ]

def tagscmp_src(label:int, dataset:BaseDataset, edf_path:str) -> Sequence[Tuple[int, int]]:
    '''Read (start_at, end_at) spans of ``label`` events for one file from the
    dataset.  Recommended over ``tagscmp_src_by_files``.'''
    return extract_y(dataset, 'start_at', 'end_at', label=label, subject_id=edf_path)

def virtual_test(dman:DataManager, label:int=1, test_edf_paths:Optional[list[str]]=None, 
                 verbose:bool=True):
    '''Virtual test: score an external predictor script against the raw tags.

    Requires specific objects registered on ``dman`` (namespace, raw_dataset).
    For each test EDF, copies it into the virtual work dir, runs
    ``v1/predict_edf.py`` to produce a tags file (results are cached on disk),
    reconstructs the predictions, and records span-comparison metrics.

    Args:
        dman: data manager with ``namespace`` and ``raw_dataset`` available.
        label: positive class label for span comparison.
        test_edf_paths: explicit EDF list; defaults to the recorded test split.
        verbose: forwarded to RecordManager.
    '''
    namespace = dman.namespace
    raw_dataset = dman.raw_dataset
    model_name = namespace.get_model_id(VIRMODELNAME, has_sign=False)
    resultpath = dman.path_mark(RESULTPATH, ext='.csv')
    
    if test_edf_paths is None:  # reuse the same test files as the recorded split
        test_edf_paths = load_info(INFOPATH, DATAPATH, namespace.test_id)

    eventpath = dman.path_mark(VIRPATH, 'Event', no_mark=True)
    eventpath.mkdir(exist_ok=True)
    with RecordManager(save_path=resultpath, verbose=verbose) as rman:
        # test_dataset = dman.test_dataset
        for i, path in enumerate(test_edf_paths):
            assert isinstance(path, str), f'Incompatible type: {type(path)}'
            edf = dman.path_mark(VIRPATH, ext=f'_{i}.edf')
            if not os.path.exists(edf):
                shutil.copy(path, edf)
            tags = dman.path_mark(eventpath, ext=f'_{i}.tags')
            if not os.path.exists(tags):
                # the external predictor writes '<edf>.tags' next to the copy;
                # move it into the Event directory under our naming scheme
                os.system(f'python v1/predict_edf.py {edf}')
                temp = str(edf).lower().replace('edf', 'tags')
                shutil.move(temp, tags)
            y_pred = read_pred(edf, tags)

            # keep a backup of the human-annotated tags alongside the results
            tagsbak = dman.path_mark(eventpath, ext=f'_{i}.tagsbak')
            if not os.path.exists(tagsbak):
                srctags = find_tags_path(path)
                shutil.copy(srctags, tagsbak)

            dst = tagscmp_dst(label, y_pred)
            src = tagscmp_src(label, raw_dataset, path)
            # y_true = extract_y(test_dataset, 'label', subject_id=path)
            kwargs = dict(
                score = TestAlgorithm.virtual_tagscmp(src, dst),
                support = len(src),
                model_name = model_name,
                file_path = path.replace(f'{DATAPATH}/', ''),
                predict_ratio = get_predict_ratio(y_pred, label),
                # refd_predict_ratio = get_predict_ratio(y_true, label)  # unknown
            )
            _record_manage(rman, **kwargs)

def main_test(dman:DataManager, ssman:SearchSpaceManager, modelfactory:ModelFactory, 
              n_classes:int=3, retrain:bool=False, has_sign:bool=False):
    '''Train (if needed) and test one model produced by ``modelfactory``.

    Args:
        dman: data manager with train/test/raw datasets registered.
        ssman: search-space manager (not used directly in this body).
        modelfactory: creates fresh model instances and names the run.
        n_classes: number of output classes for training.
        retrain: force retraining even when a checkpoint already exists.
        has_sign: mark the model id / result path with a signature.
    '''
    model_name = dman.namespace.get_model_id(modelfactory.model_name, has_sign=has_sign)
    modelpath = dman.path_mark(MODELPATH, model_name, '.pt')
    split_path = dman.path_mark(SPLITPATH)
    resultpath = dman.path_mark(RESULTPATH, ext='.csv', has_sign=has_sign)
    dman.add_marked_path(LTLOGPATH)

    callbacks = create_callbacks(monitor='train_loss', mode='min', 
                                 ckpt_dirpath=MODELPATH,
                                 ckpt_filename=model_name)
    # NOTE(review): this bare main_train() call takes no arguments and runs
    # unconditionally before the checkpoint check -- looks like leftover
    # debug code; confirm it is intentional
    main_train()
    if not os.path.exists(modelpath) or retrain:  # training not finished yet
        train(dataset=dman.train_dataset, split_path=split_path, 
              model=modelfactory.create_model(), model_name=model_name, 
              n_classes=n_classes, callbacks=callbacks)
    test(dman, model=modelfactory.create_model(), model_name=model_name, 
         model_path=modelpath, record_path=resultpath)
    
def main():
    '''Interactive entry point: choose a run mode, initialize datasets, then
    train/test the selected model factories and optionally run virtual_test.'''
    print('What mode did you choose?')
    mode = custom_selection(['None', 'restart'])
    if mode == 'restart':
        # double-check before wiping the version; declining re-opens the menu
        print('Restart or not?')
        select = custom_selection([True, False])
        if not select:
            mode = custom_selection(['None', 'retrain', 'restart'])

    retrain = False
    vid = 'v3'  # version whose related info has already been recorded
    skip_selections = [1, 2]  # selections to skip, 1-based, only within ssman's scope
    has_sign = True  # sign the model and related info
    with DataManager(log_path=LOGPATH, named_path=INFOPATH, vid=vid, mode='slack') as dman:
        log_path = dman.path_mark(INFOPATH, vid, ext='.json', has_sign=has_sign)
        with SearchSpaceManager(preset_path=PRESETPATH, log_path=log_path, 
                                skip_selections=skip_selections, skip_mode='follow') as ssman:
            if mode == 'restart':  # start over from scratch
                dman.remove_version(vid)
                dman.update = True
            
            # elif mode == 'retrain':  # retrain only -- deprecated
            #     dman.remove_version(vid, retain_task='initialize_dataset')
            #     retrain = True

            dman.task('initialize_dataset', initialize_dataset, ssman=ssman, size=20)
            factories = [
                MyModelFactory(), 
                MyViTFactory(), 
                # MyConformerFactory(), 

                MyEEGNetFactory(), 
                MyLSTMFactory()
            ]
            mode = custom_selection(['all'] + factories + ['null'])
            if mode != 'null':
                mfs = factories if mode == 'all' else [mode]
                for mf in mfs:  # all models / a single model
                    # NOTE(review): task name lacks a closing '>' -- confirm
                    dman.task(f'main_test<{mf.__class__.__name__}', main_test, ssman=ssman, 
                              retrain=retrain, modelfactory=mf, has_sign=has_sign)
            
            print('Execute the task<virtual_test>?')
            if custom_selection([False, True]):
                dman.task('virtual_test', virtual_test)

def special_test():
    '''Run virtual_test on one specific collection (20240425_EEG_Raw) only.'''
    special_vid = '20240425_EEG_Raw'
    file_desc_path = INFOPATH.joinpath(f'{special_vid}_file_desc.xlsx')
    if not file_desc_path.exists():
        make_info(DATAPATH, file_desc_path=file_desc_path)
    df = pd.read_excel(file_desc_path)
    # sort files chronologically by the collection time embedded in the path
    edf_paths = df['文件(edf)'].sort_values(key=lambda x:get_collection_time(x))
    test_edf_paths = list(map(lambda x:str(DATAPATH.joinpath(x)), edf_paths))
    sp_raw_path = RAWPATH.joinpath(special_vid)
    raw_dataset = get_dataset(sp_raw_path, select_label=False, files=test_edf_paths, mode='raw')
    with DataManager(log_path=LOGPATH, named_path=INFOPATH, vid=special_vid, mode='slack') as dman:
        dman.register('raw_dataset', raw_dataset)
        # NOTE(review): virtual_test has no 'add_collection_time' parameter --
        # confirm dman.task consumes this kwarg itself
        dman.task('virtual_test', virtual_test, test_edf_paths=test_edf_paths, add_collection_time=True)

# Entry point; one-off alternatives are left commented out for manual use.
if __name__ == '__main__':
    # make_preset(PRESETPATH)
    main()
    # special_test()
