import glob
import hashlib
import json
import os
import pathlib
import shutil
import mne
import yaml
from functools import lru_cache
from typing import Iterable, Iterator, List, Optional, Callable, Tuple

import h5py
import numpy as np

from util.decorator import file_decorator

def loadYAML(path):
    '''Parse a YAML file with the safe loader and return its contents.'''
    with open(path, 'r', encoding='utf-8') as f:
        # safe_load == load(..., Loader=SafeLoader): never constructs arbitrary objects
        return yaml.safe_load(f)

def loadJSON(path):
    '''Load a JSON file; if it does not exist yet, create it as an empty object first.'''
    if not os.path.exists(path):
        writeJSON({}, path)  # bootstrap a valid (empty) JSON document
    with open(path, encoding='utf-8') as fp:
        return json.load(fp)

def writeJSON(data, path):
    '''Write *data* to a JSON file; when the file exists, merge new keys over the old ones.'''
    if os.path.exists(path):
        # shallow merge: incoming keys win over previously stored ones
        data = {**loadJSON(path), **data}
    with open(path, 'w', encoding='utf-8') as fp:
        json.dump(data, fp, ensure_ascii=False, indent=4)

def create_path(path):
    '''Create *path* on disk: a directory when it has no file extension, otherwise an empty file.

    Bug fixed: ``Path.is_dir()`` is False for paths that do not exist yet, so the
    original mkdir branch was dead code — a brand-new directory path was
    ``touch()``-ed into a regular file instead.
    '''
    path = pathlib.Path(path)
    if path.exists():
        if path.is_file():
            path.touch()  # preserve original behavior: refresh mtime of an existing file
        return  # existing directory: nothing to do (matches old mkdir(exist_ok=True))
    if path.suffix:
        # Looks like a file path: ensure the parent directory exists, then create the file.
        path.parent.mkdir(parents=True, exist_ok=True)
        path.touch()
    else:
        # No extension: treat as a directory path.
        path.mkdir(parents=True, exist_ok=True)

def remove_path(path):
    '''Delete *path* — a whole tree for directories, unlink for files; missing paths are ignored.'''
    if not os.path.exists(path):
        return
    remover = shutil.rmtree if os.path.isdir(path) else os.unlink
    remover(path)

def checkout_h5_repeat(h5_path, show:Optional[str]=None):
    '''Check whether the h5 file's ``data`` rows contain duplicates.

    Prints each pair of duplicated row indices. ``show`` optionally selects an
    extra field to print per pair: a string containing 'label' or 'data';
    ``None`` (the default) prints indices only.

    Returns:
        (unique_data, label): the de-duplicated rows and their matching labels.

    Bugs fixed vs. the original:
      * ``ValueError`` was constructed but never raised for an unsupported ``show``.
      * the default ``show=None`` crashed on ``'label' in show`` whenever a
        duplicate existed.
    '''
    data, label = read_h5(h5_path)
    repeat_num = data.shape[0] - len(np.unique(data, axis=0))
    print(f'repeat: {repeat_num}/{data.shape[0]} = {repeat_num/data.shape[0]}')
    unique_data, unique_index = np.unique(data, axis=0, return_index=True)
    # A row is a repeat if its index is not the first occurrence of its value.
    first_occurrences = set(unique_index.tolist())  # O(1) membership instead of O(n) scans
    repeat_index = [i for i in range(data.shape[0]) if i not in first_occurrences]
    for ri in repeat_index:
        rd = data[ri, :]
        for i in range(len(data)):
            if i != ri and np.all(rd == data[i]):
                print(f'{ri} <==> {i}')
                if show is None:
                    pass  # indices only
                elif 'label' in show:
                    print('label: {} <==> {}'.format(label[ri], label[i]))
                elif 'data' in show:
                    print('data: \n{}'.format(rd))
                else:
                    raise ValueError(f'Unsupported field for "show": {show}')
        print('-'*20)
    label = label[unique_index]
    return unique_data, label

def read_h5(h5_path):
    '''Load the full 'data' and 'label' datasets of an h5 file into memory and return them.'''
    with h5py.File(h5_path, 'r') as f:
        # [:] materializes each dataset as a numpy array before the file closes
        return f['data'][:], f['label'][:]

# def label_to_array(label):
#     '''待议'''
#     dtype = np.dtype([('label', np.int8), ('id', h5py.string_dtype())])
#     label = np.array(label, dtype=dtype)
#     return label

def find_edf_paths(*root_paths:str):
    '''Collect .edf/.EDF file paths under each root, keeping only directories that contain an Event subfolder.'''
    # Directory must have an 'Event' child for its files to be included.
    detectors = [(Detector.dirnames, lambda dirs: 'Event' in dirs)]
    found = []
    for root in root_paths:
        found += list(search_file(root, detectors=detectors, exts=['edf', 'EDF']))
    return found

def find_tags_path(edf_path:str):
    '''Locate the .tags file matching an edf file: ``<dir>/Event/<basename>.tags``.

    Returns the tags path if it exists, else None.

    Bugs fixed vs. the original:
      * ``rsplit('/', 1)`` raised ValueError for paths without a separator and
        mishandled Windows-style separators — replaced with ``os.path.split``.
      * ``file.split('.')[0]`` truncated multi-dot names ("a.b.edf" -> "a");
        ``os.path.splitext`` strips only the real extension.
    '''
    root, name = os.path.split(edf_path)
    base, _ext = os.path.splitext(name)
    tags_path = os.path.join(root, 'Event', base + '.tags')
    return tags_path if os.path.exists(tags_path) else None

@lru_cache
def get_SRR(edf_path, new_freq=100, return_n_times=False):
    '''Return the sampling-rate ratio (original sfreq / new_freq) of an edf file.

    When ``return_n_times`` is True, also return the sample count rescaled to
    the new frequency: ``(SRR, n_times / SRR)``.

    Bug fixed: the original read ``raw.n_times`` *after* ``raw.close()``;
    all values are now read before closing, and close is guaranteed by finally.
    Cached: repeated calls with the same arguments skip re-reading the file.
    '''
    raw = mne.io.read_raw_edf(edf_path)
    try:
        SRR = raw.info['sfreq'] / new_freq
        n_times = raw.n_times
    finally:
        raw.close()
    if return_n_times:
        return SRR, n_times / SRR
    return SRR

def add_h5_data(f:h5py.File, key, value):
    '''Append *value* along axis 0 of dataset *key*, creating a resizable dataset on first use.'''
    # (None, ...) maxshape makes axis 0 unbounded; works for 1-D and N-D alike.
    grow_shape = (None,) + value.shape[1:]
    empty_shape = (0,) + value.shape[1:]
    if key in f:
        dset = f[key]
    else:
        dset = f.require_dataset(key, shape=empty_shape, dtype=value.dtype, maxshape=grow_shape)
    old_len = dset.shape[0]
    dset.resize(old_len + value.shape[0], axis=0)
    dset[old_len:] = value

@file_decorator()
def write_h5(h5_path, data, label, mode='a'):
    '''Write ``data``/``label`` arrays into an h5 file.

    mode 'a': append to (resizable) existing datasets via add_h5_data.
    mode 'w': create fresh, fixed-size datasets.

    Raises:
        ValueError: for any other mode (the original silently did nothing).
    '''
    if mode not in ('a', 'w'):
        raise ValueError(f'Unsupported mode: {mode!r}')
    # asarray avoids a copy when the input is already an ndarray
    data = np.asarray(data)
    label = np.asarray(label)
    with h5py.File(h5_path, mode) as f:
        if mode == 'a':
            add_h5_data(f, 'data', data)
            add_h5_data(f, 'label', label)
        else:  # mode == 'w'
            f.create_dataset('data', data=data)
            f.create_dataset('label', data=label)

class Detector:
    '''Selector constants telling search_file which os.walk component a detector predicate receives.'''
    dirpath = 0   # predicate receives the current directory path (``root``)
    dirnames = 1  # predicate receives the list of subdirectory names (``dirs``)
    filename = 2  # predicate filters individual file names (``files``)

def search_file(path:str,
                detectors:'Iterable[Tuple[Detector, Callable[..., bool]]]'=None,
                exts:Iterable[str]=None) -> Iterator[str]:
    '''
    Search for files under *path*, yielding matching file paths lazily.

    Args:
        path: root directory to walk.
        detectors: custom filters as (Detector constant, predicate) pairs —
            dirpath/dirnames predicates decide whether a directory's files are
            yielded at all; filename predicates filter individual files.
        exts: restrict results to these extensions (with or without the dot).

    Yields:
        file paths (str).

    Bug fixed: with several dirpath/dirnames detectors the original overwrote
    the keep-flag on each detector, so only the LAST one had any effect; the
    results are now AND-ed together. (The annotation referencing Detector is
    quoted so the def no longer evaluates it eagerly.)
    '''
    detectors = list(detectors) if detectors else []  # copy: never mutate the caller's iterable
    if exts:
        dotted = [ext if ext.startswith('.') else f'.{ext}' for ext in exts]
        detectors.append(
            (Detector.filename,
             lambda name: os.path.splitext(name)[1] in dotted)
        )
    for root, dirs, files in os.walk(path):
        keep = True  # does this directory pass every dirpath/dirnames detector?
        for det_type, det_func in detectors:
            if det_type == Detector.dirpath:
                keep = keep and det_func(root)
            elif det_type == Detector.dirnames:
                keep = keep and det_func(dirs)
            elif det_type == Detector.filename:
                files = [file for file in files if det_func(file)]
            if not keep:
                break  # no point running further detectors for this directory
        if not keep:
            continue
        for file in files:
            yield os.path.join(root, file)

def filename_base(path:str) -> str:
    '''Return the file name of *path* without its extension.'''
    name = os.path.basename(path)
    stem, _ext = os.path.splitext(name)
    return stem

def path_shortening(paths:List[str], commonprefix:str=None) -> List[str]:
    '''Make paths short and readable by stripping their common leading prefix.

    Args:
        paths: list of path strings.
        commonprefix: prefix to strip; computed via os.path.commonprefix when None.

    Returns:
        list of paths with the prefix removed from the FRONT only.

    Bug fixed: the original used ``str.replace(commonprefix, '')``, which
    removes the prefix string *everywhere* in the path (e.g. '/a/x/a/y' with
    prefix '/a/' collapsed to 'xy'); only the leading occurrence is stripped now.
    '''
    if commonprefix is None:
        # NOTE: commonprefix is character-based and may stop mid-component.
        commonprefix = os.path.commonprefix(paths)
    if isinstance(commonprefix, pathlib.Path):
        commonprefix = str(commonprefix)
    if not commonprefix.endswith('/'):
        commonprefix += '/'
    cut = len(commonprefix)
    return [p[cut:] if p.startswith(commonprefix) else p for p in paths]

def get_latest_modified_file(path):
    '''Return the most recently modified file matching the glob pattern *path* (None if no match).'''
    matches = glob.glob(path)
    if not matches:
        return None
    # max with getmtime keeps the first-seen file on mtime ties, like the original loop
    return max(matches, key=os.path.getmtime)

def calculate_md5(file:str) -> str:
    '''Return the hex MD5 digest of *file*, streamed in 4 KiB chunks.'''
    digest = hashlib.md5()
    with open(file, 'rb') as f:
        while chunk := f.read(4096):  # empty bytes (EOF) is falsy and ends the loop
            digest.update(chunk)
    return digest.hexdigest()
