import h5py 
import numpy as np
import glob
import tqdm
import h5py
import re
import fnmatch
from io import BytesIO
import time

def get_event_data(BASE_STAT='PKU28', CHN='Z', ST_VALID=None, ROOT='./data', DAY=None, NORM=False, DTYPE='float32', FTYPE='H5', SORT=False, WARN=False, UNIT='um/s', DEBUG=False):
    """Load all per-day event files for one base station/channel and stack them.

    Parameters
    ----------
    BASE_STAT : str
        Base-station name used in the file pattern ``BS{BASE_STAT}_C{CHN}_D*.h5``.
    CHN : str
        Channel code(s) used in the file pattern and passed to the loader.
    ST_VALID : list[str] | None
        Station names to extract (defaults to empty list). The caller's list
        is no longer mutated (the previous version did an in-place
        ``ST_VALID.remove(BASE_STAT)`` when SORT was set).
    ROOT : str
        Directory containing the h5 files.
    DAY : str | None
        If given, restrict the glob to files for that day prefix (the file
        list is also printed, matching the original behavior).
    NORM, FTYPE : unused; kept for backward compatibility.
    DTYPE : str
        dtype each day's array is cast to.
    SORT : bool
        If True, reorder the station list so BASE_STAT comes first.
    WARN : bool
        Forwarded to load_event_file_h5 (warn about stations missing from a file).
    UNIT : str
        'um/s' multiplies the data by 1e6 (assumes raw data are in m/s —
        TODO confirm with the file producer).
    DEBUG : bool
        If True, stop after the first file.

    Returns
    -------
    (data_raw, st_valid, VICE_STATs, events_time_all, dates)
        VICE_STATs is always None (kept for interface compatibility).

    Raises
    ------
    FileNotFoundError
        If no matching file yielded any data (previously this surfaced as a
        cryptic ValueError from np.concatenate on an empty list).
    """
    ST_VALID = [] if ST_VALID is None else ST_VALID  # avoid shared mutable default

    if SORT:
        # Put BASE_STAT first without mutating the caller's list.
        st_valid = [BASE_STAT] + [s for s in ST_VALID if s != BASE_STAT]
    else:
        st_valid = ST_VALID
    VICE_STATs = None  # never assigned; kept for interface compatibility

    if DAY is None:
        pattern = ROOT + f"/BS{BASE_STAT}_C{CHN}_D*.h5"
    else:
        pattern = ROOT + f"/BS{BASE_STAT}_C{CHN}_D{DAY}*.h5"
    files = sorted(glob.glob(pattern))
    if DAY is not None:
        print(files)

    d1n = []
    N_each_day = []
    dates = []
    events_time_all = []
    for file in files:
        d, date, events_time = load_event_file_h5(file, st_valid=st_valid, CHN=CHN, WARN=WARN)
        if d is None:
            continue
        if date not in dates:
            d1n.append(d.astype(DTYPE))
            dates.append(date)
            events_time_all.append(events_time)
            print(d.shape, date)
            N_each_day.append(d.shape[0])
        else:
            print('{} is repeated and skipped'.format(file))
        if DEBUG:
            break

    if not d1n:
        raise FileNotFoundError(f'no event data found for pattern {pattern}')
    data_raw = np.concatenate(d1n, axis=0)

    if UNIT == 'um/s':
        data_raw *= 1e6  # m/s -> um/s (assumes raw unit is m/s — TODO confirm)

    return data_raw, st_valid, VICE_STATs, events_time_all, dates

def load_event_file_h5(filename, st_valid, CHN, WARN=False):
    """Load one day's event file and extract the requested stations/channels.

    Parameters
    ----------
    filename : str
        Path to the event h5 file.
    st_valid : list[str]
        Station names, in output order; stations absent from the file stay
        zero-filled (and are optionally warned about).
    CHN : str
        Channel codes to extract, one output slot per character.
    WARN : bool
        Print a message for each station in st_valid not found in the file.

    Returns
    -------
    (data_raw, date, events_time)
        data_raw is np.squeeze of an array shaped
        (ne, len(st_valid), len(CHN), nt); (None, None, None) is returned
        when the file has no 'events_data' dataset.

    Raises
    ------
    ValueError
        If events_data matches neither (events, stations, channels, time)
        nor (events, channels, stations, time).
    """
    print(filename)
    with h5py.File(filename, 'r') as f:
        if 'events_data' not in f.keys():
            # malformed / empty file; the `with` block closes the handle
            return None, None, None

        CHN_f = f['CHN'][()].decode()   # channels actually stored in the file
        nc = len(CHN_f)
        ns = f['ns'][()]                # number of stations in the file
        ne = f['ne'][()]                # number of events in the file

        stats = [name.decode('utf-8') for name in f['stats'][()]]

        events_time = f['events_time'][:]
        date = f['DAY'][()]

        data = f['events_data']
        _, n2, n3, nt = data.shape
        # output buffer; stations missing from the file remain zero
        dd = np.zeros([ne, len(st_valid), len(CHN), nt], dtype=data.dtype)

        idx_all = []        # station indices within the file's stats list
        idx_st_valid = []   # corresponding positions within st_valid
        for i, sts in enumerate(st_valid):
            if sts in stats:
                idx_st_valid.append(i)
                idx_all.append(stats.index(sts))
            elif WARN:
                print("\n {} in st_valid is not found and it is filled with 0".format(sts))
        # h5py fancy indexing requires an increasing index list, so sort
        # idx_all and carry idx_st_valid along with it.
        idx_sorted = sorted(zip(idx_all, idx_st_valid), key=lambda x: x[0])
        idx_all = [i[0] for i in idx_sorted]
        idx_st_valid = [i[1] for i in idx_sorted]

        for k, chn_k in enumerate(CHN):
            print(f'reading {chn_k}|{len(idx_all)} stats', end='\n', flush=True)
            # events_data axis order varies between files: either
            # (events, channels, stations, time) or (events, stations,
            # channels, time). The channel-first layout is tested first to
            # preserve the original precedence when ns == nc (the old code's
            # second `if` overrode the first).
            if n2 == nc and n3 == ns:
                traces = data[:, CHN_f.index(chn_k), idx_all, :]
            elif n2 == ns and n3 == nc:
                traces = data[:, idx_all, CHN_f.index(chn_k), :]
            else:
                # previously `traces` stayed unbound here, raising a confusing
                # NameError (or silently reusing the previous channel's data)
                raise ValueError(
                    f'unexpected events_data shape {data.shape}: expected '
                    f'station/channel axes of sizes {ns} and {nc}')

            dd[:, idx_st_valid, k, :] = traces

        data_raw = np.squeeze(dd)

        # Datasets written by the producer of these files, for reference:
        #   events_time, stats, events_data, BASE_STAT, DAY,
        #   HST_DIRECTION, CHN, ns, ne, dt

    return data_raw, date, events_time

def load_continue_data(file_name, stat_read=None):
    """Load continuous data from an h5 file.

    If *stat_read* is given, only that station's row is returned and the
    returned station list contains just that name; otherwise the whole
    'data' dataset is read.

    Returns (data, stats, date, CHN, dt).
    Raises ValueError if *stat_read* is not present in the file.
    """
    with h5py.File(file_name, 'r') as f:
        stats = [name.decode('utf-8') for name in f['stats'][()]]

        if stat_read is None:
            data = f['data'][:]
        else:
            if stat_read not in stats:
                raise ValueError('no such stat in file')
            row = stats.index(stat_read)
            data = f['data'][row:row + 1, :]
            stats = [stat_read]

        date = f['date'][()]
        CHN = f['CHN'][()]
        dt = f['DT'][()]

    return data, stats, date, CHN, dt

def save_continue_data(file_name, data, stats, DATE, CHN, DT):
    """Write continuous data plus its metadata to *file_name* (overwrites).

    *data* must be 3-D, shaped (n_stations, n_days, n_samples), with one
    station row per entry of *stats*. Returns 1 on success.
    """
    n_stations, _n_days, _n_samples = data.shape
    assert n_stations == len(stats)

    payload = {
        'data': data,
        'stats': np.array(stats, dtype='S'),
        'date': DATE,
        'CHN': CHN,
        'DT': DT,
    }
    with h5py.File(file_name, 'w') as f:
        for name, value in payload.items():
            f.create_dataset(name, data=value)
    return 1

def save_h5_data(file_name='', data_in_dict=None, h5file=None, group_name='', attrs_dict=None, mode='w'):
    """Write datasets (and optionally attributes) into an h5 file or group.

    Parameters:
    file_name: path of the h5 file to open (ignored when h5file is given)
    data_in_dict: {name: data} datasets to write; existing datasets with the
        same name are deleted and recreated (h5py cannot overwrite in place)
    h5file: already-open handle; used as-is and NOT closed here
    group_name: optional group to write into (created if missing)
    attrs_dict: optional {name: value} attributes set on the target group
    mode: file-open mode when opening by name (default 'w' truncates)

    Returns 1 on success.
    """
    # `data_in_dict={}` as a default is a shared mutable object; use None.
    data_in_dict = {} if data_in_dict is None else data_in_dict

    if h5file is None:
        f = h5py.File(file_name, mode)
    else:
        f = h5file

    try:
        if group_name:
            if group_name not in f:
                f.create_group(group_name)
            target_group = f[group_name]
        else:
            target_group = f

        for key, value in data_in_dict.items():
            if key in target_group:
                # h5py cannot overwrite an existing dataset in place
                print(f'delete key: {key} for recreate it')
                del target_group[key]
            target_group.create_dataset(f'{key}', data=value)

        # attach attributes to the group/file
        if attrs_dict is not None:
            for key, value in attrs_dict.items():
                target_group.attrs[key] = value
    finally:
        # close only handles we opened ourselves, even if a write failed
        if h5file is None:
            f.close()

    return 1

def read_h5_data(file_name='', keys=None, bar=False, FILL_NONE=False, FILL_VALUE=0, h5file=None, group_name='', read_attrs=False):
    '''
    Read datasets from an h5 file (or an already-open handle).

    Parameters:
    file_name: path of the h5 file (ignored when h5file is given)
    keys: dataset names to read; None/empty prints the available keys and
          returns an empty list
    bar: show a tqdm progress bar while reading
    FILL_NONE: fill missing keys with FILL_VALUE instead of raising
    FILL_VALUE: value used for a missing key when FILL_NONE is True
    h5file: open h5 handle; used as-is and never closed here
    group_name: optional group to read from instead of the file root
    read_attrs: also return the target group's attributes

    Returns:
    list of data, or (data, attrs) when read_attrs=True; None when
    group_name does not exist in the file.

    Raises:
    IOError: the file cannot be opened
    KeyError: a key is missing and FILL_NONE is False
    '''
    # `keys=[]` as a default is a shared mutable object; use None sentinel.
    keys = [] if keys is None else keys

    if h5file is not None:
        f = h5file
    else:
        try:
            f = h5py.File(file_name, 'r')
        except OSError as e:
            # was a bare `except:` that also hid unrelated errors
            raise IOError(f'file not found: {file_name}') from e

    # resolve the group to read from
    if group_name:
        target_group = f[group_name] if group_name in f else None
    else:
        target_group = f

    if target_group is None:
        print(f'group {group_name} not found in h5 file {file_name}')
        if h5file is None:
            f.close()
        return None

    # read attributes up front if requested
    attrs_data = dict(target_group.attrs) if read_attrs else None

    # no keys requested: list what is available and return empty
    if len(keys) == 0:
        print(f'the key list in {file_name} group {group_name}:', list(target_group.keys()))
        if h5file is None:
            f.close()
        return [] if not read_attrs else ([], attrs_data)

    IT = tqdm.tqdm(keys) if bar else keys

    data = []
    for key in IT:
        try:
            key_data = target_group[key][()]
        except KeyError:
            group_path = f"{group_name}/" if group_name else ""
            if FILL_NONE:
                key_data = FILL_VALUE
                print(f"key '{group_path}{key}' not found in h5 file {file_name}, filling with {FILL_VALUE}")
            else:
                raise KeyError(f"key '{group_path}{key}' not found in h5 file {file_name}")
        data.append(key_data)

    if h5file is None:
        f.close()

    return (data, attrs_data) if read_attrs else data

def get_h5_keys(file_name):
    """Return the top-level item names of an h5 file as a list."""
    with h5py.File(file_name, 'r') as handle:
        return list(handle.keys())

def h5glob(h5_filename, pattern=None, regex_pattern=None, level=None, object_type=None, group_name='', STRIP_GROUP_NAME=False):
    """
    Glob-like search over the items of an HDF5 file.

    Parameters:
    h5_filename: path to the HDF5 file
    pattern: fnmatch-style wildcard pattern (e.g. "241014.*")
    regex_pattern: regular-expression pattern (matched with re.match)
    level: limit on hierarchy depth — items whose path contains `level` or
        more '/' separators are skipped. NOTE(review): with level=0 this
        skips EVERY item (count('/') >= 0 is always true); the original doc
        said "0 means root level", which would require `> level` instead.
        Confirm the intended semantics before relying on level=0.
    object_type: filter by type: 'group', 'dataset', or None for both
    group_name: search inside this group instead of the file root
    STRIP_GROUP_NAME: if True, do not prefix results with group_name

    Returns:
    sorted list of matched item paths
    """
    matched_items = []
    
    def visit_func(name, obj):
        # filter by object type
        if object_type == 'group' and not isinstance(obj, h5py.Group):
            return
        if object_type == 'dataset' and not isinstance(obj, h5py.Dataset):
            return
            
        # filter by hierarchy depth (see NOTE in docstring about level=0)
        if level is not None and name.count('/') >= level:
            return
            
        # wildcard match
        if pattern and not fnmatch.fnmatch(name, pattern):
            return
            
        # regular-expression match
        if regex_pattern and not re.match(regex_pattern, name):
            return
        
        if group_name and not STRIP_GROUP_NAME:
            matched_items.append(f'{group_name}/{name}')
        else:
            matched_items.append(name)
    
    with h5py.File(h5_filename, 'r') as f:
        if group_name:
            f[group_name].visititems(visit_func)
        else:
            f.visititems(visit_func)

    
    return sorted(matched_items)

def get_all_matched_items(h5_filename, pattern=None, regex_pattern=None, level=None, object_type=None):
    """Unimplemented placeholder mirroring h5glob's filter arguments; returns None. TODO: implement or remove."""
    pass

def item_exists(file_name, item_name):
    '''
    Check whether *item_name* (dataset or group) exists in the h5 file.
    file_name: path of the h5 file
    item_name: name of the item to look for
    Any failure (missing or unreadable file, etc.) yields False rather
    than raising.
    '''
    try:
        with h5py.File(file_name, 'r') as handle:
            found = item_name in handle
    except Exception:
        return False
    return found

def read_h5_group(file_name, group_name):
    """
    Read every dataset and every attribute stored under one group of an
    HDF5 file.

    Parameters:
    file_name: path to the HDF5 file
    group_name: name of the group to read

    Returns:
    datasets: {name: data} for each readable dataset in the group
    attributes: {name: value} for each readable attribute
    (both dicts are empty when the group does not exist)
    """
    datasets = {}
    attributes = {}

    with h5py.File(file_name, 'r') as f:
        if group_name not in f:
            print(f"组 {group_name} 在文件 {file_name} 中未找到")
            return datasets, attributes

        group = f[group_name]

        # collect datasets; unreadable entries are reported and skipped
        for key in group.keys():
            try:
                datasets[key] = group[key][()]
            except Exception as e:
                print(f"无法读取数据集 {key}: {e}")

        # collect attributes; unreadable entries are reported and skipped
        for key in group.attrs.keys():
            try:
                attributes[key] = group.attrs[key]
            except Exception as e:
                print(f"无法读取属性 {key}: {e}")

    return datasets, attributes

class MemoryMappedH5:
    """Read an HDF5 file entirely into memory and expose it via h5py.

    The whole file is copied into a BytesIO buffer, so subsequent h5py reads
    avoid disk I/O. Supports use as a context manager to guarantee cleanup.

    NOTE(review): __init__ (save=True) calls self._save_to_h5() and
    __getitem__ calls self.get_dataset(); neither method is defined in the
    code visible here, so those paths would raise AttributeError unless the
    methods are defined elsewhere — confirm.
    """
    def __init__(self, file_path, load=True, save=False):
        """Initialize the in-memory HDF5 reader.

        file_path: path of the HDF5 file on disk
        load: if True, read the file into memory immediately
        save: if True, call self._save_to_h5() (not defined in visible code)
        """
        self.file_path = file_path
        self.file_data = None     # raw bytes of the file once loaded
        self.memory_file = None   # BytesIO wrapping file_data
        self.h5_file = None       # h5py.File opened on memory_file
        if load:
            self._load_to_memory()
        if save:
            self._save_to_h5()  # NOTE(review): method not defined in visible code

    def _load_to_memory(self):
        """Read the whole file into a BytesIO buffer and open it with h5py."""
        # message: "loading <path> into memory..."
        print(f"正在加载 {self.file_path} 到内存...")
        
        with open(self.file_path, 'rb') as f:
            self.file_data = f.read()
        
        self.memory_file = BytesIO(self.file_data)
        self.h5_file = h5py.File(self.memory_file, 'r')
        
        # message: "done! file size: <MB>"
        print(f"加载完成! 文件大小: {len(self.file_data)/1024/1024:.2f} MB")
    
    def close(self):
        """Release the h5py handle, the BytesIO buffer, and the raw bytes."""
        if self.h5_file:
            self.h5_file.close()
        if self.memory_file:
            self.memory_file.close()
        self.file_data = None
    
    def __enter__(self):
        # context-manager entry: `with MemoryMappedH5(path) as mm: ...`
        return self
    
    def __exit__(self, exc_type, exc_val, exc_tb):
        # always release resources on context exit
        self.close()
    
    def __getitem__(self, key):
        # NOTE(review): get_dataset is not defined in the visible code —
        # verify it exists elsewhere before using mm[key] access.
        return self.get_dataset(key)