import re
from pathlib import Path
from collections import defaultdict
from rime_utils.data.char_8105 import char_8105
from rime_utils.data.pinyin_8105_map import pinyin_8105_map
from rime_utils.utils.timer import timer
from rime_utils.data.header import get_header_pinyin_wx


@timer
def filter_8105(src_dir, out_dir, file_endswith_filter, out_file_name='jk_pinyin.dict.yaml'):
    """Merge Rime dictionary files and keep only entries built from the
    Tongyong Guifan 8105 character set with valid pinyin codes.

    Reads every file in ``src_dir`` whose name ends with
    ``file_endswith_filter``, keeps tab-separated ``word\\tcode\\tweight``
    lines where every character is in the 8105 set and every per-character
    pinyin (the part before ``;``) is listed in ``pinyin_8105_map``, then
    writes deduplicated entries (grouped by word length, shortest first)
    to ``out_dir / out_file_name``.

    Args:
        src_dir: Path to the directory containing source dict files.
        out_dir: Path to the output directory (must already exist).
        file_endswith_filter: Filename suffix filter ('' matches all files).
        out_file_name: Name of the generated dict file. Previously this was
            read as a module-level free variable defined only under the
            ``__main__`` guard, which raised NameError when the function
            was imported and called from another module; it is now a
            backward-compatible keyword parameter.
    """
    dict_num = 0
    lines_total = []
    # Build lookup sets once: O(1) membership tests in the hot loops below.
    char_8105_set = set(char_8105)
    pinyin_8105_map_set = {k: set(v) for k, v in pinyin_8105_map.items()}

    # Load every matching file's lines into one list.
    for filepath in src_dir.iterdir():
        if filepath.is_file() and filepath.name.endswith(file_endswith_filter):
            dict_num += 1
            print(f'☑️  已加载第 {dict_num} 份码表 » {filepath}')
            with open(filepath, 'r', encoding='utf-8') as f:
                lines_total.extend(f.readlines())

    # First pass: parse lines, drop malformed ones, and group by word length.
    lines_by_length = defaultdict(list)
    for line in lines_total:
        parts = line.strip().split('\t')
        if len(parts) < 3:
            continue
        word, code, weight = parts[0], parts[1], parts[2]
        word_len = len(word)

        # Cheap pre-filter: first character must be in the 8105 set
        # (a full per-character check happens in the second pass).
        if not word or word[0] not in char_8105_set:
            continue
        # The code must contain exactly one space-separated part per character.
        code_parts = code.split(' ')
        if len(code_parts) != word_len:
            continue
        lines_by_length[word_len].append((word, code_parts, weight))

    # Second pass: validate fully, deduplicate, and write the output file.
    out_file_path = out_dir / out_file_name
    with open(out_file_path, 'w', encoding='utf-8') as o:
        o.write(get_header_pinyin_wx(out_file_name))
        res_dict = {}
        line_count_sum = 0

        # Iterate lengths 0..25 in order so the output is sorted by word length.
        # NOTE: words longer than 25 characters are silently dropped.
        for word_len in range(26):
            if word_len not in lines_by_length:
                continue
            entries = lines_by_length[word_len]
            line_count = 0

            for word, code_parts, weight in entries:
                # Every character must be in the 8105 set.
                if any(c not in char_8105_set for c in word):
                    continue
                # Every character's pinyin (the part before ';') must be a
                # known reading for that character.
                valid = True
                for i, c in enumerate(word):
                    pinyin_part = code_parts[i].split(';')[0]
                    if pinyin_part not in pinyin_8105_map_set.get(c, set()):
                        valid = False
                        break
                if not valid:
                    continue

                # Deduplicate on (word, code) and write first occurrences only.
                code_str = ' '.join(code_parts)
                codes_seen = res_dict.setdefault(word, set())
                if code_str not in codes_seen:
                    codes_seen.add(code_str)
                    line_count += 1
                    o.write(f"{word}\t{code_str}\t{weight}\n")

            if line_count > 0:
                line_count_sum += line_count
                print(f'✅  已合并处理生成 {word_len} 字词语，共计 {line_count} 行')

        print(f'☑️  共生成 {line_count_sum} 行数据')


if __name__ == '__main__':
    # Resolve project-relative input/output directories from this file's location.
    proj_dir = Path(__file__).resolve().parent.parent
    src_dir = proj_dir / 'dicts/rime-wx/cn_dicts'
    out_dir = proj_dir / 'out'
    out_file_name = 'jk_pinyin.dict.yaml'

    # Ensure the output directory exists; without this, the open(..., 'w')
    # inside filter_8105 fails with FileNotFoundError on a fresh checkout.
    out_dir.mkdir(parents=True, exist_ok=True)

    # Remove any stale output file (missing_ok avoids a racy exists() check).
    out_file_path = out_dir / out_file_name
    out_file_path.unlink(missing_ok=True)

    filter_8105(src_dir, out_dir, file_endswith_filter='')