import re
from pathlib import Path
from rime_utils.utils.wubi86_8105_map import wubi86_8105_map
from rime_utils.utils.timer import timer
from rime_utils.data.header import get_header_wubi_mini

@timer
def filter_8105(src_dir, out_dir, file_endswith_filter, out_file_name='jk_wubi_zj.dict.yaml'):
    """Merge Wubi dict files from *src_dir*, keep only entries whose every
    character is in ``wubi86_8105_map``, deduplicate (word, code) pairs,
    group output by word length, and write one dict file into *out_dir*.

    Args:
        src_dir: ``Path`` of the directory holding source dict files.
        out_dir: ``Path`` of the directory the merged file is written to.
        file_endswith_filter: only files whose name ends with this suffix
            are loaded; an empty/falsy value loads every file.
        out_file_name: name of the generated file.  Previously this was
            read from a module-level global defined only under
            ``__main__`` (a NameError when imported); it is now a
            parameter whose default matches the old global's value.
    """
    dict_num = 0
    res_dict = {}          # word -> set of codes already emitted for it
    word_len_limit = 27    # entries with longer words are dropped
    line_count_sum = 0

    # Compile once; this pattern is applied to every surviving line.
    tab_split_re = re.compile(r'\t+')

    # Read every matching file's lines up front.
    lines_total = []
    for filepath in src_dir.iterdir():
        if filepath.is_file() and (not file_endswith_filter or filepath.name.endswith(file_endswith_filter)):
            dict_num += 1
            print(f'☑️  已加载第 {dict_num} 份码表 » {filepath}')
            with open(filepath, 'r', encoding='utf-8') as f:
                lines_total.extend(f.readlines())

    # Cheap pre-filter: drop any line whose first character is not in the
    # 8105 map (this also discards headers/comments, which never start
    # with a mapped character).
    filtered_lines = [line for line in lines_total if line and line[0] in wubi86_8105_map]

    # Parse "word<TAB>code[<TAB>weight]" lines and group them by word
    # length so the output is ordered by length.
    word_len_groups = {}
    for line in filtered_lines:
        parts = tab_split_re.split(line.strip())
        if len(parts) < 2:
            continue

        word = parts[0]
        word_len = len(word)
        if word_len > word_len_limit:
            continue

        word_len_groups.setdefault(word_len, []).append(parts)

    # Build the output, deduplicating identical (word, code) pairs; a word
    # may legitimately appear with several distinct codes.
    output_lines = []
    for word_len in sorted(word_len_groups):
        line_count = 0
        for parts in word_len_groups[word_len]:
            word, code = parts[0], parts[1]
            weight = parts[2] if len(parts) >= 3 else '0'

            # Every character of the word must be in the 8105 set, not
            # just the first one checked by the pre-filter above.
            if not all(c in wubi86_8105_map for c in word):
                continue

            codes = res_dict.setdefault(word, set())
            if code not in codes:
                codes.add(code)
                output_lines.append(f"{word}\t{code}\t{weight}\n")
                line_count += 1

        if line_count > 0:
            line_count_sum += line_count
            print(f'✅  » 已合并处理生成 {word_len} 字词语，共计 {line_count} 行')

    print(f'☑️  共生成 {line_count_sum} 行数据')

    # Single buffered write: header first, then all entry lines.
    with open(out_dir / out_file_name, 'w', encoding='utf-8') as o:
        o.write(get_header_wubi_mini(out_file_name))
        o.writelines(output_lines)

if __name__ == '__main__':
    # Project root is two levels above this file.
    proj_dir = Path(__file__).resolve().parent.parent
    print(proj_dir)

    default_src_dir = 'out/cn_dicts'
    default_out_dir = 'out/dist'
    default_file_endswith_filter = ''

    # Prompt for the two directories; an empty answer keeps the default.
    src_dir = input(f"输入文件目录（默认：{default_src_dir}）：").strip() or default_src_dir
    out_dir = input(f"输出文件目录（默认：{default_out_dir}）：").strip() or default_out_dir
    file_endswith_filter = default_file_endswith_filter

    src_dir = proj_dir / src_dir
    out_dir = proj_dir / out_dir

    # NOTE: filter_8105 reads this module-level name when naming its output.
    out_file_name = 'jk_wubi_zj.dict.yaml'
    out_file_path = out_dir / out_file_name

    # Create the output directory first.  parents=True handles a missing
    # parent (the user may type any relative path above), and
    # exist_ok=True replaces the racy exists()-then-mkdir() check, which
    # previously raised FileNotFoundError for nested paths.
    out_dir.mkdir(parents=True, exist_ok=True)

    # Remove a stale output file from a previous run, if any.
    if out_file_path.exists():
        out_file_path.unlink()

    filter_8105(src_dir, out_dir, file_endswith_filter)