# 处理五笔词库 - 删除非国标 pkg_8105-2023 单字及其所组词语
# - wubi86_meta/wubi86_zj.dict.yaml
# - wubi86_meta/wubi86_jidian.dict.yaml
# - wubi86_meta/wubi86_jidian_extra.dict.yaml
# - wubi86_meta/wubi86_jidian_extra_district.dict.yaml
# 支持：
# ... 按字数合并排序
# ... 按字数分表生成
# 
import os
import sys
import re
import shutil
from pathlib import Path
from rime_utils.data.header import get_header_wubi
from rime_utils.data.char_8105 import char_8105
from rime_utils.utils.timer import timer
from rime_utils.utils.is_chinese_char import is_chinese_char
from collections import defaultdict
import hashlib



@timer
def convert(src_dir, out_dir, file_endswith_filter, multifile_out_mode):
	"""Merge Wubi dictionary files and keep only pkg_8105-2023 entries.

	Reads every file in *src_dir* whose name ends with
	*file_endswith_filter*, keeps only entries whose every character is in
	the GB pkg_8105-2023 set (``char_8105``), and writes them to *out_dir*
	grouped by word length in ascending order.

	:param src_dir: Path of the folder holding the source ``.dict.yaml`` files.
	:param out_dir: Path of the folder the merged dictionaries are written to.
	:param multifile_out_mode: 1 -> one output file per word length;
	                           0 -> a single combined ``wubi86.dict.yaml``.
	"""
	dict_num = 0
	res_dict = {}		# word -> set of codes already emitted for that word
	lines_total = []

	# Load every matching dictionary file from the source folder.
	for file_path in src_dir.iterdir():
		if file_path.is_file() and file_path.name.endswith(file_endswith_filter):
			dict_num += 1
			print('☑️  已加载第 %d 份码表 » %s' % (dict_num, file_path))
			with open(file_path, 'r', encoding='utf-8') as f:
				lines_total.extend(f.readlines())

	# Word lengths processed in ascending order.  NOTE: the old comment
	# claimed 15 characters, but the range actually covers lengths 0-24
	# (length 0 can never match; it is kept for behavioural parity).
	word_len_list = list(range(25))

	# Loop-invariant: convert the mode once, not on every word length.
	multifile_out_mode = int(multifile_out_mode)

	for word_len in word_len_list:
		res = ''
		for line in lines_total:
			# Only process lines starting with a pkg_8105 character.
			if line[0] not in char_8105:
				continue
			line_list = re.split(r'\t+', line.strip())
			if len(line_list) < 2:
				# Malformed line without a code column - skip, don't crash.
				continue
			word = line_list[0]
			code = line_list[1]
			weight = line_list[2] if len(line_list) > 2 else '0'

			# Filter by word length (1, 2, 3, 4 ...) and require every
			# character of the word to be in the pkg_8105 set.
			if len(word) == word_len and all(w in char_8105 for w in word):
				# Emit entries not seen yet, or seen with a different code.
				# BUGFIX: the old code stored one shared set for all words,
				# so a code emitted for any word suppressed the same code
				# for every other word.  Codes are now tracked per word.
				if word not in res_dict or code not in res_dict[word]:
					res += f'{word}\t{code}\t{weight}\n'
					res_dict.setdefault(word, set()).add(code)

		if res.strip():
			# Mode 1: one output file per word length, each with a header.
			if multifile_out_mode == 1:
				with open(out_dir / f'wubi86_{word_len}.dict.yaml', 'a', encoding='utf-8') as o:
					print('✅  » 已合并处理生成 %s 字文件' % word_len)
					o.write(get_header_wubi(f'wubi86_{word_len}.dict.yaml'))
					o.write(res)
			# Mode 0: everything appended to a single output file.
			elif multifile_out_mode == 0:
				with open(out_dir / f'wubi86.dict.yaml', 'a', encoding='utf-8') as o:
					print('✅  » 已合并处理生成 %s 字词语' % word_len)
					if word_len == 1:
						# The header is written exactly once, ahead of the
						# single-character entries.
						o.write(get_header_wubi(f'wubi86.dict.yaml'))
					o.write(res)

def combine(src_dir, out_path='../out/tiger.dict.yaml'):
    """Merge ``tiger.dict.yaml`` from *src_dir* into one sorted user dict.

    Entries are grouped first by word length, then by code length, then
    sorted alphabetically by code within each group, and written to
    *out_path*.

    :param src_dir: Path of the folder containing ``tiger.dict.yaml``.
    :param out_path: destination file; new optional parameter whose default
        preserves the previously hard-coded ``../out/tiger.dict.yaml``.
    """
    print('\n🔜  === 合并到用户词典 ===')

    file_path = src_dir / 'tiger.dict.yaml'
    with open(file_path, 'r', encoding='utf-8') as f:
        lines_total = f.readlines()

    # Drop exact duplicate lines while preserving first-seen order.
    lines_total = list(dict.fromkeys(lines_total))
    print(f"总词条数: {len(lines_total)}")

    entries = []
    for line in lines_total:
        if not is_chinese_char(line[0]):  # skip comments / header lines
            continue
        # BUGFIX/robustness: the old code unpacked exactly three fields and
        # crashed on 2- or 4-column lines, and int() crashed on non-numeric
        # weights.  It also keyed a dict on word+md5 but checked membership
        # of the bare word, a test that was always true - the dict was an
        # identity pass over the already-deduplicated lines, so a plain
        # list preserves its behaviour.
        parts = line.strip().split('\t')
        if len(parts) < 2:
            continue  # malformed line without a code column
        word = parts[0]
        code = parts[1].strip()
        weight = int(parts[2]) if len(parts) > 2 and parts[2].isdigit() else 0
        entries.append((word, code, weight))

    res = ''
    # First level of grouping: by word length.
    word_len_dict = defaultdict(list)
    for word, code, weight in entries:
        word_len_dict[len(word)].append((word, code, weight))

    with open(out_path, 'w', encoding='utf-8') as o:
        for word_len in sorted(word_len_dict):
            # Second level of grouping: by code length.
            code_len_dict = defaultdict(list)
            for word, code, weight in word_len_dict[word_len]:
                code_len_dict[len(code)].append((word, code, weight))

            for code_len in sorted(code_len_dict):
                # Alphabetical order by code within each group.
                group_sorted = sorted(code_len_dict[code_len], key=lambda e: e[1])
                for word, code, weight in group_sorted:
                    res += f'{word}\t{code}\t{weight}\n'
                print(f'✅ 已处理 {word_len} 字词 | 编码长度 {code_len} | 词数 {len(group_sorted)}')

        o.write(res)
    print('合并完成')

if __name__ == '__main__':
	current_dir = Path.cwd()

	# Defaults, overridable from the command line.
	src = '../src'
	out = '../out'
	file_endswith_filter = 'tiger.dict.yaml'
	multifile_out_mode = 0

	out_file = 'tiger.dict.yaml'

	# Command-line options:
	# ... py scripts/wubi86.py [-i src] [-o out] [-f file_endswith_filter] [-m multifile_out_mode]
	argv = sys.argv
	for i, arg in enumerate(argv):
		# BUGFIX: guard i + 1 so a trailing flag with no value no longer
		# raises IndexError.
		if i + 1 >= len(argv):
			break
		if arg == "-i":
			src = argv[i + 1]
		elif arg == '-o':
			out = argv[i + 1]
		elif arg == '-f':
			file_endswith_filter = argv[i + 1]
		elif arg == '-m':
			multifile_out_mode = argv[i + 1]

	src_dir = current_dir / src				# folder holding the source dictionaries
	out_dir =  current_dir / out			# folder the converted Wubi dictionaries go to

	# If the output folder already exists, remove it first (currently disabled).
	# if out_dir.exists():
	# 	shutil.rmtree(out_dir)
	# os.mkdir(out_dir)

	# convert(src_dir, out_dir, file_endswith_filter, multifile_out_mode)
	combine(src_dir)
