# -*- coding: utf-8 -*-
'''
# Designed to convert mdict to csv
# Supported dicts to be parsed include:
    # COCA Frequency 60000.mdx
    # 牛津英汉简明词典.mdx
    # 简明英汉汉英词典.mdx
    # 词根词缀词典.mdx
    # thes.mdx
'''

import os
import re
import subprocess
import pandas as pd
import numpy as np
from itertools import zip_longest
import tkinter as tk
from tkinter import filedialog, messagebox, ttk


def auto_encoding(func):
    """Decorator that retries *func* with a list of common file encodings.

    The wrapped function is called with ``encoding=<candidate>`` until one
    candidate decodes the file successfully.  An encoding explicitly supplied
    by the caller is tried first; previously it was silently overwritten.

    :raises ValueError: if every candidate encoding fails to decode the file.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Common encodings, most likely first.  NOTE: 'ansi' is not a valid
        # Python codec name and is kept only for backward compatibility; the
        # LookupError it raises is treated like any other failed candidate.
        candidates = ['utf-8', 'gbk', 'ansi', 'latin1', 'ISO-8859-1', 'cp1252']
        # Respect an explicitly supplied encoding by trying it first.
        user_encoding = kwargs.get('encoding')
        if user_encoding is not None and user_encoding not in candidates:
            candidates = [user_encoding] + candidates
        last_error = None
        for encoding in candidates:
            kwargs['encoding'] = encoding
            try:
                return func(*args, **kwargs)
            except (UnicodeError, LookupError) as e:
                # Decoding failed (or unknown codec name): try the next one.
                # Other exceptions (e.g. FileNotFoundError) now propagate
                # instead of being misreported as an encoding problem.
                last_error = e
                continue
        # Every candidate failed: surface a single explicit error.
        raise ValueError("无法找到合适的编码来读取文件") from last_error
    return wrapper
# pandas.read_csv wrapped so the file encoding is auto-detected on read.
pd_read_csv = auto_encoding(pd.read_csv)

def get_text_fp(mdx_file_path):
    """Return the path of the txt file extracted from *mdx_file_path*.

    Mirrors the output location used by ``convert_mdict_to_text`` and the
    GUI, which both work under ``./cache`` — the previous ``'../cache'``
    pointed one directory too high and never matched the extracted file.

    :param mdx_file_path: path to the source .mdx/.mdd file
    :return: ``./cache/<basename>.txt``
    """
    file_name = os.path.basename(mdx_file_path)
    return os.path.join('./cache', file_name + '.txt')

def convert_mdict_to_text(mdx_file_path):
    """
    Export an .mdx dictionary to txt using the ``mdict`` CLI.

    Requires mdict_utils (install with ``pip install mdict_utils``).

    :param mdx_file_path: path to the .mdx file
    :return: expected path of the extracted txt file (under ./cache)
    """
    command = [
        'mdict',
        '-x', mdx_file_path,
        '-d', './cache'
    ]
    # Run the argument list directly (shell=False): combining a list with
    # shell=True only executes 'mdict' on POSIX and silently drops the
    # remaining arguments.
    result = subprocess.run(command)
    if result.returncode != 0:
        # Best-effort: report the failure but still return the expected path.
        print('mdict exited with code', result.returncode)
    return get_text_fp(mdx_file_path)

def parse_oxford_simple(mdx_file_path):
    """
    Parse the Oxford Concise English-Chinese dictionary (牛津英汉简明词典).
    :param mdx_file_path: path to the txt file exported from the mdx
    :return: DataFrame with columns ['word', 'pronunciations', 'definitions']
    """
    # Read the exported dictionary txt
    with open (mdx_file_path, 'r', encoding='utf-8') as file:
        content = file.read()

    # One entry per "</>\n"-delimited record
    content_series=pd.Series(content.split("</>\n"))

    # Regular expressions for the fields (this txt uses `N` markup codes)
    word_pattern = r'`1`(.*?)`2`'
    pronunciation_pattern = r'\[`9`(.*?)`2`\]'
    definition_pattern = r'`2`(.*?)`2`\n'

    # Extract the fields.
    # NOTE(review): extractall yields an (entry, match) MultiIndex; dropping
    # level 1 keeps one row per match aligned to the original entry index.
    words = content_series.str.extractall(word_pattern).reset_index(level=1, drop=True)
    pronunciations = content_series.str.extract(pronunciation_pattern).fillna('')
    # Strip the pronunciation first so the definition regex cannot match it
    definitions = content_series.str.replace(pronunciation_pattern,'',regex=True).str.extract(definition_pattern)
    # Remove residual `N` markup codes and <br> tags
    definitions = definitions.replace(r'`[0-9]`','',regex=True).replace(r'<br>','',regex=True)
    # Unescape the HTML entities used in the source
    definitions = definitions.replace(r'&lt;','<',regex=True).replace(r'&gt;','>',regex=True)
    definitions = definitions.replace(r'&quot;','"',regex=True)

    # Assemble the DataFrame; drop rows where every field is missing
    df=pd.concat([words, pronunciations, definitions],axis=1).dropna(how='all')
    df.columns=['word','pronunciations','definitions']
    df['word']=df['word'].str.strip()
    df['definitions']=df['definitions'].str.strip()

    return df

def parse_jmyh(mdx_file_path):
    """
    Parse the Concise English-Chinese / Chinese-English dictionary (简明英汉汉英词典).
    :param mdx_file_path: path to the txt file exported from the mdx
    :return: tuple (df_en, df_cn) -- the English->Chinese half
             ['word','dj','kk','definition'] and the Chinese->English half
             ['cn','en'] as DataFrames
    """

    # Regular expressions for the fields
    word_pattern = r'<span class="DC">(.*?)</span>'
    dj_pattern=r'<span class="CB">D.J.:(.*?)</span>'
    kk_pattern=r'<span class="CB">K.K.:(.*?)</span>'
    pos_pattern = r'<span class="DX">(.*?)</span>'  # NOTE(review): currently unused
    definition_pattern=r'<span class="entryDot">■</span>(.*?)</span>' # may match several times per entry
    cn_pattern=r'<a href="entry://(.*?)">' #(.*?)</a>

    # Read the exported dictionary txt
    with open (mdx_file_path, 'r', encoding='utf-8') as file:
        #for chunk in read_in_chunks(file, chunk_size=1024*1024):
        content = file.read()
    #print('reading chunk')


    #print(len(content))
    content_series=pd.Series(content.split("</>\n"))


    # Extract the fields.
    words = content_series.str.extract(word_pattern)#.reset_index(level=1, drop=True)
    # The file stores all EN->CN entries first, then the CN->EN ones; the
    # entry '一' marks the boundary between the two halves.
    cut_ind=words[words.iloc[:,0]=='一'].index[0]
    words1=words.iloc[:cut_ind]
    words2=words.iloc[cut_ind:]
    pronunciations = content_series.str.extract(dj_pattern)
    phon_dj=pronunciations.iloc[:cut_ind]
    phonetics_kk=content_series.str.extract(kk_pattern)
    phon_kk=phonetics_kk.iloc[:cut_ind]
    definitions_sr = content_series.str.findall(definition_pattern)
    definitions = definitions_sr.iloc[:cut_ind]
    cn_trans=content_series.str.findall(cn_pattern).iloc[cut_ind:]

    # Join the per-entry lists of senses into single strings
    definitions=definitions.str.join(';')
    cn_trans = cn_trans.str.join(',')
    cn_complemantary=definitions_sr.iloc[cut_ind:]
    cn_complemantary=cn_complemantary.str.join(',')

    # Assemble the two DataFrames
    df_en=pd.concat([words1,phon_dj, phon_kk, definitions],axis=1)#.dropna(how='all')
    df_en.columns=['word','dj','kk','definition']
    df_cn=pd.concat([words2, cn_trans,cn_complemantary],axis=1)
    df_cn.columns = ['cn', 'en','complementary']
    df_cn['en']=df_cn['en'].str.strip()#

    # Fill missing values: some entries need a different regex to locate
    # their translation ('complementary').  Oddly, fillna directly on the
    # 'en' column kept failing, and a non-vectorised fix looked too slow, so
    # the workaround is a self-merge followed by fillna on the resulting
    # 'complementary_y' column.
    na_list=["无信义的", "一丘之貉", "一分钱一分货", "三维动画片", "三角债", "上传", "下海", "下网", "下载", "专卖店", "两个基本点", "中专生", "于天佐", "以人为本", "传销", "信息化", "信息港", "军嫂", "减肥中心", "功夫片", "北京", "协商一致", "博客", "卢", "参照年", "叶志钦", "在线", "外资", "大人物", "好高骛远", "学生减负", "学生处", "安居工程", "小康之家", "崔涛", "应试教育", "开放", "徐海龙", "心乱如麻", "心宽体胖", "心急如火", "心慌意乱", "快刀斩乱麻", "总裁助理", "手足口病", "抓大放小", "投标书", "拉拉队", "拍马屁", "拳头产品", "控股公司", "易拉罐", "李晶", "林轩", "武打片", "母夜叉", "潦", "热锅上的蚂蚁", "牛饮", "狐假虎威", "王有存", "瓮中之鳖", "瘪三", "白条", "知识产权", "硅谷", "社会治安情况", "神州六号/神六", "神经不正常", "素质教育", "综合国力", "综合治理", "网上交易平台", "网上冲浪", "网友", "网吧", "胆小如鼠", "自主创新", "舆论导向", "西部大开发", "许可费", "论文答辩", "证券营业部", "赤字", "郝丽丽", "部长级会议", "酒后驾车", "金山词霸", "金玉其外败絮其中", "陈呆希", "陈妤", "黄金时段"]
    df_cn_na=df_cn[df_cn['cn'].isin(na_list)]
    df_cn_merged = pd.merge(df_cn,df_cn_na,on='cn',how='left')
    df_cn_merged.loc[:,'complementary_y']=df_cn_merged.loc[:,'complementary_y'].fillna(df_cn_merged['en_x'])
    df_cn_merged=df_cn_merged[['cn','complementary_y']].dropna()
    df_cn_merged.columns=['cn','en']
    # Drop the publisher's own promotional entries (金山...)
    mask=~df_cn_merged['cn'].str.startswith('金山')
    df_cn=df_cn_merged[mask]

    return df_en,df_cn

def parse_coca6w(mdx_file_path):
    """
    Parse the 'COCA Frequency 60000' dictionary.

    :param mdx_file_path: path to the txt file exported from the mdx
    :return: DataFrame with columns ['word', 'pos', 'rank', 'freq']
    """
    # Read the exported dictionary txt; entries are delimited by "\n</>\n"
    # and the HTML payload sits on the second line of each entry.
    with open (mdx_file_path, 'r', encoding='utf-8') as file:
        content = file.read()
        content_series = pd.Series(content.split("\n</>\n"))
        content_series = content_series.str.split('\n').str[1]

    # Regular expressions for the fields
    word_pattern = r'<div class="word">(.*?)</div>'
    pos_pattern = r'<span class="pos">(.*?)</span>'
    rank_pattern = r'<span class="rank">(.*?)</span>'
    total_pattern = r'<div class="total">(.*?)</div>'

    # findall yields one list per entry (a word can have several POS)
    words = content_series.str.findall(word_pattern).dropna()
    pos_matches = content_series.str.findall(pos_pattern).dropna()
    rank_matches = content_series.str.findall(rank_pattern).dropna()
    total_matches = content_series.str.findall(total_pattern).dropna()

    def fill_to_vec(series, fillvalue=0):
        """Pad the per-entry lists to equal length; return a 2-D array (rows = entries)."""
        filled = list(zip_longest(*series, fillvalue=fillvalue))
        return np.array(filled).T

    # Collapse the per-entry lists into scalars
    words = words.str.join('')
    pos_matches = pos_matches.str.join(',')
    # A word with several POS has several ranks; keep the best (smallest),
    # padding missing slots with a huge value so min() ignores them.
    rank_matches = np.min(fill_to_vec(rank_matches, 100000).astype('int'), axis=1)
    # Total frequency is summed across POS; pad missing slots with 0.
    total_matches = np.sum(fill_to_vec(total_matches, 0).astype('int'), axis=1)
    # NOTE(review): wrapping the numpy results in pd.Series re-indexes them
    # from 0, which assumes dropna() above removed no entries -- confirm on
    # real data.

    # Assemble the DataFrame
    df = pd.concat([words, pos_matches, pd.Series(rank_matches), pd.Series(total_matches)], axis=1).dropna()
    df.columns = ['word', 'pos', 'rank', 'freq']
    return df

def parse_roots_affixes(mdx_file_path):
    """
    Parse the roots-and-affixes dictionary (词根词缀词典).
    :param mdx_file_path: path to the txt file exported from the mdx
    :return: DataFrame with columns ['word', 'roots']
    """
    # Pattern for the etymology note inside each entry
    roots_pattern = r'<p id="note">(.*?)</p>'

    # Read the exported dictionary txt
    with open(mdx_file_path, 'r', encoding='utf-8') as file:
        raw_text = file.read()

    # One row per "\n</>\n"-delimited record
    entries = raw_text.split("\n</>\n")
    df = pd.DataFrame({'content': entries})

    # The headword sits before the first '\n<link'; the remainder is the HTML body
    df[['word', 'content']] = df['content'].str.split('\n<link', expand=True)

    # Each entry may carry several notes; keep them as a list per row
    df['roots'] = df['content'].str.findall(roots_pattern).fillna("")

    return df.drop('content', axis=1).dropna()



def parse_thesaurus(mdx_file_path):
    """
    Parse Thesaurus.com (thes.mdx) and write the result to 'thesaurus.csv'.

    Entries are parsed one HTML line at a time and flushed to chunk CSVs
    under ./Thesaurus every 10000 input lines to bound memory; the chunks
    are then concatenated into a single 'thesaurus.csv' and the chunk
    folder is deleted.

    :param mdx_file_path: path to the txt file exported from the mdx
    :return: None (output is written to disk)
    """
    from lxml import html
    import gc,shutil

    """
    解析 Thesaurus.com
    :param mdx_file_path: 转化mdx后的txt文件的路径
    :return:  解析后dataframe格式的字典数据
    """

    def parse_thesaurus_unit(text):
        """Parse one entry's HTML into a dict with Word / Definition / Synonyms / Antonyms."""
        tree = html.fromstring(text)

        # Extract the headword
        word = tree.xpath('//h1/text()')[0].strip()

        # Extract the definition ("as in ..." labels, comma-joined)
        definitions = tree.xpath('//em[text()="as in"]/following-sibling::strong/text()')
        definition = ', '.join(definitions)

        # Extract synonyms & antonyms: split the HTML into its <h2 ...> sections
        text_list = text.split('<h2 class')
        text_list = [''.join(['<h2 class', text]) for text in text_list[1:]]

        synonyms = {}
        antonyms = []
        for txt in text_list:
            text_tree = html.fromstring(txt)
            header = text_tree.xpath('//h2/text()')[0].strip()
            content = ', '.join(text_tree.xpath('//ul/li/span/a/text()'))

            if header == 'Synonyms':
                # Synonyms are grouped per sub-definition
                sub_definition = text_tree.xpath('//em/following-sibling::strong/text()')[0]
                synonyms[sub_definition] = content
            elif header == 'Antonyms':
                antonyms.append(content)

        result = {
            'Word': word,
            'Definition': definition,
            'Synonyms': synonyms,
            'Antonyms': antonyms
        }

        return result

    output_fp='Thesaurus'
    # Create the folder for the intermediate chunk CSVs
    if not os.path.exists(output_fp):
        os.mkdir(output_fp)

    # Read the exported dictionary txt
    with open(mdx_file_path, 'r', encoding='utf-8') as file:
        lines=file.readlines()
        #for line in file:
            # if i%50000==0:
            #     print('pos:',file.tell())
            #yield line


    # NOTE(review): 'i' is declared global although it appears to be used
    # only inside this function; kept as-is to preserve behavior.
    global i
    i=0
    results=[]
    for line in lines:
        i += 1
        # Only "<link href..."-prefixed lines are dictionary entries
        if line.startswith("<link href"):
            unit=parse_thesaurus_unit(line)
            results.append(unit)
            if i % 5000==0:
                print('Loop', i)
                gc.collect()
            # Flush collected entries to a chunk CSV every 10000 input lines
            # (counts lines, not entries) to bound memory usage
            if i%10000==0:
                fn='thes.mdx_'+str(i)+'.csv'
                file_path = os.path.join(output_fp,fn)
                data=pd.DataFrame.from_dict(results, orient='columns')
                save_csv(data, file_path)
                results.clear()
                gc.collect()

    # Flush any remaining entries
    if results:
        file_path = os.path.join(output_fp, 'thes.mdx_final.csv')
        data = pd.DataFrame.from_dict(results, orient='columns')
        save_csv(data, file_path)

    # Read every chunk CSV under the output folder, concatenate them into
    # one DataFrame and save it as 'thesaurus.csv'
    file_paths = [os.path.join(output_fp, file) for file in os.listdir(output_fp) if file.endswith('.csv')]
    data = pd.concat([pd.read_csv(file_path, encoding='utf-8') for file_path in file_paths], ignore_index=True)
    save_csv(data, 'thesaurus.csv')
    # Remove the chunk folder and all its files
    shutil.rmtree(output_fp)
    return

def save_csv(data, output_file_path='output.csv'):
    """Write *data* (a DataFrame) to a UTF-8 CSV file without the index column."""
    data.to_csv(output_file_path, encoding='utf-8', index=False)


def merge_and_save(output_file_path='merged.csv',fp1=None,fp2=None,on='word',how='left'):
    """
    Merge two CSV files on a key column and save the result.

    :param output_file_path: where to write the merged csv
    :param fp1: left csv path (e.g. coca6w: word,pos,rank,freq)
    :param fp2: right csv path (e.g. oxford simple: word,pronunciation,definition)
    :param on: column name to align on
    :param how: pandas merge method ('left', 'right', 'inner', 'outer')
    :return: the merged DataFrame (also written to *output_file_path*)
    """
    data1 = pd_read_csv(fp1)
    data2 = pd_read_csv(fp2)
    data = data1.merge(data2, on=on, how=how)
    save_csv(data, output_file_path)
    # Return the merged frame so callers can inspect it (previously None
    # was returned implicitly although the GUI assigned the result).
    return data




class DictionaryConverterApp:
    """Tkinter GUI for extracting, parsing and merging mdict dictionaries.

    Three tabs:
      1. extract an .mdx/.mdd file to txt via the mdict_utils CLI,
      2. parse a supported dictionary txt into csv,
      3. merge two csv files on a key column (pandas merge).
    """

    def __init__(self, root):
        self.root = root
        self.root.title("Mdict2Csv Converter")

        # Screen resolution, used to size and center the window
        screen_width = root.winfo_screenwidth()
        screen_height = root.winfo_screenheight()

        # Window is roughly 1/4 of the screen width and 1/3 of its height
        window_width = screen_width // 4
        window_height = screen_height // 3

        # Top-left corner that centers the window on screen
        x = (screen_width - window_width) // 2
        y = (screen_height - window_height) // 2

        # Apply size and position
        self.root.geometry(f"{window_width}x{window_height}+{x}+{y}")

        # Notebook hosting the three tab pages
        self.notebook = ttk.Notebook(self.root)
        self.notebook.pack(fill=tk.BOTH, expand=True)

        # Build the three tabs
        self.create_mdx_tab()
        self.create_parse_tab()
        self.create_merge_tab()

    def create_mdx_tab(self):
        """Build the 'extract mdict' tab (mdx/mdd -> txt)."""
        mdx_frame = ttk.Frame(self.notebook)
        self.notebook.add(mdx_frame, text="提取mdict词典")

        # Title
        mdx_title = tk.Label(mdx_frame, text="提取mdict词典", font=("Helvetica", 14, "bold"))
        mdx_title.pack(anchor=tk.W, padx=20, pady=10)

        # Source file picker
        file_label = tk.Label(mdx_frame, text="选择源词典文件:")
        file_label.pack(anchor=tk.W, padx=20, pady=(0, 5))
        file_frame = tk.Frame(mdx_frame)
        file_frame.pack(anchor=tk.W, padx=20)
        self.file_path = tk.StringVar()
        file_entry = tk.Entry(file_frame, textvariable=self.file_path, width=50)
        file_entry.pack(side=tk.LEFT)
        file_button = tk.Button(file_frame, text="浏览", command=self.select_file)
        file_button.pack(side=tk.LEFT, padx=5)

        # Convert button
        convert_button = tk.Button(mdx_frame, text="提取为txt", command=self.convert_to_txt)
        convert_button.pack(anchor=tk.W, padx=20, pady=(10, 0))

        # Hint text
        note_label = tk.Label(mdx_frame, text="*须安装mdict_utils包（pip install mdict_utils）\n程序会调用cmd终端执行提取任务，提取后的文件可在cache文件夹下找到。", font=("Helvetica", 10), wraplength=400, justify=tk.LEFT)
        note_label.pack(anchor=tk.W, padx=20, pady=(10, 0))

    def create_parse_tab(self):
        """Build the 'parse dictionary' tab (txt -> csv)."""
        parse_frame = ttk.Frame(self.notebook)
        self.notebook.add(parse_frame, text="解析词典")

        # Title
        parse_title = tk.Label(parse_frame, text="解析词典", font=("Helvetica", 14, "bold"))
        parse_title.pack(anchor=tk.W, padx=20, pady=10)

        # Dictionary picker
        dict_label = tk.Label(parse_frame, text="选择词典名称:")
        dict_label.pack(anchor=tk.W, padx=20, pady=(0, 5))
        self.dict_name = tk.StringVar()
        dict_dropdown = ttk.Combobox(parse_frame, textvariable=self.dict_name, width=47)
        dict_dropdown['values'] = ("COCA Frequency 60000.mdx", "牛津英汉简明词典.mdx", "简明英汉汉英词典.mdx","词根词缀词典.mdx","thes.mdx")
        dict_dropdown.current(0)
        dict_dropdown.pack(anchor=tk.W, padx=20)
        parse_button = tk.Button(parse_frame, text="解析并保存为csv", command=self.parse_and_save)
        parse_button.pack(anchor=tk.W, padx=20, pady=(10, 0))

        # Hint text
        note_label = tk.Label(parse_frame, text="*简明英汉汉英词典较大，会出现窗口假死，多等一会即可\n*thes.mdx过大，请耐心等待（实测约10分钟左右）", font=("Helvetica", 10), wraplength=400, justify=tk.LEFT)
        note_label.pack(anchor=tk.W, padx=20, pady=(10, 0))

    def create_merge_tab(self):
        """Build the 'merge dictionaries' tab (csv + csv -> merged.csv)."""
        merge_frame = ttk.Frame(self.notebook)
        self.notebook.add(merge_frame, text="合并词典")

        # Title
        merge_title = tk.Label(merge_frame, text="合并词典", font=("Helvetica", 14, "bold"))
        merge_title.pack(anchor=tk.W, padx=20, pady=10)

        # First csv picker
        csv1_label = tk.Label(merge_frame, text="选择第一个csv文件:")
        csv1_label.pack(anchor=tk.W, padx=20, pady=(0, 5))
        csv1_frame = tk.Frame(merge_frame)
        csv1_frame.pack(anchor=tk.W, padx=20)
        self.csv1_path = tk.StringVar()
        csv1_entry = tk.Entry(csv1_frame, textvariable=self.csv1_path, width=50)
        csv1_entry.pack(side=tk.LEFT)
        csv1_button = tk.Button(csv1_frame, text="浏览", command=lambda: self.select_file(self.csv1_path))
        csv1_button.pack(side=tk.LEFT, padx=5)

        # Second csv picker
        csv2_label = tk.Label(merge_frame, text="选择第二个csv文件:")
        csv2_label.pack(anchor=tk.W, padx=20, pady=(10, 5))
        csv2_frame = tk.Frame(merge_frame)
        csv2_frame.pack(anchor=tk.W, padx=20)
        self.csv2_path = tk.StringVar()
        csv2_entry = tk.Entry(csv2_frame, textvariable=self.csv2_path, width=50)
        csv2_entry.pack(side=tk.LEFT)
        csv2_button = tk.Button(csv2_frame, text="浏览", command=lambda: self.select_file(self.csv2_path))
        csv2_button.pack(side=tk.LEFT, padx=5)

        # Merge method ('how' parameter of pandas merge)
        merge_label = tk.Label(merge_frame, text="选择合并方法:")
        merge_label.pack(anchor=tk.W, padx=20, pady=(10, 5))
        self.merge_method = tk.StringVar(value="left")
        merge_dropdown = ttk.Combobox(merge_frame, textvariable=self.merge_method, width=47)
        merge_dropdown['values'] = ("left", "right", "inner", "outer")
        merge_dropdown.current(0)
        merge_dropdown.pack(anchor=tk.W, padx=20)

        # Key column to align on ('on' parameter of pandas merge)
        align_label = tk.Label(merge_frame, text="输入对齐列名:")
        align_label.pack(anchor=tk.W, padx=20, pady=(10, 5))
        self.align_column = tk.StringVar(value="word")  # default key column
        align_entry = tk.Entry(merge_frame, textvariable=self.align_column, width=50)
        align_entry.pack(anchor=tk.W, padx=20)

        merge_button = tk.Button(merge_frame, text="合并并保存为csv", command=self.merge_files)
        merge_button.pack(anchor=tk.W, padx=20, pady=(10, 0))

        # Hint text
        note_label = tk.Label(merge_frame, text="*参照pandas的merge函数", font=("Helvetica", 10), wraplength=400, justify=tk.LEFT)
        note_label.pack(anchor=tk.W, padx=20, pady=(10, 0))

    def select_file(self, variable=None):
        """Open a file dialog; store the chosen path in *variable* (csv mode)
        or in self.file_path (mdx/mdd mode) when no variable is given."""
        if variable is None:
            file_selected = filedialog.askopenfilename(filetypes=[("MDX/MDD files", "*.mdx *.mdd")])
            self.file_path.set(file_selected)
        else:
            file_selected = filedialog.askopenfilename(filetypes=[("CSV files", "*.csv")])
            variable.set(file_selected)

    def convert_to_txt(self):
        """Extract the selected .mdx/.mdd file to txt under ./cache."""
        file_path = self.file_path.get()
        if not file_path:
            messagebox.showwarning("警告", "请选择词典文件")
            return

        if not file_path.endswith(('.mdx', '.mdd')):
            messagebox.showwarning("警告", "请选择有效的MDX或MDD文件")
            return

        progress = tk.Toplevel(self.root)
        progress.title("进度")
        progress.geometry("300x100")
        progress_label = tk.Label(progress, text="正在转换文件...", font=("Helvetica", 12))
        progress_label.pack(pady=20)
        # Render the progress window before the blocking subprocess call,
        # otherwise it shows up blank until the conversion has finished.
        progress.update()

        convert_mdict_to_text(file_path)
        # (A previous intermediate message here was overwritten on the same
        # frame and never visible; only the final state is set.)
        progress_label.config(text="文件转换完成")
        confirm_button = tk.Button(progress, text="确定", command=progress.destroy)
        confirm_button.pack(pady=10)

    def parse_and_save(self):
        """Parse the selected dictionary's cached txt and save csv output."""
        dict_name = self.dict_name.get()

        # The extraction step places the txt under ./cache as '<name>.txt'
        txt_file_path = os.path.join('./cache', ''.join([dict_name, '.txt']))
        print(txt_file_path)

        if not os.path.exists(txt_file_path):
            messagebox.showwarning("警告", "未找到对应的txt文件，请先转换为txt")
            return

        progress = tk.Toplevel(self.root)
        progress.title("进度")
        progress.geometry("300x100")
        progress_label = tk.Label(progress, text="正在解析文件...", font=("Helvetica", 12))
        progress_label.pack(pady=20)
        # Render the progress window before the (blocking) parse starts.
        progress.update()

        if dict_name == "COCA Frequency 60000.mdx":
            df = parse_coca6w(txt_file_path)
            save_csv(df, f'{dict_name[:-4]}.csv')
        elif dict_name == "牛津英汉简明词典.mdx":
            df = parse_oxford_simple(txt_file_path)
            save_csv(df, f'{dict_name[:-4]}.csv')
        elif dict_name == "简明英汉汉英词典.mdx":
            df_en, df_cn = parse_jmyh(txt_file_path)
            save_csv(df_en, '简明英汉汉英词典(英汉).csv')
            save_csv(df_cn, '简明英汉汉英词典(汉英).csv')
        elif dict_name == "词根词缀词典.mdx":
            df = parse_roots_affixes(txt_file_path)
            save_csv(df, f'{dict_name[:-4]}.csv')
        elif dict_name == "thes.mdx":
            # parse_thesaurus writes its own csv output
            parse_thesaurus(txt_file_path)
        else:
            messagebox.showwarning("警告", "未知的词典名称")
            progress.destroy()
            return

        progress_label.config(text="解析完成，已保存为csv")
        confirm_button = tk.Button(progress, text="确定", command=progress.destroy)
        confirm_button.pack(pady=10)

    def merge_files(self):
        """Merge the two selected csv files and save the result as merged.csv."""
        csv1_path = self.csv1_path.get()
        csv2_path = self.csv2_path.get()
        merge_method = self.merge_method.get()
        align_column = self.align_column.get()

        if not csv1_path or not csv2_path or not align_column:
            messagebox.showwarning("警告", "请选择两个csv文件并输入对齐列名")
            return

        if not os.path.exists(csv1_path) or not os.path.exists(csv2_path):
            messagebox.showwarning("警告", "文件路径无效")
            return

        progress = tk.Toplevel(self.root)
        progress.title("进度")
        progress.geometry("300x100")
        progress_label = tk.Label(progress, text="正在合并文件...", font=("Helvetica", 12))
        progress_label.pack(pady=20)
        # Render the progress window before the (blocking) merge starts.
        progress.update()

        try:
            merge_and_save(fp1=csv1_path, fp2=csv2_path, how=merge_method, on=align_column)
        except Exception as e:
            # Surface the failure in the progress window instead of crashing the GUI.
            progress_label.config(text=f"合并失败：{e}")
            confirm_button = tk.Button(progress, text="确定", command=progress.destroy)
            confirm_button.pack(pady=10)
            return
        progress_label.config(text="文件已合并并保存为merged.csv")
        confirm_button = tk.Button(progress, text="确定", command=progress.destroy)
        confirm_button.pack(pady=10)

def run():
    """Create the Tk root window and launch the converter GUI."""
    window = tk.Tk()
    # Keep a reference: the app instance owns the StringVars that back the widgets.
    app = DictionaryConverterApp(window)
    window.mainloop()

# Launch the GUI only when executed as a script (not on import).
if __name__ == '__main__':
    run()
