import csv
from sentence_transformers import SentenceTransformer
import numpy as np
import jieba
import re
from functools import lru_cache
from concurrent.futures import ThreadPoolExecutor
import time
import tkinter as tk
from tkinter import filedialog, messagebox
from tkinter import ttk
import configparser
import threading
import os
import string


# 缓存嵌入计算结果
@lru_cache(maxsize=128)
def get_embeddings(texts, model):
    """
    Return embedding vectors for *texts*.

    Because of the lru_cache, *texts* must be hashable (callers pass a tuple
    of strings) and results are memoized per (texts, model) pair.  An empty
    input short-circuits to an empty array without touching the model.
    """
    if texts:
        return model.encode(texts)
    return np.array([])


def clean_text(text, lang='chinese'):
    """
    Strip unwanted characters from *text*, keeping word characters,
    whitespace and a whitelist of common punctuation, then collapse runs
    of whitespace into single spaces.
    """
    pattern = (
        r'[^\u4e00-\u9fa5\w\s。！？，.,;①\'\":()\[\]<>%$#@*&+-/\\]'
        if lang == 'chinese'
        else r'[^\w\s,.!?;:\'\"()\[\]<>%$#@*&+-/\\]'
    )
    stripped = re.sub(pattern, '', text)
    return re.sub(r'\s+', ' ', stripped).strip()


def split_into_paragraphs(text):
    """Split *text* on blank lines, dropping whitespace-only paragraphs."""
    paragraphs = text.split('\n\n')
    return list(filter(str.strip, paragraphs))


def split_into_sentences(text):
    """
    Segment a Chinese paragraph into sentences.

    The text is cleaned first, then tokenized with jieba; a sentence ends
    whenever a token is one of the terminators 。！？.  Any trailing tokens
    without a terminator form a final sentence.
    """
    text = clean_text(text, lang='chinese')
    if not text:
        return []
    terminators = "。！？"
    sentences = []
    buffer = []
    for token in jieba.cut(text):
        buffer.append(token)
        if token in terminators:
            sentences.append(''.join(buffer))
            buffer = []
    if buffer:
        sentences.append(''.join(buffer))
    return [s for s in sentences if s.strip()]


def split_keep_punctuation(text, delimiters=',.!?;:'):
    """
    Split *text* into chunks, keeping each chunk's trailing delimiter run
    (and an optional closing quote) attached to it.  A final chunk with no
    trailing delimiter is returned as-is.

    BUG FIX: the "trailing chunk" alternative previously hardcoded the
    default delimiter set ([^",.!?;:]+$) instead of using *delimiters*, so
    custom delimiters silently dropped trailing text that contained default
    punctuation.  It now uses the escaped parameter; behavior with the
    default argument is unchanged.
    """
    escaped = re.escape(delimiters)
    pattern = rf"""
        (["“]?[^{escaped}]+[{escaped}]+["”]?\s*)
        |
        ([^"{escaped}]+$)
    """
    matches = re.findall(pattern, text, re.VERBOSE)
    # findall yields (group1, group2) tuples; exactly one group is non-empty.
    return [''.join(filter(None, group)) for group in matches]


def split_into_fragments(text):
    """
    Split an English paragraph into punctuation-delimited fragments.

    After cleaning and splitting, any fragment that is a lone punctuation
    mark is merged back onto the fragment before it.
    """
    text = clean_text(text, lang='english')
    if not text:
        return []
    raw = split_keep_punctuation(text)
    punctuation = {',', '!', '?', '.', ';', ':'}
    merged = []
    i = 0
    total = len(raw)
    while i < total:
        nxt = raw[i + 1].strip() if i + 1 < total else None
        if nxt is not None and len(nxt) == 1 and nxt in punctuation:
            merged.append(raw[i] + raw[i + 1])
            i += 2
        else:
            merged.append(raw[i])
            i += 1
    return merged


def assign_fragments(chinese_sentences, english_fragments, model):
    """
    Assign contiguous runs of English fragments to Chinese sentences by
    semantic similarity, via a dynamic program over fragment boundaries.

    Returns a list the same length as chinese_sentences; element i is the
    space-joined English text assigned to sentence i ("" if nothing fit).
    """
    if not english_fragments:
        return ["" for _ in chinese_sentences]

    chinese_embeddings = get_embeddings(tuple(chinese_sentences), model)
    num_chinese_sentences = len(chinese_sentences)
    num_english_fragments = len(english_fragments)

    # Precompute embeddings for every contiguous fragment combination
    # (O(n^2) combinations; each embedded once and reused by the DP below).
    fragment_embeddings = {}
    for start in range(num_english_fragments):
        for end in range(start + 1, num_english_fragments + 1):
            combination = ' '.join(english_fragments[start:end])
            if combination.strip():
                fragment_embeddings[(start, end)] = get_embeddings((combination,), model)[0]

    # Initialize the per-sentence assignment and per-fragment usage flags.
    assignment = ["" for _ in range(num_chinese_sentences)]
    used = [False] * num_english_fragments

    # DP table: dp[i][j] is the best cumulative similarity aligning the
    # first i Chinese sentences with the first j English fragments.
    dp = np.full((num_chinese_sentences + 1, num_english_fragments + 1), -np.inf)
    dp[0, 0] = 0

    # Backpointers used to reconstruct the chosen segmentation.
    path = np.empty((num_chinese_sentences + 1, num_english_fragments + 1), dtype=object)

    for i in range(1, num_chinese_sentences + 1):
        for j in range(1, num_english_fragments + 1):
            best_score = -np.inf
            best_k = 0
            # k is the split point: fragments [k, j) go to sentence i-1.
            for k in range(j + 1):
                if (k, j) in fragment_embeddings:
                    comb_embedding = fragment_embeddings[(k, j)]
                    # NOTE(review): plain dot product, not cosine similarity —
                    # assumes the model emits normalized embeddings; confirm.
                    similarity = np.dot(comb_embedding, chinese_embeddings[i - 1])
                    score = dp[i - 1, k] + similarity
                    if score > best_score:
                        best_score = score
                        best_k = k
            dp[i, j] = best_score
            path[i, j] = (i - 1, best_k)

    # Backtrack along the stored path to recover the assignment.
    i = num_chinese_sentences
    j = num_english_fragments
    while i > 0 and j > 0:
        prev_i, prev_j = path[i, j]
        assignment[i - 1] = ' '.join(english_fragments[prev_j:j])
        for idx in range(prev_j, j):
            used[idx] = True
        i = prev_i
        j = prev_j

    # Record which Chinese sentence each English fragment landed in.
    # NOTE(review): matches on word-level substring containment and stops at
    # the first hit, so fragments sharing words can be mis-attributed.
    fragment_assignment = [-1] * num_english_fragments
    for sent_idx, en_sent in enumerate(assignment):
        en_frags = en_sent.split()
        for frag in en_frags:
            for frag_idx, full_frag in enumerate(english_fragments):
                if frag in full_frag:
                    fragment_assignment[frag_idx] = sent_idx
                    break

    # Force-assign any fragments the DP left unused.
    if not all(used):
        for i in range(num_english_fragments):
            if not used[i]:
                # If the previous fragment was used and ends with a comma,
                # append this fragment to that fragment's sentence.
                if i > 0 and used[i - 1] and english_fragments[i - 1].endswith(','):
                    prev_sentence_index = fragment_assignment[i - 1]
                    new_combination = ' '.join([assignment[prev_sentence_index], english_fragments[i]])
                    comb_embedding = get_embeddings((new_combination,), model)[0]
                    similarity = np.dot(comb_embedding, chinese_embeddings[prev_sentence_index])
                    # Weight similarity by how close the word-length ratio is to 1.
                    # NOTE(review): `score` is computed but never used on this
                    # branch — the merge happens unconditionally. Also, Chinese
                    # sentences rarely contain spaces, so .split() is usually
                    # length 1; verify the ratio is meaningful.
                    len_ratio = len(new_combination.split()) / len(chinese_sentences[prev_sentence_index].split())
                    score = similarity * (1 - 0.1 * abs(len_ratio - 1))
                    assignment[prev_sentence_index] = new_combination
                    used[i] = True
                else:
                    # Otherwise attach the fragment to whichever sentence
                    # scores best after appending it.
                    best_score = -np.inf
                    best_sentence_index = 0
                    for j in range(num_chinese_sentences):
                        new_combination = ' '.join([assignment[j], english_fragments[i]])
                        comb_embedding = get_embeddings((new_combination,), model)[0]
                        similarity = np.dot(comb_embedding, chinese_embeddings[j])
                        # Weight similarity by how close the word-length ratio is to 1.
                        len_ratio = len(new_combination.split()) / len(chinese_sentences[j].split())
                        score = similarity * (1 - 0.1 * abs(len_ratio - 1))
                        if score > best_score:
                            best_score = score
                            best_sentence_index = j
                    assignment[best_sentence_index] = ' '.join([assignment[best_sentence_index], english_fragments[i]])
                    used[i] = True

    return assignment


def process_paragraph(en_para, ch_para, model):
    """
    Align one English paragraph against one Chinese paragraph and return
    a list of (english_text, chinese_sentence) pairs.
    """
    english_fragments = split_into_fragments(en_para)
    chinese_sentences = [s for s in split_into_sentences(ch_para) if s]

    aligned = assign_fragments(chinese_sentences, english_fragments, model)
    return list(zip(aligned, chinese_sentences))


def load_model():
    """
    Load a SentenceTransformer, trying in order: the path from config.ini,
    known model folders in the current directory, then a `models` folder at
    each Windows drive root.  Shows an error dialog and returns None if no
    model is found.
    """
    parser = configparser.ConfigParser()
    try:
        parser.read('config.ini')
        configured = parser.get('Model', 'model_path')
        if os.path.exists(configured):
            return SentenceTransformer(configured)
    except (configparser.NoSectionError, configparser.NoOptionError, FileNotFoundError):
        pass

    # Look for bundled models next to the working directory.
    candidates = ['paraphrase-multilingual-mpnet-base-v2', 'all-MiniLM-L6-v2']
    cwd = os.getcwd()
    for name in candidates:
        local = os.path.join(cwd, name)
        if os.path.exists(local):
            return SentenceTransformer(local)

    # Scan every drive letter for a top-level `models` directory (Windows).
    for letter in string.ascii_uppercase:
        root_dir = f"{letter}:\\"
        if not os.path.exists(root_dir):
            continue
        for name in candidates:
            candidate = os.path.join(root_dir, 'models', name)
            if os.path.exists(candidate):
                return SentenceTransformer(candidate)

    messagebox.showerror("错误", "未找到预训练模型，请在config.ini中设置正确的模型路径。")
    return None


def enhanced_alignment(english_text, chinese_text, model, use_thread_pool=True):
    """
    Align English and Chinese texts paragraph-by-paragraph.

    Paragraphs are paired positionally (zip semantics: extras in the longer
    text are ignored).  Returns (aligned_pairs, elapsed_seconds), where
    aligned_pairs is the flattened list of (english, chinese) tuples.

    BUG FIX: the thread-pool branch previously called
    list(executor.map(...), total=..., desc=...) — leftover tqdm keyword
    arguments that make list() raise `TypeError: list() takes no keyword
    arguments`, so the default path always crashed.  The stray kwargs are
    removed.
    """
    start_time = time.time()
    english_paragraphs = split_into_paragraphs(english_text)
    chinese_paragraphs = split_into_paragraphs(chinese_text)

    if use_thread_pool:
        with ThreadPoolExecutor() as executor:
            results = list(
                executor.map(
                    process_paragraph,
                    english_paragraphs,
                    chinese_paragraphs,
                    [model] * len(english_paragraphs),
                )
            )
    else:
        results = [
            process_paragraph(en_para, ch_para, model)
            for en_para, ch_para in zip(english_paragraphs, chinese_paragraphs)
        ]

    aligned_pairs = [pair for sublist in results for pair in sublist]
    elapsed_time = time.time() - start_time
    return aligned_pairs, elapsed_time


def save_to_csv(aligned_pairs, file_path):
    """
    Write the aligned (English, Chinese) pairs to *file_path* as UTF-8 CSV,
    preceded by an 'English,Chinese' header row.
    """
    with open(file_path, 'w', newline='', encoding='utf-8') as out:
        writer = csv.writer(out)
        writer.writerow(['English', 'Chinese'])
        writer.writerows(aligned_pairs)


def select_english_file():
    """Prompt for an English .txt file and place its path in the entry box."""
    chosen = filedialog.askopenfilename(filetypes=[("Text files", "*.txt")])
    if not chosen:
        return
    english_file_entry.delete(0, tk.END)
    english_file_entry.insert(0, chosen)


def select_chinese_file():
    """Prompt for a Chinese .txt file and place its path in the entry box."""
    chosen = filedialog.askopenfilename(filetypes=[("Text files", "*.txt")])
    if not chosen:
        return
    chinese_file_entry.delete(0, tk.END)
    chinese_file_entry.insert(0, chosen)


def select_save_path():
    """Prompt for an output .csv path and place it in the save entry box."""
    chosen = filedialog.asksaveasfilename(defaultextension=".csv", filetypes=[("CSV files", "*.csv")])
    if not chosen:
        return
    save_file_entry.delete(0, tk.END)
    save_file_entry.insert(0, chosen)


def perform_alignment():
    """
    Validate the form inputs, then run the alignment pipeline (read files,
    load model, align, save CSV) in a background thread so the GUI stays
    responsive.

    BUG FIX: progress_bar.stop() was only reached on success — any exception
    during alignment left the indeterminate bar animating forever.  The stop
    is now in a `finally` so the bar always halts.
    """
    english_file = english_file_entry.get()
    chinese_file = chinese_file_entry.get()
    save_file = save_file_entry.get()
    use_thread_pool = thread_pool_var.get()

    if not english_file or not chinese_file:
        messagebox.showerror("Error", "请选择英文和中文文本文件。")
        return
    if not save_file:
        messagebox.showerror("Error", "请选择保存文件的路径和文件名。")
        return

    def alignment_task():
        # Runs off the main thread; reports progress and results via dialogs.
        try:
            with open(english_file, 'r', encoding='utf-8') as f:
                english_text = f.read()
            with open(chinese_file, 'r', encoding='utf-8') as f:
                chinese_text = f.read()

            model = load_model()
            if model is None:
                return
            progress_bar.start()
            try:
                aligned_pairs, elapsed_time = enhanced_alignment(english_text, chinese_text, model, use_thread_pool)
            finally:
                # Always stop the animation, even if alignment raised.
                progress_bar.stop()

            save_to_csv(aligned_pairs, save_file)

            messagebox.showinfo("完成", f"已完成对齐并保存，用时: {elapsed_time:.4f} 秒")
        except Exception as e:
            messagebox.showerror("Error", f"发生错误: {str(e)}")

    # NOTE(review): tkinter widgets (progress_bar, messagebox) are touched
    # from this worker thread; tkinter is not guaranteed thread-safe —
    # consider marshalling UI updates through root.after(...). Left as-is.
    task_thread = threading.Thread(target=alignment_task)
    task_thread.start()


# Build the main window
root = tk.Tk()
root.title("英中文本对齐工具")

# Query the screen dimensions
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()

# Size the window relative to the screen (1/4 width, 1/3 height)
window_width = screen_width // 4
window_height = screen_height // 3
root.geometry(f"{window_width}x{window_height}")

# Frame centered in the window; all controls are gridded inside it
frame = tk.Frame(root)
frame.place(relx=0.5, rely=0.5, anchor=tk.CENTER)

# English source-file picker (label + entry + browse button)
english_file_label = tk.Label(frame, text="选择英文文本文件:")
english_file_label.grid(row=0, column=0, pady=5, sticky='w')
english_file_entry = tk.Entry(frame, width=50)
english_file_entry.grid(row=0, column=1, pady=5, padx=5)
english_file_button = tk.Button(frame, text="选择文件", command=select_english_file)
english_file_button.grid(row=0, column=2, pady=5)

# Chinese source-file picker
chinese_file_label = tk.Label(frame, text="选择中文文本文件:")
chinese_file_label.grid(row=1, column=0, pady=5, sticky='w')
chinese_file_entry = tk.Entry(frame, width=50)
chinese_file_entry.grid(row=1, column=1, pady=5, padx=5)
chinese_file_button = tk.Button(frame, text="选择文件", command=select_chinese_file)
chinese_file_button.grid(row=1, column=2, pady=5)

# Output CSV path picker
save_file_label = tk.Label(frame, text="选择保存文件的路径和文件名:")
save_file_label.grid(row=2, column=0, pady=5, sticky='w')
save_file_entry = tk.Entry(frame, width=50)
save_file_entry.grid(row=2, column=1, pady=5, padx=5)
save_file_button = tk.Button(frame, text="选择路径", command=select_save_path)
save_file_button.grid(row=2, column=2, pady=5)

# Checkbox toggling the thread-pool code path in enhanced_alignment (default on)
thread_pool_var = tk.BooleanVar()
thread_pool_var.set(True)
thread_pool_checkbox = tk.Checkbutton(frame, text="启用线程池加速", variable=thread_pool_var)
thread_pool_checkbox.grid(row=3, column=0, columnspan=3, pady=10, sticky='w')

# Button that kicks off perform_alignment
align_button = tk.Button(frame, text="对齐英中文本", command=perform_alignment)
align_button.grid(row=4, column=0, columnspan=3, pady=20, sticky='w')

# Indeterminate progress bar, started/stopped by the alignment worker
progress_bar = ttk.Progressbar(frame, mode='indeterminate')
progress_bar.grid(row=5, column=0, columnspan=3, pady=10, sticky='we')

# Usage hint shown below the controls
tip_label = tk.Label(frame, text="*本工具基于语义相似度对齐中英文本，请先在config.ini中设置预训练模型的路径")
tip_label.grid(row=6, column=0, columnspan=3,pady=10,sticky='w')

# Enter the tkinter event loop
root.mainloop()