import argparse
import os
import random
import numpy as np
from typing import List, Tuple
from tokenizers import Tokenizer, decoders, normalizers
from tokenizers.models import BPE
from tokenizers.trainers import BpeTrainer
from tokenizers.pre_tokenizers import Metaspace
import datasets
import torch
from tqdm import tqdm


def filter_long_sentences(
    en_lines: List[str], zh_lines: List[str], max_length: int = 128
) -> List[Tuple[str, str]]:
    """Drop sentence pairs where either side exceeds max_length characters.

    Length is measured on the stripped line (surrounding whitespace ignored);
    the returned pairs keep the original, unstripped text.
    """
    kept: List[Tuple[str, str]] = []
    for en, zh in zip(en_lines, zh_lines):
        if len(en.strip()) <= max_length and len(zh.strip()) <= max_length:
            kept.append((en, zh))
    return kept


def filter_length_ratio(
    en_lines: List[str], zh_lines: List[str], ratio_threshold: float = 2.5
) -> List[Tuple[str, str]]:
    """Keep pairs whose stripped-length ratio en/zh lies strictly inside
    (1/ratio_threshold, ratio_threshold).

    Pairs where either side is empty after stripping are dropped.
    """

    def _balanced(en: str, zh: str) -> bool:
        n_en, n_zh = len(en.strip()), len(zh.strip())
        if n_en == 0 or n_zh == 0:
            return False
        ratio = n_en / n_zh
        return 1 / ratio_threshold < ratio < ratio_threshold

    return [(en, zh) for en, zh in zip(en_lines, zh_lines) if _balanced(en, zh)]


def filter_language_purity(
    en_lines: List[str], zh_lines: List[str], min_ratio: float = 0.7
) -> List[Tuple[str, str]]:
    """Keep pairs where each side is dominated by its expected script.

    The English side must be at least min_ratio ASCII letters and the
    Chinese side at least min_ratio CJK ideographs, measured against the
    combined ASCII-letter + CJK count of that side. Pairs with a blank
    side, or a side containing no countable letters at all, are dropped.
    """

    def _counts(text: str) -> Tuple[int, int]:
        # (ASCII-letter count, CJK-ideograph count) for one sentence.
        ascii_n = sum(c.isascii() and c.isalpha() for c in text)
        cjk_n = sum('\u4e00' <= c <= '\u9fff' for c in text)
        return ascii_n, cjk_n

    kept: List[Tuple[str, str]] = []
    for en, zh in zip(en_lines, zh_lines):
        en_s, zh_s = en.strip(), zh.strip()
        if not en_s or not zh_s:
            continue

        en_ascii, en_cjk = _counts(en_s)
        zh_ascii, zh_cjk = _counts(zh_s)
        en_total = en_ascii + en_cjk
        zh_total = zh_ascii + zh_cjk
        if not en_total or not zh_total:
            continue

        # English line should be mostly English; Chinese line mostly Chinese.
        if en_ascii / en_total >= min_ratio and zh_cjk / zh_total >= min_ratio:
            kept.append((en, zh))
    return kept


def filter_by_semantic_similarity(
    en_lines: List[str], zh_lines: List[str], threshold: float = 0.8
) -> List[Tuple[str, str]]:
    """Keep pairs whose LaBSE [CLS] embeddings have cosine similarity >= threshold.

    Downloads sentence-transformers/LaBSE on first use and runs it on GPU
    when available. If a batch fails (e.g. CUDA OOM), that whole batch is
    kept unfiltered rather than dropped.
    """
    from transformers import AutoTokenizer, AutoModel

    # Google LaBSE: multilingual sentence embeddings suited to en-zh mining.
    model_name = "sentence-transformers/LaBSE"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name)
    model.eval()  # inference only

    # Use the GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    def get_embeddings(sentences: List[str]) -> torch.Tensor:
        """Return [CLS] sentence embeddings for a batch, as a CPU tensor."""
        inputs = tokenizer(
            sentences, padding=True, truncation=True, return_tensors="pt"
        )
        inputs = {k: v.to(device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
        # First token ([CLS]) of the last hidden state as the sentence vector.
        return outputs.last_hidden_state[:, 0].cpu()

    filtered_pairs: List[Tuple[str, str]] = []
    batch_size = 32  # small batches to fit modest (~6GB) GPUs

    for i in tqdm(range(0, len(en_lines), batch_size)):
        en_batch = en_lines[i : i + batch_size]
        zh_batch = zh_lines[i : i + batch_size]

        try:
            en_emb = get_embeddings(en_batch)
            zh_emb = get_embeddings(zh_batch)

            # Vectorized row-wise cosine similarity over the whole batch
            # (the original wrapped each pair in its own tensor).
            sims = torch.cosine_similarity(en_emb, zh_emb, dim=1)
            for sim, en, zh in zip(sims.tolist(), en_batch, zh_batch):
                if sim >= threshold:
                    filtered_pairs.append((en, zh))
        except Exception as e:
            # Best-effort: on failure, keep the batch unfiltered.
            print(f"处理批次时出错: {e}，保留原始数据")
            filtered_pairs.extend(zip(en_batch, zh_batch))

    return filtered_pairs


def clean_data(
    en_lines: List[str],
    zh_lines: List[str],
    max_length: int = 128,
    ratio_threshold: float = 2.5,
    similarity_threshold: float = 0.8,
    purity_ratio: float = 0.7,
) -> Tuple[List[str], List[str]]:
    """Run the full cleaning pipeline and return (en_lines, zh_lines).

    Stages, in order: overlong-sentence filter, length-ratio filter,
    language-purity filter, LaBSE semantic-similarity filter.
    """
    # Stage 1: drop overlong sentences.
    pairs = filter_long_sentences(en_lines, zh_lines, max_length)

    # Stage 2: drop pairs with unbalanced lengths.
    en, zh = [p[0] for p in pairs], [p[1] for p in pairs]
    pairs = filter_length_ratio(en, zh, ratio_threshold)

    # Stage 3: drop language-mixed pairs.
    en, zh = [p[0] for p in pairs], [p[1] for p in pairs]
    pairs = filter_language_purity(en, zh, purity_ratio)

    # Stage 4: LaBSE semantic-similarity filtering.
    en, zh = [p[0] for p in pairs], [p[1] for p in pairs]
    pairs = filter_by_semantic_similarity(en, zh, similarity_threshold)

    return [p[0] for p in pairs], [p[1] for p in pairs]


def parse_args():
    """Build and parse the command-line arguments for this preprocessing run."""
    p = argparse.ArgumentParser()
    p.add_argument("--input_en", default="data/raw/OpenSubtitles.en-zh.en", type=str)
    p.add_argument("--input_zh", default="data/raw/OpenSubtitles.en-zh.zh", type=str)
    p.add_argument("--output_dir", default="data/processed", type=str)
    p.add_argument(
        "--clean_dir", default="data/clean", type=str, help="清洗后数据保存目录"
    )
    p.add_argument("--vocab_size", default=32768, type=int)
    p.add_argument("--train_ratio", default=0.8, type=float, help="训练集比例")
    p.add_argument("--seed", default=42, type=int, help="随机种子")
    p.add_argument("--max_length", default=128, type=int, help="最大句子长度")
    p.add_argument(
        "--ratio_threshold", default=2.5, type=float, help="长度比例阈值"
    )
    p.add_argument(
        "--similarity_threshold", default=0.8, type=float, help="语义相似度阈值"
    )
    p.add_argument(
        "--purity_ratio", default=0.7, type=float, help="语言纯度阈值（防止翻译混入）"
    )
    p.add_argument("--skip_cleaning", action="store_true", help="跳过数据清洗步骤")
    return p.parse_args()


def split_dataset(input_en, input_zh, output_dir, train_ratio=0.8, seed=42):
    """Randomly split a line-aligned parallel corpus into train/val files.

    The shuffle is seeded for reproducibility. The validation set is capped
    at 16384 samples to keep evaluation cheap.

    Args:
        input_en: path to the English side (one sentence per line).
        input_zh: path to the Chinese side (line-aligned with input_en).
        output_dir: directory under which train/ and val/ subdirs are created.
        train_ratio: fraction of samples assigned to the training set.
        seed: random seed for the shuffle.

    Returns:
        dict with sample counts and the paths of the four written files.

    Raises:
        ValueError: if the two input files have different line counts.
    """
    print("开始划分数据集...")

    # Seed so the same corpus + seed always produces the same split.
    random.seed(seed)

    with open(input_en, "r", encoding="utf-8") as f_en:
        en_lines = f_en.readlines()
    with open(input_zh, "r", encoding="utf-8") as f_zh:
        zh_lines = f_zh.readlines()

    # Parallel corpora must be line-aligned. (Explicit raise instead of
    # assert: asserts are stripped under `python -O`.)
    if len(en_lines) != len(zh_lines):
        raise ValueError(
            f"英文和中文文件行数不匹配: {len(en_lines)} vs {len(zh_lines)}"
        )

    total_samples = len(en_lines)
    train_size = int(total_samples * train_ratio)

    # Shuffle once, then slice: first train_size indices form the train set,
    # the remainder (capped at 16384) forms the validation set.
    indices = list(range(total_samples))
    random.shuffle(indices)
    train_indices = indices[:train_size]
    val_indices = indices[train_size:][:16384]
    # Fix: report the actual (possibly capped) validation size; the previous
    # version reported total - train_size even when the cap applied.
    val_size = len(val_indices)

    print(f"数据集总样本数: {total_samples:,}")
    print(f"训练集大小: {train_size:,} ({train_ratio*100:.1f}%)")
    val_pct = (val_size / total_samples * 100) if total_samples else 0.0
    print(f"验证集大小: {val_size:,} ({val_pct:.1f}%)")

    train_dir = os.path.join(output_dir, "train")
    val_dir = os.path.join(output_dir, "val")
    os.makedirs(train_dir, exist_ok=True)
    os.makedirs(val_dir, exist_ok=True)

    def _write_subset(path, lines, idx_list):
        # Lines are written in shuffled order; en/zh stay aligned because
        # both sides are written from the same index list.
        with open(path, "w", encoding="utf-8") as f:
            f.writelines(lines[i] for i in idx_list)

    print("正在保存训练集...")
    train_en_file = os.path.join(train_dir, "train.en")
    train_zh_file = os.path.join(train_dir, "train.zh")
    _write_subset(train_en_file, en_lines, train_indices)
    _write_subset(train_zh_file, zh_lines, train_indices)

    print("正在保存验证集...")
    val_en_file = os.path.join(val_dir, "val.en")
    val_zh_file = os.path.join(val_dir, "val.zh")
    _write_subset(val_en_file, en_lines, val_indices)
    _write_subset(val_zh_file, zh_lines, val_indices)

    print("数据集划分完成！")
    print(f"训练集保存至: {train_dir}")
    print(f"验证集保存至: {val_dir}")

    return {
        "total_samples": total_samples,
        "train_size": train_size,
        "val_size": val_size,
        "train_files": {"en": train_en_file, "zh": train_zh_file},
        "val_files": {"en": val_en_file, "zh": val_zh_file},
    }


def get_tokenize_function(tokenizer, max_length, bos_id, eos_id):
    """Return a batch tokenization function suitable for datasets.map.

    The closure encodes the "src"/"tgt" text columns, truncates each side
    to max_length - 2 tokens, wraps the ids in BOS/EOS, and drops pairs
    where either side tokenizes to nothing.
    """

    def _tokenize_function(examples):
        src_batch = tokenizer.encode_batch(examples["src"])
        tgt_batch = tokenizer.encode_batch(examples["tgt"])

        out = {"src_ids": [], "tgt_ids": []}
        budget = max_length - 2  # leave room for the BOS and EOS tokens

        for src_enc, tgt_enc in zip(src_batch, tgt_batch):
            # Skip pairs where either side is empty after tokenization.
            if not (src_enc.ids and tgt_enc.ids):
                continue
            out["src_ids"].append([bos_id, *src_enc.ids[:budget], eos_id])
            out["tgt_ids"].append([bos_id, *tgt_enc.ids[:budget], eos_id])

        return out

    return _tokenize_function


def tokenize_and_save(
    split_name,
    src_path,
    tgt_path,
    tokenizer,
    max_length,
    bos_id,
    eos_id,
    num_proc,
    output_dir,
):
    """Tokenize one parallel split and persist it as a HuggingFace dataset.

    Reads the aligned src/tgt text files, tokenizes them in parallel using
    the closure from get_tokenize_function, stores the ids in torch format,
    and saves the result under output_dir/split_name.
    """
    print(f"正在处理 {split_name} 数据...")

    def _read_lines(path):
        # One stripped sentence per line.
        with open(path, "r", encoding="utf-8") as f:
            return [line.strip() for line in f]

    raw_dataset = datasets.Dataset.from_dict(
        {"src": _read_lines(src_path), "tgt": _read_lines(tgt_path)}
    )

    tokenized_dataset = raw_dataset.map(
        get_tokenize_function(tokenizer, max_length, bos_id, eos_id),
        batched=True,
        num_proc=num_proc,
        remove_columns=raw_dataset.column_names,  # drop raw "src"/"tgt" text to save space
        desc=f"Tokenizing {split_name} dataset",
    )

    # Store as torch format so loading yields Tensors directly.
    tokenized_dataset.set_format(type="torch", columns=["src_ids", "tgt_ids"])

    # Key step: persist to disk so training can load without re-tokenizing.
    save_path = os.path.join(output_dir, split_name)
    tokenized_dataset.save_to_disk(save_path)
    print(f"已将 {split_name} 数据集保存到 {save_path}")


if __name__ == "__main__":
    args = parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    # Data cleaning step (skippable via --skip_cleaning once cleaned files exist)
    if not args.skip_cleaning:
        print("开始数据清洗...")
        os.makedirs(args.clean_dir, exist_ok=True)

        # Read the raw parallel corpus (line-aligned en/zh files)
        with open(args.input_en, "r", encoding="utf-8") as f:
            en_lines = f.readlines()
        with open(args.input_zh, "r", encoding="utf-8") as f:
            zh_lines = f.readlines()

        print(f"原始数据量: {len(en_lines)}")

        # Run the full cleaning pipeline (length, ratio, purity, LaBSE similarity)
        clean_en_lines, clean_zh_lines = clean_data(
            en_lines,
            zh_lines,
            max_length=args.max_length,
            ratio_threshold=args.ratio_threshold,
            similarity_threshold=args.similarity_threshold,
            purity_ratio=args.purity_ratio,
        )

        print(f"清洗后数据量: {len(clean_en_lines)}")

        # Persist the cleaned corpus
        clean_en_path = os.path.join(args.clean_dir, "OpenSubtitles.en-zh.en")
        clean_zh_path = os.path.join(args.clean_dir, "OpenSubtitles.en-zh.zh")

        with open(clean_en_path, "w", encoding="utf-8") as f:
            f.writelines(clean_en_lines)
        with open(clean_zh_path, "w", encoding="utf-8") as f:
            f.writelines(clean_zh_lines)

        print(f"清洗后数据已保存至: {args.clean_dir}")

        # All downstream steps consume the cleaned files
        input_en = clean_en_path
        input_zh = clean_zh_path
    else:
        # Cleaning skipped: assume previously cleaned files already exist in clean_dir
        clean_en_path = os.path.join(args.clean_dir, "OpenSubtitles.en-zh.en")
        clean_zh_path = os.path.join(args.clean_dir, "OpenSubtitles.en-zh.zh")
        input_en = clean_en_path
        input_zh = clean_zh_path

    # Step 1: train a joint BPE tokenizer on both languages
    print("第一步：训练tokenizer...")
    tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
    tokenizer.pre_tokenizer = Metaspace()  # pyright: ignore[reportAttributeAccessIssue]
    trainer = BpeTrainer(
        vocab_size=args.vocab_size,
        special_tokens=[
            "[UNK]",
            "[PAD]",
            "[BOS]",
            "[EOS]",
        ],
    )

    tokenizer.train(files=[input_en, input_zh], trainer=trainer)
    # NOTE(review): the NFKC normalizer is attached *after* training, so the
    # tokenizer was trained on un-normalized text — confirm this is intended.
    tokenizer.normalizer = normalizers.NFKC()
    tokenizer.decoder = decoders.Metaspace()
    tokenizer.save(os.path.join(args.output_dir, "tokenizer.json"))
    print(f"Saving tokenizer to: {os.path.join(args.output_dir, 'tokenizer.json')}")

    # Step 2: split the (cleaned) corpus into train/val sets
    print("\n第二步：划分数据集...")
    split_info = split_dataset(
        input_en,
        input_zh,
        args.output_dir,
        train_ratio=args.train_ratio,
        seed=args.seed,
    )
    print(f"数据集划分信息: {split_info}")

    # Step 3: tokenize both splits and save them to disk
    print("\n第三步：tokenization并保存...")

    # Reload the tokenizer that was just trained and saved
    tokenizer = Tokenizer.from_file(os.path.join(args.output_dir, "tokenizer.json"))
    # NOTE(review): hard-coded 128 here ignores --max_length — confirm intended.
    max_length = 128
    num_proc = os.cpu_count()

    # Directory for the tokenized (binary) datasets
    tokenized_dir = os.path.join(args.output_dir, "tokenized")
    os.makedirs(tokenized_dir, exist_ok=True)

    bos_id = tokenizer.token_to_id("[BOS]")
    eos_id = tokenizer.token_to_id("[EOS]")

    # Tokenize the training split.
    # NOTE(review): the zh file is passed as src and the en file as tgt,
    # i.e. the translation direction is zh -> en — confirm this is intended.
    tokenize_and_save(
        "train",
        split_info["train_files"]["zh"],
        split_info["train_files"]["en"],
        tokenizer,
        max_length,
        bos_id,
        eos_id,
        num_proc,
        tokenized_dir,
    )

    # Tokenize the validation split (same zh -> en direction)
    tokenize_and_save(
        "val",
        split_info["val_files"]["zh"],
        split_info["val_files"]["en"],
        tokenizer,
        max_length,
        bos_id,
        eos_id,
        num_proc,
        tokenized_dir,
    )

    print("所有数据预处理完毕并已保存到磁盘！")
