# pip install -U tqdm mmh3
import sys
import json
import os
from tqdm import tqdm
import psutil  # 内存监控
import math
import re
import time
from collections import defaultdict
from pathlib import Path
from typing import List
from tqdm import tqdm
import mmh3  # 高效哈希库
import numpy as np  # 高效数值计算
from src.dedupunion import UnionFind

# How to split text into atomic parts (characters)
def split(source: str):
    """Break *source* into a list of its individual characters."""
    return [ch for ch in source]

# How to join character parts back into an n-gram string
def join(source: List[str]):
    """Concatenate the character parts back into a single string."""
    return str.join("", source)

class Document(object):
    """
    Memory-conscious document wrapper that lazily computes character
    n-grams, a MinHash signature, and the LSH band keys derived from it.
    """

    def __init__(self, id: int, content: str, ngram_n=3, band_size=2, signature_len=100):
        """
        Args:
            id: document id; the pipeline relies on it matching the
                document's position in the Deduper's doc list.
            content: raw document text.
            ngram_n: character n-gram size.
            band_size: number of signature values per LSH band.
            signature_len: MinHash signature length (number of hash seeds).
        """
        self.id = id
        self._content = content
        self.band_size = band_size
        self.ngram_n = ngram_n
        self.signature_len = signature_len  # MinHash signature length
        # Lazily-computed caches, filled on first property access.
        self._ngrams = None
        self._min_hash = None
        self._lsh_bands = None

    @property
    def content(self):
        """Raw document text."""
        return self._content

    @property
    def ngrams(self):
        """Character n-gram bag of the document (computed on first access)."""
        if self._ngrams is None:
            self._ngrams = self.split_ngrams()
        return self._ngrams

    @property
    def min_hash(self):
        """MinHash signature of the document (computed on first access)."""
        if self._min_hash is None:
            self._min_hash = self.calc_min_hash()
        return self._min_hash

    @property
    def lsh_bands(self):
        """LSH band keys of the document (computed on first access)."""
        if self._lsh_bands is None:
            self._lsh_bands = self.prepare_lsh_bands()
        return self._lsh_bands

    def prepare_lsh_bands(self):
        """
        Slice the MinHash signature into fixed-size bands and encode each
        band as a hashable string key ("v1-v2-...").

        Any trailing signature values that do not fill a whole band are
        dropped (floor division).
        """
        min_hash = self.min_hash
        band_count = len(min_hash) // self.band_size
        lsh_bands = []
        for i in range(band_count):
            band = min_hash[i*self.band_size:(i+1)*self.band_size]
            lsh_bands.append("-".join(map(str, band)))
        return lsh_bands

    def calc_similarity(self, rhs):
        """
        Estimate the Jaccard similarity between this document and *rhs*
        from their MinHash signatures.

        The MinHash estimator is the fraction of signature *positions* at
        which the two signatures agree.  (The previous implementation
        intersected the signatures as value sets, which over-counts when
        unrelated positions collide on the same hash value and
        under-counts when a signature contains duplicate values —
        e.g. [1,2,3,4] vs [4,3,2,1] scored 1.0 instead of 0.0.)
        """
        matches = sum(1 for a, b in zip(self.min_hash, rhs.min_hash) if a == b)
        return matches / self.signature_len

    def split_ngrams(self) -> List[str]:
        """
        Build the character n-gram list for this document.

        Raises:
            ValueError: if the cleaned text is too short to yield any
                n-gram.  (ValueError is a subclass of Exception, so any
                existing `except Exception` handlers still match.)
        """
        # Strip punctuation/symbols; keep word characters and whitespace.
        no_symbols = re.sub(r'[^\w\s]', '', self._content)
        parts = split(no_symbols)
        n = self.ngram_n
        ngrams = [join(parts[i:i+n]) for i in range(len(parts) - (n - 1))]
        if not ngrams:
            raise ValueError(
                f"[ERROR] 无法为数据[{self.id}] 生成 {n}-grams")
        return ngrams

    def calc_min_hash(self) -> List[int]:
        """
        Compute the MinHash signature using murmurhash3.

        signature[i] is the minimum over all n-grams of mmh3 seeded with
        i; independent seeds stand in for independent hash functions.
        """
        max_u32 = int(np.iinfo(np.uint32).max)
        signature = [max_u32] * self.signature_len
        for ngram in self.ngrams:
            for i in range(self.signature_len):
                # Unsigned hash so values compare cleanly against max_u32.
                h = mmh3.hash(ngram, seed=i, signed=False)
                if h < signature[i]:
                    signature[i] = h
        return signature


class LSH(object):
    """
    Locality-sensitive hashing over document MinHash bands.
    """

    def __init__(self):
        # Cached bucket list; populated by build() on first call.
        self._buckets = None

    def build(self, docs: List[Document]):
        """
        Group documents that share at least one LSH band.

        Returns a list of candidate buckets (lists of document ids),
        keeping only bands shared by two or more documents and dropping
        buckets whose membership duplicates one already emitted.  The
        result is cached, so repeated calls return the same list.
        """
        if self._buckets is not None:
            return self._buckets

        # band key -> ids of every document exposing that band
        band_to_docs = defaultdict(set)
        for doc in tqdm(docs, "[INFO] ✂ 处理文档 bands"):
            for band in doc.lsh_bands:
                band_to_docs[band].add(doc.id)

        # Keep bands matched by more than one document, skipping any
        # bucket whose member set was already seen (avoids re-comparing
        # the same group of documents).
        seen_buckets = set()
        unique_buckets = []
        for doc_ids in band_to_docs.values():
            if len(doc_ids) <= 1:
                continue
            bucket = list(doc_ids)
            key = tuple(sorted(bucket))
            if key in seen_buckets:
                continue
            seen_buckets.add(key)
            unique_buckets.append(bucket)

        self._buckets = unique_buckets
        return self._buckets


class Deduper(object):
    """
    Deduplication pipeline entry point.

    Wraps the input texts into Document objects, builds LSH candidate
    buckets, confirms candidates with MinHash similarity, and keeps one
    representative per group of near-duplicates.
    """

    def __init__(self, docs: List[str], sim_threshold=0.8, ngram_n=3, band_size=3, signature_len=100):
        """
        Args:
            docs: raw document texts; list position becomes the document id.
            sim_threshold: minimum similarity estimate to treat two docs
                as duplicates.
            ngram_n: character n-gram size passed to Document.
            band_size: MinHash values per LSH band.
            signature_len: MinHash signature length.

        Raises:
            ValueError: if band_size is larger than signature_len.
        """
        self.sim_threshold = sim_threshold
        self.ngram_n = ngram_n
        self.band_size = band_size
        self.signature_len = signature_len

        if band_size > signature_len:
            raise ValueError(f"band_size ({band_size}) 不能大于 signature_len ({signature_len})")

        # Wrap texts in batches of 3000 so progress is reported
        # incrementally.  Invariant: each Document's id equals its index
        # in self.docs, because _process_bucket indexes self.docs by id.
        batch_size = 3000
        self.docs = []

        print(f"[INFO] 分批处理文档 (每批 {batch_size} 个)")
        for i in tqdm(range(0, len(docs), batch_size), desc="[INFO] 🚀 处理文档"):
            batch_docs = docs[i:i+batch_size]
            batch_objects = [
                Document(idx + i, doc, self.ngram_n, self.band_size, self.signature_len)
                for idx, doc in enumerate(batch_docs)
            ]
            self.docs.extend(batch_objects)

        self.lsh = LSH()
        self.duplicated = set()        # ids of documents flagged as duplicates
        self.duplicate_pairs = []      # detailed records of every similar pair found
        self._recorded_pairs = set()   # frozenset({id1, id2}) keys mirroring duplicate_pairs
        self._result = None            # cached output of execute()

    def execute(self):
        """Run deduplication once and return the surviving Documents (cached)."""
        if self._result is not None:
            return self._result

        print("[INFO] 构建 LSH 桶...")
        buckets = self.lsh.build(self.docs)
        print(f"[INFO] 待处理 buckets 数量：{len(buckets)}")

        for bucket in tqdm(buckets, desc="[INFO] 🔍 处理桶"):
            self._process_bucket(bucket)

        # Everything not marked duplicated survives.
        self._result = [doc for doc in self.docs if doc.id not in self.duplicated]
        return self._result

    def _process_bucket(self, bucket):
        """Compare all documents in one bucket pairwise, group similar ones
        with a union-find structure, record every similar pair once, and mark
        all but one document per group as duplicated."""
        bucket_docs = [(doc_id, self.docs[doc_id]) for doc_id in bucket]
        bucket_docs.sort(key=lambda x: len(x[1].content))  # optional: shortest first

        uf = UnionFind()
        for doc_id in bucket:
            uf.add(doc_id)

        # Phase 1: pairwise comparison; union every pair above threshold.
        # Bucket ids come from a set and j > i, so pairs are always distinct.
        candidate_pairs = []
        for i, (i_id, i_doc) in enumerate(bucket_docs):
            for j in range(i + 1, len(bucket_docs)):
                j_id, j_doc = bucket_docs[j]
                sim = i_doc.calc_similarity(j_doc)
                if sim >= self.sim_threshold:
                    candidate_pairs.append((i_id, j_id, sim))
                    uf.union(i_id, j_id)

        # Phase 2: collect the connected components.
        groups = defaultdict(list)
        for doc_id in bucket:
            groups[uf.find(doc_id)].append(doc_id)

        # Record each similar pair exactly once; buckets can overlap across
        # bands, so the same pair may be seen again in a later bucket.
        for i_id, j_id, sim in candidate_pairs:
            if not self._is_pair_recorded(i_id, j_id):
                self._recorded_pairs.add(frozenset((i_id, j_id)))
                self.duplicate_pairs.append({
                    "doc1_id": i_id,
                    "doc2_id": j_id,
                    "similarity": sim,
                    "doc1_content": self.docs[i_id].content,
                    "doc2_content": self.docs[j_id].content
                })

        # Keep the first document of each group; mark the rest duplicated.
        for group in groups.values():
            if len(group) > 1:
                for doc_id in group[1:]:
                    self.duplicated.add(doc_id)

    def _is_pair_recorded(self, id1, id2):
        """O(1) check whether the (unordered) pair was already recorded.

        Previously this scanned duplicate_pairs linearly per candidate
        pair, making bucket processing quadratic in recorded pairs.
        """
        return frozenset((id1, id2)) in self._recorded_pairs


def filter_len(x: str, min_length=50):
    """
    Validate one text snippet.

    Returns a (is_valid, reason) tuple: (True, "") when the stripped
    text is long enough and has more than 10 distinct characters,
    otherwise (False, <human-readable rejection reason>).
    """
    stripped = x.strip()
    n = len(stripped)

    # Reject texts that are too short after stripping whitespace.
    if n < min_length:
        return False, f"长度不足 ({n} < {min_length})"

    # Reject texts with too little character variety (e.g. "aaaa...").
    distinct = len(set(stripped))
    if distinct <= 10:
        return False, f"字符多样性不足 ({distinct} ≤ 10)"

    return True, ""

if __name__ == '__main__':

    to_dedupe = r"D:\haobingwen\remote-code\huawei_tools\output\02-pretrain\cleaned_data_dedup.jsonl"          # input JSONL to deduplicate
    deduped_file = r"D:\haobingwen\remote-code\huawei_tools\output\02-pretrain\cleaned_data_dedup_1.jsonl"      # deduplicated output path

    sim_threshold = 0.5            # minimum MinHash similarity to count as duplicate
    ngram_n = 5                      # character n-gram size
    bands_size = 6                   # values per LSH band (passed as band_size)
    signature_len = 100              # MinHash signature length

    print(f"[INFO] 开始 LSH 去重，sim_threshold={sim_threshold} ngram_n={ngram_n} bands_size={bands_size}")

    started = time.time()
    process = psutil.Process(os.getpid())  # for the RSS memory reports below
    
    # Read the JSONL input line by line; only entries with a 'text' field
    # are kept, malformed JSON lines are warned about and skipped.
    print("[INFO] 读取输入文件...")
    raw_entries = []
    texts = []
    
    with open(to_dedupe, 'r', encoding='utf-8') as f:
        for line in tqdm(f, desc="[INFO] 📖 读取文件"):
            if not line.strip():
                continue
            try:
                entry = json.loads(line)
                if 'text' in entry:
                    raw_entries.append(entry)
                    texts.append(entry['text'])
            except json.JSONDecodeError:
                print(f"[WARN] 无效JSON行: {line[:100]}...")
    
    print(f"[INFO] 内存使用: {process.memory_info().rss/1024/1024:.2f} MB")
    
    # Drop texts that are too short or have too little character variety.
    print("[INFO] 过滤无效文本...")
    min_length = ngram_n * 5  # minimum length scales with the n-gram size
    filtered_texts = []
    filtered_out = []  # (text, reason) pairs for the rejects report
    
    for text in tqdm(texts, desc="[INFO] 🌳 筛选文本"):
        is_valid, reason = filter_len(text, min_length)
        if is_valid:
            filtered_texts.append(text)
        else:
            filtered_out.append((text, reason))
    
    print(f"[INFO] 原始文本数: {len(texts)}, 有效文本数: {len(filtered_texts)}")
    print(f"[INFO] 被过滤文本数: {len(texts) - len(filtered_texts)}")
    print(f"[INFO] 内存使用: {process.memory_info().rss/1024/1024:.2f} MB")

    # Persist every rejected text together with its rejection reason.
    if filtered_out:
        filtered_file = deduped_file + ".filtered.jsonl"
        with open(filtered_file, 'w', encoding='utf-8') as f:
            for text, reason in filtered_out:
                entry = {
                    "reason": reason,
                    "text": text
                }
                f.write(json.dumps(entry, ensure_ascii=False) + '\n')
        print(f"[INFO] 完整被过滤文本已保存至: {filtered_file}")
    
    # Run LSH + MinHash deduplication on the surviving texts.
    deduper = Deduper(
        filtered_texts, 
        sim_threshold=sim_threshold, 
        ngram_n=ngram_n, 
        band_size=bands_size,
        signature_len=signature_len
    )
    result = deduper.execute()
    
    duplicated_count = len(filtered_texts) - len(result)
    # NOTE(review): raises ZeroDivisionError when filtered_texts is empty —
    # confirm the input can never be fully filtered out.
    print(f"[INFO] 重复数量：{duplicated_count}，重复率：{duplicated_count / len(filtered_texts):.2%}")
    print(f"[INFO] 内存使用: {process.memory_info().rss/1024/1024:.2f} MB")
    
    # Save the detected similar-document pairs for manual inspection.
    duplicate_pairs_file = deduped_file + ".duplicate_pairs.jsonl"
    with open(duplicate_pairs_file, 'w', encoding='utf-8') as f:
        for pair in deduper.duplicate_pairs:
            f.write(json.dumps(pair, ensure_ascii=False) + '\n')
    print(f"[INFO] 相似文档对信息已保存至: {duplicate_pairs_file}")
    print(f"[INFO] 检测到的相似文档对数量: {len(deduper.duplicate_pairs)}")
    
    # Keep only raw entries whose text survived deduplication.
    # NOTE(review): membership is tested by text content, so raw entries that
    # are exact duplicates of a kept text are ALL retained here — confirm
    # that exact duplicates are intended to survive this step.
    print("[INFO] 构建输出...")
    text_to_keep = {doc.content for doc in result}
    deduped_entries = [
        entry for entry in raw_entries 
        if entry.get('text', '') in text_to_keep
    ]
    
    # Stream the deduplicated entries to the output file.
    print("[INFO] 写入输出文件...")
    with open(deduped_file, 'w', encoding='utf-8') as f:
        for entry in tqdm(deduped_entries, desc="[INFO] 💾 写入结果"):
            f.write(json.dumps(entry, ensure_ascii=False) + '\n')
    
    total_time = time.time() - started
    print(f"[INFO] 已完成清洗，共耗时 {total_time:.2f} 秒")
    print(f"[INFO] 最终内存使用: {process.memory_info().rss/1024/1024:.2f} MB")