import os
import re
import numpy as np
import jieba
from zhon.hanzi import punctuation as zh_punctuation
from string import punctuation as en_punctuation
from simhash import Simhash
from datasketch import MinHash
from docx import Document
from openpyxl import load_workbook
from transformers import BertTokenizer, BertModel
import torch
import faiss
from typing import List, Dict, Any
import synonyms
import argparse    

# ---------------------------
# Global configuration
# ---------------------------
SIMHASH_THRESHOLD = 5          # SimHash Hamming-distance threshold (max bit difference)
MINHASH_THRESHOLD = 0.5        # MinHash Jaccard-similarity threshold (min overlap)
MAX_RESULTS = 5                # max number of semantic-match results per query
DOCUMENTS_DIR = os.getcwd()    # default directory scanned for documents

# ---------------------------
# Global data stores
# ---------------------------
docs: List[Dict[str, Any]] = []           # NOTE(review): the search class keeps its own self.docs; this module-level list appears unused — confirm before removing
embedding_cache: Dict[int, np.ndarray] = {}  # NOTE(review): likewise shadowed by self.embedding_cache; appears unused

# Global BERT model and tokenizer (populated by AdvancedChineseSearch.__init__)
tokenizer = None
model = None

# ---------------------------
# 文本清洗与分词
# ---------------------------
def clean_text(text: str) -> str:
    """Remove Chinese and English punctuation and collapse whitespace.

    Every punctuation character is replaced with a space, runs of
    whitespace are squeezed to a single space, and the result is
    trimmed at both ends.
    """
    punct_class = "[" + re.escape(zh_punctuation + en_punctuation) + "]"
    no_punct = re.sub(punct_class, " ", text)
    return re.sub(r"\s+", " ", no_punct).strip()

def tokenize_text(text: str) -> List[str]:
    """Segment *text* with jieba after punctuation/whitespace cleanup.

    Returns the jieba tokens with whitespace-only entries removed.
    """
    segmented = jieba.lcut(clean_text(text))
    return [word for word in segmented if word.strip()]

def get_snippet(text: str, query: str = None, length: int = 100) -> str:
    """Return a short excerpt of *text*, roughly *length* characters long.

    If *query* occurs in *text* (case-insensitive), the excerpt is
    centred on the first occurrence and always contains the whole
    match; otherwise the first *length* characters are used. Newlines
    are flattened to spaces and "..." is appended when the excerpt is
    shorter than the full text.
    """
    snippet = ""
    if query:
        idx = text.lower().find(query.lower())
        if idx != -1:
            start = max(0, idx - length // 2)
            # Include the full query plus trailing context; the old
            # `idx + length // 2` cut off any query longer than length//2.
            end = min(len(text), idx + len(query) + length // 2)
            snippet = text[start:end]
    if not snippet:
        snippet = text[:length]
    snippet = snippet.replace("\n", " ")
    if len(text) > len(snippet):
        snippet = snippet.strip() + "..."
    return snippet

# ---------------------------
# 同义词扩展函数
# ---------------------------
def query_expansion(query: str, threshold: float = 0.7) -> List[str]:
    """Expand *query* with near-synonyms of each multi-character token.

    The original query is always the first entry; every synonym whose
    similarity score is at least *threshold* is appended once. Failures
    of the synonym library are reported and the partial expansion is
    returned.
    """
    expanded = [query]
    words = [w for w in tokenize_text(query) if w.strip()]
    print(f"内部使用 jieba 分词结果: {words}")
    try:
        for word in words:
            # Single characters are too ambiguous for synonym lookup.
            if len(word) == 1:
                print(f"Token '{word}' 长度为1，跳过同义词扩展")
                continue
            neighbours, scores = synonyms.nearby(word)
            for candidate, score in zip(neighbours, scores):
                if score < threshold or candidate in expanded:
                    continue
                print(f"{word} 的同义词: {candidate}:{score}")
                expanded.append(candidate)
    except Exception as e:
        print(f"调用同义词库失败：{e}")
    print(f"最终扩展后的查询词列表: {expanded}")
    return expanded

# ---------------------------
# 文档加载、分词、匹配、查询
# ---------------------------
class AdvancedChineseSearch:
    """Chinese document search combining exact, fingerprint and semantic matching.

    Documents (.docx paragraphs/tables and .xlsx rows) are indexed with
    SimHash and MinHash fingerprints for cheap candidate filtering;
    semantic candidates are then ranked by cosine similarity of BERT
    embeddings through a FAISS inner-product index.
    """

    def __init__(self, config: Dict[str, Any]):
        """Load the BERT model/tokenizer and index every document under
        the configured folder.

        Config keys read here: "model_name", "folder_path", "use_jieba".
        """
        self.config = config
        # Keep the module-level globals in sync for any code that reads
        # them directly.
        global tokenizer, model
        model_name = self.config.get("model_name", "bert-base-chinese")
        tokenizer = BertTokenizer.from_pretrained(model_name)
        model = BertModel.from_pretrained(model_name)
        model.eval()  # inference only: disable dropout etc.
        self.tokenizer = tokenizer
        self.model = model
        self.docs: List[Dict[str, Any]] = []
        self.embedding_cache: Dict[int, np.ndarray] = {}  # doc id -> embedding
        self.load_documents(self.config.get("folder_path", DOCUMENTS_DIR))

    def tokenize(self, text: str) -> List[str]:
        """Segment *text* with jieba (config "use_jieba", default True)
        or with the BERT tokenizer."""
        if self.config.get("use_jieba", True):
            return jieba.lcut(clean_text(text))
        return self.tokenizer.tokenize(text)

    def compute_simhash(self, text: str) -> Simhash:
        """SimHash fingerprint over the tokenized text."""
        return Simhash(self.tokenize(text))

    def compute_minhash(self, text: str, num_perm: int = 128) -> MinHash:
        """MinHash signature over the set of distinct tokens."""
        m = MinHash(num_perm=num_perm)
        for token in set(self.tokenize(text)):
            m.update(token.encode('utf8'))
        return m

    def query_expansion(self, query: str, threshold: float = 0.7) -> List[str]:
        """Delegate to the module-level synonym expansion."""
        return query_expansion(query, threshold)

    def get_matched_tokens(self, query: str, candidate: str) -> List[Dict[str, float]]:
        """Return the query tokens found in *candidate*, exactly
        (similarity 1.0) or fuzzily (SequenceMatcher ratio >= 0.8)."""
        import difflib
        cand_tokens = self.tokenize(candidate)
        matches = []
        for token in self.tokenize(query):
            if token in cand_tokens:
                matches.append({"token": token, "similarity": 1.0})
                continue
            ratios = [difflib.SequenceMatcher(None, token, ct).ratio() for ct in cand_tokens]
            max_ratio = max(ratios) if ratios else 0.0
            if max_ratio >= 0.8:
                matches.append({"token": token, "similarity": round(max_ratio, 2)})
        return matches

    @staticmethod
    def _cell(row: tuple, idx: int) -> str:
        """Stringify one spreadsheet cell; missing/None cells become "".

        (The original turned a None cell into the literal string "None".)
        """
        if idx >= len(row) or row[idx] is None:
            return ""
        return str(row[idx]).strip()

    def _make_record(self, content: str, **extra: Any) -> Dict[str, Any]:
        """Build one searchable record with precomputed fingerprints."""
        record = {
            "id": len(self.docs),
            "text_orig": content,
            "text_clean": " ".join(self.tokenize(content)),
            "simhash": self.compute_simhash(content),
            "minhash": self.compute_minhash(content),
        }
        record.update(extra)
        return record

    def _load_docx(self, file: str, file_path: str) -> None:
        """Index one Word file: non-empty paragraphs plus table rows,
        concatenated into a single record."""
        try:
            doc = Document(file_path)
        except Exception as e:
            print(f"读取 DOCX 文件 {file_path} 失败：{e}")
            return
        parts = [para.text.strip() for para in doc.paragraphs if para.text.strip()]
        for table in doc.tables:
            for row in table.rows:
                row_texts = [cell.text.strip() for cell in row.cells if cell.text.strip()]
                if row_texts:
                    parts.append(" ".join(row_texts))
        content = "\n".join(parts)
        if content.strip():
            self.docs.append(self._make_record(content, type="docx", file=file))

    def _load_xlsx(self, file: str, file_path: str) -> None:
        """Index one Excel file: one record per data row (header
        skipped) whose B column is non-empty."""
        try:
            wb = load_workbook(file_path, read_only=True)
        except Exception as e:
            print(f"读取 XLSX 文件 {file_path} 失败：{e}")
            return
        for sheet_name in wb.sheetnames:
            sheet = wb[sheet_name]
            for row_index, row in enumerate(sheet.iter_rows(values_only=True), start=1):
                if row_index == 1:
                    continue  # skip header row
                if not row:
                    continue
                b_raw = row[1] if len(row) > 1 else ""
                # Rows whose B cell is empty or falsy carry no content.
                if not b_raw or str(b_raw).strip() == "":
                    continue
                content = str(b_raw).strip()
                self.docs.append(self._make_record(
                    content,
                    type="xlsx",
                    file=file,
                    sheet=sheet_name,
                    row=row_index,
                    A=self._cell(row, 0),
                    B=content,
                    C=self._cell(row, 2),
                ))
        wb.close()

    def load_documents(self, folder: str):
        """Walk *folder* recursively and (re)index every .docx and
        .xlsx file found."""
        self.docs = []
        for root, _dirs, files in os.walk(folder):
            for file in files:
                file_path = os.path.join(root, file)
                if file.lower().endswith('.docx'):
                    self._load_docx(file, file_path)
                elif file.lower().endswith('.xlsx'):
                    self._load_xlsx(file, file_path)
        print(f"文档加载完成，共加载 {len(self.docs)} 条记录.")

    def compute_embedding(self, text: str) -> np.ndarray:
        """BERT sentence embedding (pooler output, falling back to the
        mean of the last hidden states) as a float32 vector."""
        inputs = self.tokenizer(text, truncation=True, padding='max_length',
                                max_length=512, return_tensors='pt')
        with torch.no_grad():
            outputs = self.model(**inputs)
        if outputs.pooler_output is not None:
            vec = outputs.pooler_output[0].cpu().numpy()
        else:
            vec = outputs.last_hidden_state[0].mean(dim=0).cpu().numpy()
        return vec.astype('float32')

    def _doc_embedding(self, doc: Dict[str, Any]) -> np.ndarray:
        """Embedding for one document, computed once and cached by id."""
        doc_id = doc["id"]
        if doc_id not in self.embedding_cache:
            self.embedding_cache[doc_id] = self.compute_embedding(doc["text_orig"])
        return self.embedding_cache[doc_id]

    def search(self, query: str) -> Dict[str, List[str]]:
        """Search all loaded documents for *query*.

        Returns {"direct_matches": [...], "similar_matches": [...]} of
        formatted result strings. The query is synonym-expanded; each
        document is reported at most once per result list (the original
        code listed a document once for every expansion that hit it).
        """
        query = query.strip()
        if not query:
            return {"direct_matches": [], "similar_matches": []}

        expanded_queries = self.query_expansion(query, threshold=0.7)
        all_direct_matches: List[str] = []
        all_semantic_matches: List[str] = []
        reported_direct: set = set()    # doc ids already in direct results
        reported_semantic: set = set()  # doc ids already in semantic results

        for expanded_query in expanded_queries:
            direct_matches = []
            candidate_docs = []
            q_simhash = self.compute_simhash(expanded_query)
            q_minhash = self.compute_minhash(expanded_query)
            for doc in self.docs:
                if expanded_query.lower() in doc["text_orig"].lower():
                    if doc["id"] not in reported_direct:
                        reported_direct.add(doc["id"])
                        direct_matches.append(doc)
                elif doc["id"] not in reported_semantic:
                    sim_dist = q_simhash.distance(doc["simhash"])
                    jaccard_sim = q_minhash.jaccard(doc["minhash"])
                    if sim_dist <= SIMHASH_THRESHOLD or jaccard_sim >= MINHASH_THRESHOLD:
                        candidate_docs.append(doc)

            for doc in direct_matches:
                if doc["type"] == "xlsx":
                    out_str = f"文件: {doc['file']} - 行: {doc.get('row', '')} | A: {doc.get('A', '')}, B: {doc.get('B', '')}, C: {doc.get('C', '')}"
                else:
                    snippet = get_snippet(doc["text_orig"], expanded_query)
                    out_str = f"文件: {doc['file']} | 摘要: {snippet}"
                tokens_info = self.get_matched_tokens(expanded_query, doc["text_clean"])
                tokens_str = ", ".join(f"{item['token']}:{item['similarity']}" for item in tokens_info)
                if tokens_str:
                    out_str += f" | 匹配到: {tokens_str}"
                all_direct_matches.append(out_str)

            if candidate_docs:
                query_vec = self.compute_embedding(expanded_query).reshape(1, -1)
                candidate_vecs = np.array(
                    [self._doc_embedding(doc) for doc in candidate_docs], dtype='float32')
                # Normalize both sides so inner product == cosine similarity.
                faiss.normalize_L2(candidate_vecs)
                faiss.normalize_L2(query_vec)
                ann_index = faiss.IndexFlatIP(candidate_vecs.shape[1])
                ann_index.add(candidate_vecs)
                k = min(MAX_RESULTS, len(candidate_docs))
                if k > 0:
                    D, I = ann_index.search(query_vec, k)
                    for idx, dist in zip(I[0], D[0]):
                        if idx < 0 or idx >= len(candidate_docs):
                            continue
                        doc = candidate_docs[idx]
                        reported_semantic.add(doc["id"])
                        tokens_info = self.get_matched_tokens(expanded_query, doc["text_clean"])
                        tokens_str = ", ".join(f"{item['token']}:{item['similarity']}" for item in tokens_info)
                        if doc["type"] == "xlsx":
                            out_str = (f"文件: {doc['file']} - 行: {doc.get('row', '')} | A: {doc.get('A', '')}, "
                                       f"B: {doc.get('B', '')}, C: {doc.get('C', '')} | 相似度: {round(dist, 3)} | Token匹配: {tokens_str}")
                        else:
                            snippet = get_snippet(doc["text_orig"], expanded_query)
                            out_str = f"文件: {doc['file']} | 摘要: {snippet} | 相似度: {round(dist, 3)} | Token匹配: {tokens_str}"
                        all_semantic_matches.append(out_str)
        print(f"查询 \"{query}\" -> 直接匹配: {len(all_direct_matches)} 条, 语义匹配: {len(all_semantic_matches)} 条")
        return {"direct_matches": all_direct_matches, "similar_matches": all_semantic_matches}


# ---------------------------
# 测试模式
# ---------------------------
def run_cli():
    """Interactive console loop.

    Reads queries from stdin, prints direct and semantic matches via
    the module-level `searcher`, and stops on exit/quit/q, EOF, or
    Ctrl-C.
    """
    print("进入CLI测试模式。输入查询关键词，输入 exit 或 quit 退出。")
    while True:
        try:
            query = input("请输入查询词: ").strip()
        except (EOFError, KeyboardInterrupt):
            print("\n退出CLI模式。")
            return
        if query.lower() in ("exit", "quit", "q"):
            print("退出CLI模式。")
            return
        if not query:
            continue
        hits = searcher.search(query)
        direct = hits["direct_matches"]
        if direct:
            print(f"直接匹配结果（{len(direct)}条）：")
            for line in direct:
                print(line)
        else:
            print("直接匹配结果：无")
        similar = hits["similar_matches"]
        if similar:
            print(f"语义匹配结果（{len(similar)}条）：")
            for line in similar:
                print(line)
        else:
            print("语义匹配结果：无")
        print("-" * 50)

# ---------------------------
# 主程序
# ---------------------------
if __name__ == "__main__":
    # Command-line entry point: parse options, build the search engine,
    # then drop into the interactive CLI.
    parser = argparse.ArgumentParser(description="中文条款匹配系统")
    parser.add_argument("--dir", type=str, default=DOCUMENTS_DIR, help="文档目录路径")
    parser.add_argument("--use_jieba", action="store_true", help="启用jieba分词（默认启用）")
    parser.add_argument("--no_jieba", action="store_true", help="禁用jieba分词，使用BERT自带分词")
    args = parser.parse_args()

    config = {
        "use_jieba": not args.no_jieba,  # jieba stays on unless --no_jieba is given
        "folder_path": args.dir,
        "model_name": "bert-base-chinese",
        "simhash_threshold": SIMHASH_THRESHOLD,
        "minhash_threshold": MINHASH_THRESHOLD,
        "max_results": MAX_RESULTS,
    }

    # Module-level on purpose: run_cli() reads this global name.
    searcher = AdvancedChineseSearch(config)
    run_cli()