import argparse
import os
import re
from collections import defaultdict
from string import punctuation as en_punctuation
from typing import Any, Dict, List, Optional, Tuple

import faiss
import jieba
import numpy as np
import synonyms
import torch
import uvicorn
from datasketch import MinHash
from docx import Document
from fastapi import FastAPI
from openpyxl import load_workbook
from simhash import Simhash
from transformers import BertTokenizer, BertModel
from zhon.hanzi import punctuation as zh_punctuation

# ---------------------------
# 全局配置参数
# ---------------------------
SIMHASH_THRESHOLD = 5          # Max SimHash Hamming distance to count as a fuzzy match
MINHASH_THRESHOLD = 0.5        # Min MinHash Jaccard similarity to count as a fuzzy match
MAX_RESULTS = 5                # Max number of results returned per query
DOCUMENTS_DIR = os.getcwd()    # Default directory scanned for documents
BERT_EMBEDDING_DIM = 768       # Dimensionality of bert-base [CLS] embeddings
CONTEXT_WINDOW = 50            # Characters of context kept on each side of a snippet match

# ---------------------------
# 工具函数
# ---------------------------
def clean_text(text: str) -> str:
    """Strip Chinese and English punctuation, then collapse whitespace runs.

    Every punctuation character becomes a space first, so adjacent words
    separated only by punctuation stay separated after cleaning.
    """
    punct_class = "[" + re.escape(zh_punctuation + en_punctuation) + "]"
    without_punct = re.sub(punct_class, " ", text)
    # split()/join normalizes all whitespace (incl. newlines) to single spaces
    # and drops leading/trailing whitespace in one pass.
    return " ".join(without_punct.split())

def get_snippet(text: str, query: Optional[str] = None) -> str:
    """Build a one-line display snippet of *text*, centered on *query* if found.

    Args:
        text: Full document text.
        query: Optional search term; matched case-insensitively.

    Returns:
        A snippet with CONTEXT_WINDOW characters of context on each side of
        the first query occurrence, "..." markers where the text is truncated,
        and all newlines flattened to spaces. Falls back to the first 100
        characters when the query is empty or absent from the text.
    """
    # Locate the query (case-insensitive); -1 also covers the no-query case.
    start_idx = text.lower().find(query.lower()) if query else -1
    if start_idx == -1:
        # Bug fix: the fallback paths previously returned raw text, leaking
        # newlines into the single-line snippet; flatten here too.
        snippet = text[:100] + ("..." if len(text) > 100 else "")
        return snippet.replace("\n", " ")

    # Expand a context window around the match, clamped to the text bounds.
    end_idx = start_idx + len(query)
    start = max(0, start_idx - CONTEXT_WINDOW)
    end = min(len(text), end_idx + CONTEXT_WINDOW)

    snippet = text[start:end]
    if start > 0:
        snippet = "..." + snippet
    if end < len(text):
        snippet += "..."
    return snippet.replace("\n", " ")

# ---------------------------
# 核心搜索类
# ---------------------------
class AdvancedChineseSearch:
    """Hybrid Chinese document search over DOCX/XLSX files.

    Combines three retrieval strategies, keeping each document's best score:
      1. Semantic search: BERT [CLS] embeddings + Faiss inner-product index
         over L2-normalized vectors (i.e. cosine similarity).
      2. Exact substring match on punctuation-cleaned text.
      3. Fuzzy match via SimHash (Hamming distance) and MinHash (Jaccard).
    """

    def __init__(self, config: Dict[str, Any]):
        """Load the BERT model, ingest documents and build the search index.

        Args:
            config: Requires "model_name" (HuggingFace model id) and
                "folder_path" (directory scanned recursively for
                .docx/.xlsx files).
        """
        self.config = config
        self.docs: List[Dict[str, Any]] = []   # one record per paragraph-set / xlsx row
        self.embedding_cache = None            # (n_docs, BERT_EMBEDDING_DIM) float32
        self.faiss_index = None

        # NLP model for semantic embeddings (inference only).
        self.tokenizer = BertTokenizer.from_pretrained(config["model_name"])
        self.model = BertModel.from_pretrained(config["model_name"])
        self.model.eval()

        # Ingest documents, then build the dense index over them.
        self._load_documents(config["folder_path"])
        self._precompute_embeddings()
        self._build_faiss_index()

    def _load_documents(self, folder: str):
        """Recursively scan *folder* and ingest every .docx/.xlsx file."""
        print(f"开始加载文档：{folder}")
        for root, _, files in os.walk(folder):
            for file in files:
                file_path = os.path.join(root, file)
                if file.lower().endswith('.docx'):
                    self._load_docx(file, file_path)
                elif file.lower().endswith('.xlsx'):
                    self._load_xlsx(file, file_path)
        print(f"文档加载完成，共加载{len(self.docs)}条记录")

    def _load_docx(self, file: str, file_path: str):
        """Ingest one DOCX file: all paragraphs plus table rows ('|'-joined)."""
        try:
            doc = Document(file_path)
            content = []
            for para in doc.paragraphs:
                if para.text.strip():
                    content.append(para.text.strip())
            for table in doc.tables:
                for row in table.rows:
                    content.append(" | ".join(cell.text.strip() for cell in row.cells))
            full_text = "\n".join(content)
            if full_text:
                self._add_document(file, "docx", full_text)
        except Exception as e:
            # Best-effort loading: a corrupt file must not abort the whole scan.
            print(f"加载DOCX失败：{file_path} - {str(e)}")

    def _load_xlsx(self, file: str, file_path: str):
        """Ingest one XLSX file: every non-empty row becomes one document."""
        try:
            wb = load_workbook(file_path, read_only=True)
            try:
                for sheet in wb:
                    for row_idx, row in enumerate(sheet.iter_rows(values_only=True), 1):
                        # Bug fix: test against None, not truthiness, so
                        # legitimate 0 / False cell values are kept.
                        cells = [str(c).strip() for c in row if c is not None]
                        cells = [c for c in cells if c]
                        if not cells:
                            continue
                        meta = {
                            "file": file,
                            "sheet": sheet.title,
                            "row": row_idx,
                            "type": "xlsx",
                        }
                        self._add_document(file, "xlsx", " | ".join(cells), meta)
            finally:
                # Bug fix: close the workbook even when a sheet raises.
                wb.close()
        except Exception as e:
            print(f"加载XLSX失败：{file_path} - {str(e)}")

    def _add_document(self, filename: str, doc_type: str, text: str, meta: Dict = None):
        """Append one document record (tokens + SimHash/MinHash signatures)."""
        doc_id = len(self.docs)
        clean_content = clean_text(text)
        tokens = jieba.lcut(clean_content)

        self.docs.append({
            "id": doc_id,
            "file": filename,
            "type": doc_type,
            "content": text,               # raw text, used for snippets
            "clean_content": clean_content,  # punctuation-free, used for matching
            "tokens": tokens,
            "simhash": Simhash(tokens),
            "minhash": self._compute_minhash(tokens),
            "meta": meta or {},
        })

    def _embed(self, text: str) -> np.ndarray:
        """Encode *text* into a (1, BERT_EMBEDDING_DIM) float32 [CLS] embedding.

        Shared by document precomputation and query encoding so both sides
        are guaranteed to use identical tokenization/pooling.
        """
        inputs = self.tokenizer(
            text,
            max_length=512,
            truncation=True,
            return_tensors="pt",
            padding="max_length",
        )
        with torch.no_grad():
            outputs = self.model(**inputs)
        # [CLS] token embedding as the sequence representation.
        return outputs.last_hidden_state[:, 0, :].cpu().numpy().astype('float32')

    def _precompute_embeddings(self):
        """Precompute one embedding per document (key query-time optimization)."""
        print("开始预计算文档嵌入...")
        if not self.docs:
            # Bug fix: np.vstack([]) raises on an empty corpus; keep a valid
            # (0, dim) matrix so index construction still succeeds.
            self.embedding_cache = np.empty((0, BERT_EMBEDDING_DIM), dtype='float32')
        else:
            self.embedding_cache = np.vstack(
                [self._embed(doc["clean_content"]) for doc in self.docs]
            )
        print(f"嵌入预计算完成，维度：{self.embedding_cache.shape}")

    def _build_faiss_index(self):
        """Build the Faiss index; inner product on unit vectors == cosine."""
        print("构建Faiss索引...")
        self.faiss_index = faiss.IndexFlatIP(BERT_EMBEDDING_DIM)
        if self.embedding_cache.shape[0] > 0:
            # normalize_L2 mutates the cache in place.
            faiss.normalize_L2(self.embedding_cache)
            self.faiss_index.add(self.embedding_cache)
        print(f"索引构建完成，包含{self.faiss_index.ntotal}条记录")

    def _compute_minhash(self, tokens: List[str], num_perm: int = 128) -> MinHash:
        """Return the MinHash signature of the token *set* (order-insensitive)."""
        m = MinHash(num_perm=num_perm)
        for token in set(tokens):
            m.update(token.encode('utf-8'))
        return m

    def _semantic_search(self, query: str) -> List[Tuple[int, float]]:
        """Top-MAX_RESULTS (doc_id, cosine score) pairs for *query*."""
        # Guard: searching an empty index is meaningless (and Faiss would
        # return only -1 padding anyway).
        if self.faiss_index is None or self.faiss_index.ntotal == 0:
            return []
        query_embedding = self._embed(query)
        faiss.normalize_L2(query_embedding)
        scores, indices = self.faiss_index.search(query_embedding, MAX_RESULTS)
        # NOTE: indices may contain -1 padding when fewer than k docs exist;
        # callers must filter those out.
        return list(zip(indices[0], scores[0]))

    def _exact_match_search(self, query: str) -> List[int]:
        """Doc ids whose cleaned content contains the cleaned query verbatim."""
        clean_query = clean_text(query)
        return [
            doc["id"] for doc in self.docs
            if clean_query.lower() in doc["clean_content"].lower()
        ]

    def _similarity_match_search(self, query: str) -> List[Tuple[int, float]]:
        """Fuzzy (doc_id, score) pairs via SimHash distance and MinHash Jaccard.

        A document may appear twice (once per signal); the caller keeps the max.
        """
        query_tokens = jieba.lcut(clean_text(query))
        query_simhash = Simhash(query_tokens)
        # Bug fix (perf): the query MinHash is loop-invariant; it was
        # recomputed once per document before.
        query_minhash = self._compute_minhash(query_tokens)
        results = []

        for doc in self.docs:
            # SimHash: map Hamming distance [0, threshold] onto score [1, 0].
            sim_dist = query_simhash.distance(doc["simhash"])
            if sim_dist <= SIMHASH_THRESHOLD:
                results.append((doc["id"], 1 - sim_dist / SIMHASH_THRESHOLD))

            # MinHash: estimated Jaccard similarity of the token sets.
            jaccard = query_minhash.jaccard(doc["minhash"])
            if jaccard >= MINHASH_THRESHOLD:
                results.append((doc["id"], float(jaccard)))

        return results

    def search(self, query: str) -> List[Dict]:
        """Run all three strategies, keep each doc's best score, return top hits.

        Returns:
            Up to MAX_RESULTS dicts (file, snippet, score, type, meta),
            sorted by descending score; scores below 0.4 are dropped.
        """
        query = query.strip()
        if not query:
            return []

        # doc_id -> best score seen across strategies.
        results = defaultdict(float)

        # 1. Semantic search.
        for doc_id, score in self._semantic_search(query):
            if doc_id >= 0:  # skip Faiss -1 padding
                # Convert numpy int64/float32 to plain Python types so the
                # result dicts are JSON-serializable by FastAPI.
                doc_id = int(doc_id)
                results[doc_id] = max(results[doc_id], float(score))

        # 2. Exact substring match always scores 1.0.
        for doc_id in self._exact_match_search(query):
            results[doc_id] = max(results[doc_id], 1.0)

        # 3. Fuzzy signature match.
        for doc_id, score in self._similarity_match_search(query):
            results[doc_id] = max(results[doc_id], score)

        # Rank, filter weak matches, and shape the response records.
        sorted_results = []
        for doc_id, score in sorted(results.items(), key=lambda x: x[1], reverse=True):
            if score < 0.4:  # drop low-confidence results
                continue
            doc = self.docs[doc_id]
            sorted_results.append({
                "file": doc["file"],
                "snippet": get_snippet(doc["content"], query),
                "score": round(score, 3),
                "type": doc["type"],
                "meta": doc["meta"],
            })

        return sorted_results[:MAX_RESULTS]

# ---------------------------
# API 服务
# ---------------------------
app = FastAPI(title="智能文档搜索系统")
# Shared searcher instance; stays None until the FastAPI startup hook runs.
searcher = None

@app.on_event("startup")
async def startup_event():
    """Build the shared searcher once when the API server boots.

    Reads DOCUMENTS_DIR at startup time, so the __main__ block may override
    it before the server starts.
    """
    global searcher
    searcher = AdvancedChineseSearch({
        "model_name": "bert-base-chinese",
        "folder_path": DOCUMENTS_DIR,
    })

@app.get("/search")
async def api_search(query: str):
    """HTTP search endpoint; delegates to the shared searcher instance."""
    hits = searcher.search(query)
    return hits

# ---------------------------
# CLI 接口
# ---------------------------
def run_cli():
    """Interactive search loop on stdin/stdout.

    Builds a searcher over DOCUMENTS_DIR (read at call time, so the
    __main__ block can override it first) and prompts until the user types
    'exit'/'quit', presses Ctrl-C, or closes stdin.
    """
    print(">>> 文档搜索系统CLI模式（输入 'exit' 退出）")
    config = {
        "model_name": "bert-base-chinese",
        "folder_path": DOCUMENTS_DIR
    }
    searcher = AdvancedChineseSearch(config)

    while True:
        try:
            query = input("请输入搜索内容：").strip()
            if query.lower() in ('exit', 'quit'):
                break
            if not query:
                continue

            results = searcher.search(query)
            print(f"\n找到 {len(results)} 条相关结果：")
            for idx, res in enumerate(results, 1):
                print(f"{idx}. [{res['type'].upper()}] {res['file']}")
                print(f"   匹配度：{res['score']:.2f}")
                print(f"   内容：{res['snippet']}")
                # XLSX hits carry sheet/row metadata; DOCX meta is empty.
                if res['meta'].get('sheet'):
                    print(f"   位置：{res['meta']['sheet']} 工作表第{res['meta']['row']}行")
                print()
        except (KeyboardInterrupt, EOFError):
            # Bug fix: EOFError (Ctrl-D / piped stdin exhausted) previously
            # crashed the loop with a traceback; exit cleanly like Ctrl-C.
            break

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="文档搜索系统")
    parser.add_argument("--api", action="store_true", help="启动API服务")
    parser.add_argument("--dir", type=str, default=DOCUMENTS_DIR, help="文档目录路径")
    args = parser.parse_args()
    
    if args.api:
        uvicorn.run(app, host="0.0.0.0", port=8000)
    else:
        run_cli()