import json
import os
from typing import List, Tuple
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import PyPDF2


class SemanticDocumentSplitter:
    """Semantic-similarity-based document splitter and retriever.

    Splits raw text into size-bounded chunks, embeds every chunk with a
    SentenceTransformer model, and retrieves the chunks most similar to a
    query via cosine similarity.
    """

    def __init__(self, chunk_size: int = 200, model_name: str = 'all-MiniLM-L6-v2'):
        """Create a splitter.

        Args:
            chunk_size: Maximum character length of a chunk before a new
                chunk is started.
            model_name: SentenceTransformer model used for embeddings.
        """
        self.chunk_size = chunk_size
        self.model = SentenceTransformer(model_name)
        self.chunks = []          # indexed text chunks
        self.chunk_sources = []   # source file name per chunk (parallel to self.chunks)
        self.chunk_embeddings = None  # numpy array of embeddings, set by index_*()

    def split_text(self, text: str) -> List[str]:
        """Split *text* into chunks, trying to keep semantic units intact.

        Blank lines always end the current chunk; otherwise lines are
        accumulated until adding the next line would exceed ``chunk_size``
        characters. Chunks with fewer than 3 whitespace-separated tokens
        are discarded (NOTE: this also drops short CJK-only chunks, which
        have no whitespace — confirm this is intended for Chinese docs).

        Returns:
            List of non-trivial text chunks, in document order.
        """
        lines = text.strip().split('\n')
        chunks = []
        current_chunk = ""

        for line in lines:
            line = line.strip()
            if not line:
                # Blank line: flush the current chunk, start fresh.
                if current_chunk:
                    chunks.append(current_chunk.strip())
                    current_chunk = ""
                continue

            if (current_chunk and len(current_chunk + '\n' + line) > self.chunk_size):
                # Adding this line would overflow the chunk — flush first.
                if current_chunk:
                    chunks.append(current_chunk.strip())
                current_chunk = line
            else:
                if current_chunk:
                    current_chunk += '\n' + line
                else:
                    current_chunk = line

        if current_chunk:
            chunks.append(current_chunk.strip())

        # Drop trivially short chunks (fewer than 3 tokens).
        chunks = [chunk for chunk in chunks if len(chunk.split()) >= 3]
        return chunks

    def extract_text_from_pdf(self, pdf_path: str) -> str:
        """Extract and concatenate the text of every page of a PDF.

        Returns an empty string (and prints a message) on any read error,
        so callers can treat an unreadable PDF as an empty document.
        """
        try:
            with open(pdf_path, 'rb') as file:
                pdf_reader = PyPDF2.PdfReader(file)
                text = ""
                for page in pdf_reader.pages:
                    text += page.extract_text() + "\n"
                return text
        except Exception as e:
            print(f"读取PDF文件 {pdf_path} 失败: {e}")
            return ""

    def index_directory(self, directory_path: str):
        """Index every ``*.pdf`` file directly inside *directory_path*.

        Replaces any previously indexed content. Prints progress; returns
        early (leaving existing state untouched) when the directory does
        not exist or contains no PDFs.
        """
        all_chunks = []
        all_sources = []

        if not os.path.exists(directory_path):
            print(f"目录不存在: {directory_path}")
            return

        pdf_files = [f for f in os.listdir(directory_path) if f.lower().endswith('.pdf')]

        if not pdf_files:
            print(f"目录中没有找到PDF文件: {directory_path}")
            return

        print(f"发现 {len(pdf_files)} 个PDF文件")

        for pdf_file in pdf_files:
            pdf_path = os.path.join(directory_path, pdf_file)
            print(f"处理文件: {pdf_file}")

            text = self.extract_text_from_pdf(pdf_path)
            if text.strip():
                file_chunks = self.split_text(text)
                all_chunks.extend(file_chunks)
                # Remember which file each chunk came from.
                all_sources.extend([pdf_file] * len(file_chunks))

        if all_chunks:
            self.chunks = all_chunks
            self.chunk_sources = all_sources
            print(f"正在为 {len(self.chunks)} 个文档片段生成语义嵌入...")
            self.chunk_embeddings = self.model.encode(self.chunks, show_progress_bar=True)
            print(f"语义嵌入完成，向量维度: {self.chunk_embeddings.shape}")

    def index_document(self, text: str, source_name: str = "文档"):
        """Index a single in-memory document, replacing previous state.

        Args:
            text: Raw document text to split and embed.
            source_name: Label reported as the source of every chunk.
        """
        self.chunks = self.split_text(text)
        self.chunk_sources = [source_name] * len(self.chunks)
        if self.chunks:
            print(f"正在为 {len(self.chunks)} 个文档片段生成语义嵌入...")
            self.chunk_embeddings = self.model.encode(self.chunks, show_progress_bar=True)
            print(f"语义嵌入完成，向量维度: {self.chunk_embeddings.shape}")

    def search_similar_chunks(self, query: str, top_k: int = 3) -> List[Tuple[str, str, float]]:
        """Return the most relevant chunks as ``(content, source, similarity)``.

        Each hit is expanded with its immediate neighbouring chunks for
        extra context, but only when the neighbour comes from the SAME
        source file (bug fix: previously chunks from different PDFs could
        be concatenated across file boundaries, attributing one file's
        text to another). Hits with similarity <= 0.1 are dropped.

        Returns an empty list when nothing has been indexed yet.
        """
        if not self.chunks or self.chunk_embeddings is None:
            return []

        query_embedding = self.model.encode([query])
        similarities = cosine_similarity(query_embedding, self.chunk_embeddings)[0]
        # Indices of the top_k highest-similarity chunks, best first.
        top_indices = np.argsort(similarities)[::-1][:top_k]

        results = []
        for idx in top_indices:
            if similarities[idx] <= 0.1:
                continue
            source = self.chunk_sources[idx]
            content = self.chunks[idx]
            if idx >= 1 and self.chunk_sources[idx - 1] == source:
                content = self.chunks[idx - 1] + "\n" + content
            if idx < len(self.chunks) - 1 and self.chunk_sources[idx + 1] == source:
                content += "\n" + self.chunks[idx + 1]

            # Cast numpy float to a plain Python float for callers.
            results.append((content, source, float(similarities[idx])))

        return results

if __name__ == '__main__':
    from utils.api_connector import ApiConnector

    llm = ApiConnector()

    # Initialise the semantic document splitter.
    doc_splitter = SemanticDocumentSplitter(model_name='all-MiniLM-L6-v2')

    # Index PDFs from the docs directory when it exists; otherwise fall
    # back to a small built-in CLI document.
    pdf_directory = "/home/ubuntu/sonic-buildimage/.source/ai-tester/docs"  # PDF directory

    if os.path.exists(pdf_directory):
        doc_splitter.index_directory(pdf_directory)
    else:
        NEW_CLI_DOC = """
        配置命令

        # config视图

        no mac address-table static mac vlan vlan-id interface port-id
        mac address-table static mac vlan vlan-id interface port-id

        # 用户视图

        show mac address-table static
        """
        doc_splitter.index_document(NEW_CLI_DOC, "内置文档")

    # Load the test plan. Specify UTF-8 explicitly: the file contains
    # non-ASCII text and the platform default encoding is not reliable.
    with open('testplan.json', 'r', encoding='utf-8') as f:
        testplan = json.load(f)
        testplan = testplan[0]  # only the first test plan entry is used

    print("title: ", testplan["title"])

    # Retrieve the document fragments most relevant to the query.
    relevant_chunks = doc_splitter.search_similar_chunks("添加vlan", top_k=10)
    if relevant_chunks:
        print("配置vlan相关文档片段:")
        for j, (chunk, source, score) in enumerate(relevant_chunks):
            print(f"  片段{j+1} [来源: {source}] (相似度: {score:.4f}): {chunk}")
            print("=================================================")
    else:
        print("没有找到相关的配置vlan文档片段")