from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import DashScopeEmbeddings
from langchain.vectorstores import FAISS
from langchain.docstore.document import Document
from typing import List, Dict
import os
import docx

class LegalDocumentProcessor:
    """Load legal documents (txt/doc/docx), split them into chunks, and
    expose semantic search over a FAISS vector store built with
    DashScope embeddings.
    """

    def __init__(self, docs_dir: str = 'legal_docs'):
        """
        Args:
            docs_dir: Directory containing the legal documents. Created
                on first use if it does not exist.
        """
        self.docs_dir = docs_dir
        self.embeddings = DashScopeEmbeddings()
        # Built lazily: None until create_vector_store() succeeds.
        self.vector_store = None
        self._ensure_docs_dir()

    def _ensure_docs_dir(self) -> None:
        """Ensure the legal-documents directory exists."""
        if not os.path.exists(self.docs_dir):
            os.makedirs(self.docs_dir)

    def load_documents(self) -> List[Document]:
        """Load all legal documents and split them into chunks.

        Returns:
            List of chunked ``Document`` objects (NOT raw strings — the
            original ``List[str]`` annotation was incorrect). Each chunk
            carries a ``source`` entry in its metadata.
        """
        documents = []

        # Load plain-text files. autodetect_encoding avoids a hard
        # UnicodeDecodeError on non-UTF-8 (e.g. GBK-encoded) legal texts.
        txt_loader = DirectoryLoader(
            self.docs_dir,
            glob="**/*.txt",
            loader_cls=TextLoader,
            loader_kwargs={'autodetect_encoding': True}
        )
        documents.extend(txt_loader.load())

        # Load Word files. NOTE: python-docx only supports the modern
        # .docx format; legacy binary .doc files will raise and be
        # skipped by the except branch below (best-effort by design).
        for root, _, files in os.walk(self.docs_dir):
            for file in files:
                if file.endswith(('.doc', '.docx')):
                    file_path = os.path.join(root, file)
                    try:
                        doc = docx.Document(file_path)
                        text = '\n'.join([paragraph.text for paragraph in doc.paragraphs])
                        documents.append(Document(
                            page_content=text,
                            metadata={'source': file_path}
                        ))
                    except Exception as e:
                        # Best-effort: report the failure and keep going
                        # so one corrupt file doesn't abort the ingest.
                        print(f"Error processing {file_path}: {str(e)}")
                        continue

        # Split into overlapping chunks suitable for embedding.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len
        )
        texts = text_splitter.split_documents(documents)
        return texts

    def create_vector_store(self) -> None:
        """Build the FAISS vector store from the loaded documents.

        Leaves ``self.vector_store`` as None when no documents were
        found, so callers can detect the empty case.
        """
        texts = self.load_documents()
        if texts:
            self.vector_store = FAISS.from_documents(texts, self.embeddings)

    def search_relevant_laws(self, query: str, k: int = 5) -> List[Dict]:
        """Search for legal provisions relevant to *query*.

        Args:
            query: Free-text search query.
            k: Maximum number of results to return.

        Returns:
            List of dicts with ``content``, ``source`` and ``score``
            keys; empty when no vector store could be built. Lower
            scores mean closer matches (FAISS distance).
        """
        # Lazily build the index on first search.
        if not self.vector_store:
            self.create_vector_store()

        # Still None => docs dir was empty; nothing to search.
        if not self.vector_store:
            return []

        results = self.vector_store.similarity_search_with_score(query, k=k)
        return [{
            'content': doc.page_content,
            'source': doc.metadata.get('source', 'unknown'),
            # float() converts FAISS's numpy scalar so the dict is
            # JSON-serializable for API responses.
            'score': float(score)
        } for doc, score in results]