from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import OpenAIEmbeddings
from langchain.schema import Document
from typing import List, Dict
import logging

logger = logging.getLogger(__name__)

class PaperVectorStore:
    """Vector store for paper summaries, backed by Chroma with OpenAI embeddings."""

    def __init__(self, api_key: str, persist_dir: str = "chroma_db"):
        """Initialize the vector store.

        Args:
            api_key: OpenAI API key used to generate embeddings.
            persist_dir: Directory where the Chroma index is persisted.
        """
        self.embedding = OpenAIEmbeddings(openai_api_key=api_key)
        self.db = Chroma(
            embedding_function=self.embedding,
            persist_directory=persist_dir
        )

    @staticmethod
    def _to_scalar(value):
        """Coerce a metadata value into a Chroma-compatible scalar.

        Chroma only accepts str/int/float/bool metadata values; lists or
        tuples (presumably author/category lists — confirm against the
        upstream paper schema) are joined into a single comma-separated
        string so insertion does not fail.
        """
        if isinstance(value, (list, tuple)):
            return ", ".join(str(item) for item in value)
        return value

    def store_papers(self, papers: List[Dict]) -> bool:
        """Store paper records as embedded documents.

        Args:
            papers: List of paper dicts; each must provide 'summary',
                'title', 'authors', 'published' and 'categories' keys.

        Returns:
            True on success (including an empty input, which is a no-op),
            False if embedding or insertion raised.
        """
        # Empty batch: nothing to embed; avoid calling add_documents([]).
        if not papers:
            return True
        try:
            docs = [
                Document(
                    page_content=paper['summary'],
                    metadata={
                        'title': paper['title'],
                        'authors': self._to_scalar(paper['authors']),
                        'published': str(paper['published']),
                        'categories': self._to_scalar(paper['categories'])
                    }
                ) for paper in papers
            ]

            self.db.add_documents(docs)
            self.db.persist()
            # Lazy %-formatting so the message is only built if emitted.
            logger.info("成功存储 %d 篇论文", len(papers))
            return True

        except Exception:
            # logger.exception records the full traceback, not just str(e).
            logger.exception("论文存储失败")
            return False

    def search_similar(self, query: str, k: int = 5) -> List[Dict]:
        """Retrieve the k stored papers most similar to the query.

        Args:
            query: Free-text search query.
            k: Maximum number of results to return.

        Returns:
            List of dicts with 'title', 'authors', 'score' (Chroma
            relevance/distance score for the match) and 'content'.
            Empty list on failure.
        """
        try:
            # BUG FIX: the previous code called similarity_search() and read
            # a 'score' metadata key that was never written at storage time,
            # so every result reported score 0. similarity_search_with_score
            # returns (document, score) pairs with the real value.
            results = self.db.similarity_search_with_score(query, k=k)
            return [
                {
                    'title': doc.metadata['title'],
                    'authors': doc.metadata['authors'],
                    'score': score,
                    'content': doc.page_content
                } for doc, score in results
            ]
        except Exception:
            logger.exception("检索失败")
            return []