
import os
import uuid
from typing import Dict,List,Optional
import dashscope  # Alibaba Tongyi Qianwen (DashScope) SDK
from dashscope import TextEmbedding
from chromadb import Client,Settings
from chromadb.api.models.Collection import Collection

# Configuration: 1) ChromaDB persistence path  2) DashScope API key  3) embedding model
CHROMA_DB_PATH = './ragchroma_db'
# SECURITY: the API key below is committed to source — rotate it and set the
# DASHSCOPE_API_KEY environment variable instead. The literal is kept only as a
# backward-compatible fallback.
DASHSCOPE_API_KEY = os.environ.get('DASHSCOPE_API_KEY',
                                   'sk-4a106bdba4ce4b01b836651877585128')
EMBEDDING_MODEL = 'text-embedding-v4'

class VectorStore:
    """Stores document chunks in ChromaDB using Qwen (DashScope) embeddings.

    Embedding failures are tolerated: a 1024-dim zero vector is substituted,
    so stored vectors always line up one-to-one with input texts.
    """

    def __init__(self, api_key: str = DASHSCOPE_API_KEY):
        """Set the DashScope API key and open a persistent ChromaDB client.

        Args:
            api_key: DashScope API key used for all embedding calls.
        """
        dashscope.api_key = api_key
        # Legacy chromadb Settings API (duckdb+parquet backend, persisted on disk).
        self.client = Client(Settings(
            chroma_db_impl='duckdb+parquet',
            persist_directory=CHROMA_DB_PATH,
            anonymized_telemetry=False
        ))
        self.collection: Optional[Collection] = None
        print("向量数据库初始化完成")

    def _call_embedding_api(self, texts: List[str]) -> List[List[float]]:
        """Embed each text with the Qwen embedding model, one API call per text.

        Returns:
            One vector per input text, in order. On any API error a 1024-dim
            zero vector is substituted so ``len(result) == len(texts)``.
        """
        embeddings: List[List[float]] = []
        for text in texts:
            # Truncate very long text; the model supports at most ~8000 tokens.
            truncated_text = text.strip()[:5000]
            if not truncated_text:
                # The API rejects empty input, so send a placeholder.
                truncated_text = '空内容'
            try:
                response = TextEmbedding.call(
                    model=EMBEDDING_MODEL,
                    input=truncated_text
                )
                if response.status_code == 200:
                    embeddings.append(response.output['embeddings'][0]['embedding'])
                else:
                    # BUG FIX: the status code was wrapped in a throwaway list,
                    # printing e.g. "[429]" instead of "429".
                    print(f'嵌入失败{response.status_code}:{response.message}')
                    embeddings.append([0.0] * 1024)
            except Exception as e:
                print(f'调用嵌入模型失败，{e},使用零向量替代')
                embeddings.append([0.0] * 1024)

        return embeddings

    def create_or_get_collection(self, name: str = 'rag_docs') -> Collection:
        """Create (or fetch) the named collection and make it the current one."""
        self.collection = self.client.get_or_create_collection(name=name)
        print(f'使用向量数据集 {name}')
        return self.collection

    def add_chunks_to_vector_db(self, chunks: List[Dict], collection_name: str = 'rag_docs'):
        """Embed the given chunks and store them in the target collection.

        Args:
            chunks: dicts with 'content' (str) and 'metadata' (dict) keys.
            collection_name: target collection; switched to if not current.
        """
        if self.collection is None or self.collection.name != collection_name:
            self.create_or_get_collection(collection_name)
        if not chunks:
            # Nothing to embed; chromadb rejects empty add() payloads.
            return
        texts = [chunk['content'] for chunk in chunks]
        metadatas = [chunk['metadata'] for chunk in chunks]
        ids = [str(uuid.uuid4()) for _ in texts]
        print(f'正在使用通义千问进行向量化，{len(texts)} 个文本块……')

        # Embed in small batches (the API allows ~5 calls per second).
        # NOTE(review): there is no sleep between batches, so batching alone
        # does not actually throttle — confirm whether the SDK rate-limits.
        batch_size = 5
        all_embeddings: List[List[float]] = []
        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i + batch_size]
            all_embeddings.extend(self._call_embedding_api(batch_texts))
            print(f'向量化进度： {i + len(batch_texts)}/{len(texts)}')

        self.collection.add(
            embeddings=all_embeddings,
            documents=texts,
            metadatas=metadatas,
            ids=ids
        )
        print(f'成功将 {len(texts)}个文本块向量化存储到向量数据库{collection_name}中')

    def persist(self):
        """Flush the database to disk (legacy chromadb persistence API)."""
        self.client.persist()
        print(f'向量数据已经保持到磁盘')

    def query_similar(self, query_text: str, n_result: int = 3) -> Dict:
        """Return the ``n_result`` stored chunks most similar to ``query_text``.

        Raises:
            RuntimeError: if no collection has been created or loaded yet.
        """
        if self.collection is None:
            raise RuntimeError("请先创建或者加载集合")

        query_text = query_text.strip()[:500] or "查询为空"
        # BUG FIX: _call_embedding_api returns a LIST of vectors; take the
        # single vector here. The original wrapped the whole list again,
        # passing a triple-nested query_embeddings argument to chromadb.
        query_embedding = self._call_embedding_api([query_text])[0]

        results = self.collection.query(
            query_embeddings=[query_embedding],
            n_results=n_result
        )

        return {
            'query': query_text,
            'results': [
                {
                    'content': doc,
                    'metadata': meta,
                    'distance': float(dist)
                }
                # BUG FIX: chromadb's query result uses the plural key
                # 'metadatas'; results['metadata'] raised KeyError.
                for doc, meta, dist in zip(
                    results['documents'][0],
                    results['metadatas'][0],
                    results['distances'][0]
                )
            ]
        }

def test():
    """End-to-end smoke test: chunk a sample document, embed, store, query."""
    from document_chunking import SmartDocumentChunker

    sample_text = """高血压（hypertension）是指以体循环动脉血压（收缩压和/或舒张压）增高为特征的慢性疾病，
    收缩压≥140毫米汞柱，舒张压≥90毫米汞柱，可伴有心、脑、肾等器官的功能或器质性损害。
    高血压是最常见的慢性病，也是心脑血管病最主要的危险因素。"""

    splitter = SmartDocumentChunker(chunk_size=56, chunk_overlap=10)
    pieces = splitter.chunk_document(sample_text)
    print(f'分块数量{len(pieces)}')

    store = VectorStore()
    store.add_chunks_to_vector_db(chunks=pieces, collection_name='rag_docs')
    store.persist()

    # Run a few retrieval queries against the freshly built index.
    questions = (
        '高血压怎么诊断？',
        '高血压有什么症状',
        '高血压怎么治疗',
    )
    for question in questions:
        hit = store.query_similar(question, n_result=1)
        print(f"\n 问题: {hit['query']}")
        for idx, item in enumerate(hit['results']):
            print(f"答案{idx+1} (相似度距离： {item['distance']:.4f}):")
            print(f" {item['content']} ")

# Script entry point: run the end-to-end smoke test.
if __name__ == '__main__':
    test()