import datetime
import uuid

import torch
from qdrant_client import QdrantClient, models
from qdrant_client.http.models import PointIdsList
from qdrant_client.models import VectorParams, Distance
from sentence_transformers import SentenceTransformer

class QdrantHandler:
    """Manage a Qdrant vector collection backed by a SentenceTransformer encoder.

    Provides de-duplicating document upserts (keyed on the ``feed_url``
    payload field) and top-k semantic search over the stored documents.
    """

    def __init__(self, url, collection_name, model_name):
        """Connect to Qdrant at *url* and load the embedding model *model_name*.

        Creates *collection_name* with 1024-dim cosine vectors when it does
        not already exist.  NOTE(review): the hard-coded vector size of 1024
        must match the output dimension of *model_name* — confirm for the
        model actually deployed.
        """
        self.client = QdrantClient(url)
        self.collection_name = collection_name
        self.model = SentenceTransformer(
            model_name,
            device='cuda' if torch.cuda.is_available() else 'cpu',
            cache_folder="/mnt/hf_models/huggingface/hub",
        )

        if not self.client.collection_exists(self.collection_name):
            print(f"集合 {self.collection_name} 不存在，正在创建...")
            self.client.create_collection(
                collection_name=self.collection_name,
                vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
            )
        else:
            print(f"集合 {self.collection_name} 已经存在")

    def insert_documents(self, list_of_documents):
        """Embed and upsert each document, replacing any stale copies.

        Each document is a dict with a ``"content"`` string (the text to
        embed) and a ``"payloads"`` dict stored verbatim as the point payload.
        When the payload carries a ``"feed_url"``, every existing point with
        the same feed_url is deleted first, so the collection holds at most
        one point per feed.  Failures are logged per document and do not
        abort the batch.
        """
        current_count = self.client.count(self.collection_name, exact=True).count
        print(f"已有 {current_count} 条数据")
        print(f"list_of_documents 的类型: {type(list_of_documents)}")
        for idx, doc in enumerate(list_of_documents, 1):
            try:
                print(f"处理第 {idx}/{len(list_of_documents)} 条数据")
                payloads = doc.get("payloads", {})
                feed_url = payloads.get("feed_url", "")
                # BUG FIX: the default used to be {}, but encode() expects text.
                content = doc.get("content", "")
                vector = self.model.encode([content])[0].tolist()

                if feed_url:
                    print(f"查询 feed_url 存在：{feed_url}")
                    # BUG FIX: the old code scrolled for matches (limit=10)
                    # and deleted only the first hit, leaving duplicates
                    # behind when several points shared this feed_url.  A
                    # filter-based delete removes them all in one round trip.
                    self.client.delete(
                        collection_name=self.collection_name,
                        points_selector=models.FilterSelector(
                            filter=models.Filter(
                                must=[
                                    models.FieldCondition(
                                        key="feed_url",
                                        match=models.MatchValue(value=feed_url),
                                    )
                                ]
                            )
                        ),
                    )

                # BUG FIX: ids were previously current_count + idx.  After
                # deletions the collection count shrinks, so a later run can
                # reuse a still-live id and upsert silently overwrites an
                # unrelated document.  UUIDs avoid that: deterministic per
                # feed_url (stable across re-ingests), random otherwise.
                if feed_url:
                    point_id = str(uuid.uuid5(uuid.NAMESPACE_URL, feed_url))
                else:
                    point_id = str(uuid.uuid4())

                self.client.upsert(
                    collection_name=self.collection_name,
                    points=[
                        models.PointStruct(
                            id=point_id,
                            vector=vector,
                            payload=payloads,
                        )
                    ],
                )
                print(f"成功插入第 {idx} 条数据")
            except Exception as e:
                print(f"插入出错: {e}")

    def search(self, keyword, top_k=5):
        """Return the *top_k* documents most similar to *keyword*.

        Each result is a dict exposing the stored ``feed_content`` and
        ``feed_url`` payload fields (empty strings when absent).
        """
        query_vector = self.model.encode([keyword])[0].tolist()
        results = self.client.search(
            collection_name=self.collection_name,
            query_vector=query_vector,
            limit=top_k,
            with_payload=True,
        )
        return [
            {
                "feed_content": res.payload.get("feed_content", ""),
                "feed_url": res.payload.get("feed_url", ""),
            }
            for res in results
        ]