from qdrant_client import QdrantClient, models
from qdrant_client.models import VectorParams, Distance
from sentence_transformers import SentenceTransformer
import torch
import datetime

class QdrantHandler:
    """Wrapper around a Qdrant collection: embeds documents with a
    SentenceTransformer model, upserts them (replacing any prior point that
    shares the same ``feed_url``), and runs semantic similarity search.
    """

    def __init__(self, url, collection_name, model_name, drop_existing=False):
        """Connect to Qdrant, load the embedding model, ensure the collection exists.

        Args:
            url: Qdrant server URL.
            collection_name: Collection to operate on (created when missing).
            model_name: SentenceTransformer model name or local path.
            drop_existing: When True, delete this collection first so it is
                rebuilt from scratch. This replaces the previous hard-coded,
                unconditional ``delete_collection("hot_feeds")``, which wiped
                a fixed collection on every construction regardless of which
                collection this handler was configured for.
        """
        self.client = QdrantClient(url)
        self.collection_name = collection_name
        self.model = SentenceTransformer(
            model_name,
            device='cuda' if torch.cuda.is_available() else 'cpu',
            cache_folder="/mnt/hf_models/huggingface/hub",
        )

        if drop_existing and self.client.collection_exists(self.collection_name):
            self.client.delete_collection(self.collection_name)

        if not self.client.collection_exists(self.collection_name):
            # size=1024 must equal the embedding dimension of model_name
            # -- NOTE(review): confirm against the configured model.
            self.client.create_collection(
                collection_name=self.collection_name,
                vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
            )

    def _delete_existing(self, feed_url):
        """Delete the stored point (if any) whose payload ``feed_url`` matches."""
        # scroll() returns a (records, next_page_offset) tuple; the original
        # code indexed it as points[0]["id"], which raises on Record objects.
        records, _ = self.client.scroll(
            collection_name=self.collection_name,
            scroll_filter=models.Filter(
                must=[
                    models.FieldCondition(
                        key="feed_url",
                        match=models.MatchValue(value=feed_url),
                    )
                ]
            ),
            limit=1,
        )
        if records:
            # delete() takes a points_selector, not a bare `points=` kwarg.
            self.client.delete(
                collection_name=self.collection_name,
                points_selector=models.PointIdsList(points=[records[0].id]),
            )
            print(f"已删除 feed_url 为 {feed_url} 的旧数据")

    @staticmethod
    def _build_payload(doc):
        """Map a raw document dict onto the payload stored beside the vector."""
        return {
            "author_name": doc.get('author_name', ''),
            "title": doc.get('title', ''),
            "platform": doc.get('platform', ''),
            "keywords": doc.get('keywords', []),
            "feed_url": doc.get('feed_url', ''),
            "profile_url": doc.get('profile_url', ''),
            "image_url": doc.get('image_url', ''),
            "video": doc.get('video', ''),
            "image_base64": doc.get('image_base64', ''),
            "like_count": doc.get('like_count', 0),
            "profile_likes_stats": doc.get('profile_likes_stats', ''),
            "createAt": doc.get('createAt', ''),
            "extracted_content": doc.get('extracted_content', ''),
            "status": doc.get('status', ''),
            "processing_time": doc.get('processing_time', ''),
        }

    def insert_documents(self, documents):
        """Embed each document's ``extracted_content`` and upsert it.

        A document whose ``feed_url`` already exists in the collection
        replaces the old point. A failure on a single document is logged and
        skipped so one bad record does not abort the whole batch.

        Args:
            documents: Iterable of dicts; ``extracted_content`` is embedded,
                the remaining known keys become the point payload.
        """
        current_count = self.client.count(self.collection_name, exact=True).count
        print(f"发现 {current_count} 条数据")
        for idx, doc in enumerate(documents, 1):
            try:
                feed_url = doc.get("feed_url", "")
                print(f"当前 feed_url: {feed_url}")
                content = doc.get("extracted_content", "")
                print(f"处理第 {idx}/{len(documents)} 条数据")
                vector = self.model.encode([content])[0].tolist()

                # Replace any previously stored point for this feed_url.
                if feed_url:
                    self._delete_existing(feed_url)

                # NOTE(review): count-based ids can collide with surviving
                # points once deletions occur; a deterministic id derived
                # from feed_url would make the replace atomic via upsert.
                self.client.upsert(
                    collection_name=self.collection_name,
                    points=[
                        models.PointStruct(
                            id=current_count + idx,
                            vector=vector,
                            payload=self._build_payload(doc),
                        )
                    ],
                )
            except Exception as e:
                # Best-effort batch insert: log and continue with the next doc
                # (restores the error handling left commented out upstream).
                print(f"插入出错: {e}")
        print(f"成功全部插入！")

    def search(self, keyword, top_k=5):
        """Return the ``top_k`` documents most similar to ``keyword``.

        Args:
            keyword: Query text; embedded with the same model as documents.
            top_k: Maximum number of hits to return.

        Returns:
            List of dicts with keys ``extracted_content`` and ``feed_url``.
        """
        query_vector = self.model.encode([keyword])[0].tolist()
        results = self.client.search(
            collection_name=self.collection_name,
            query_vector=query_vector,
            limit=top_k,
            with_payload=True,
        )
        return [
            {
                "extracted_content": res.payload.get("extracted_content", ""),
                "feed_url": res.payload.get("feed_url", ""),
            }
            for res in results
        ]
    
    