import pymongo
import requests
from bs4 import BeautifulSoup, Comment
from sentence_transformers import SentenceTransformer
from qdrant_client import QdrantClient, models
from qdrant_client.models import VectorParams, Distance
import torch
import re
import chardet
import datetime

import os
print(os.environ.get("HF_HOME"))

# Initialize the BGE-M3 embedding model; prefer CUDA when a GPU is available.
model = SentenceTransformer('BAAI/bge-m3', device='cuda' if torch.cuda.is_available() else 'cpu')

from huggingface_hub import snapshot_download

# NOTE: local_files_only=True guarantees no network access — only the local HF cache is used.
model_path = snapshot_download("BAAI/bge-m3", local_files_only=True)
print("Local model cache path:", model_path)


# 移除emoji
# Compiled once at import time so repeated calls (one per scraped span) do not
# rebuild the pattern on every invocation.
_EMOJI_PATTERN = re.compile(
    "["  # character class covering the common emoji/pictograph ranges
    "\U0001F600-\U0001F64F"  # emoticons
    "\U0001F300-\U0001F5FF"  # symbols & pictographs
    "\U0001F680-\U0001F6FF"  # transport & map symbols
    "\U0001F1E0-\U0001F1FF"  # regional indicator (flag) letters
    "\U00002700-\U000027BF"  # dingbats
    "\U0001F900-\U0001F9FF"  # supplemental symbols & pictographs
    "\U00002600-\U000026FF"  # miscellaneous symbols
    "\U00002B50"             # white medium star
    "\U0001FA70-\U0001FAFF"  # symbols & pictographs extended-A
    "\U0001F018-\U0001F270"  # enclosed/assorted pictographic range
    "]+",
    flags=re.UNICODE,
)


def remove_emojis(text):
    """Return *text* with all characters in the emoji ranges above removed.

    Args:
        text: input string (may be empty).

    Returns:
        The input string stripped of emoji characters.
    """
    return _EMOJI_PATTERN.sub('', text)

# 获取原始文档
def get_original_documents():
    """Fetch every document from snob10.hot_feeds, excluding the _id field."""
    mongo = pymongo.MongoClient("mongodb://172.20.50.49:27017/")
    feeds = mongo["snob10"]["hot_feeds"]
    return [doc for doc in feeds.find({}, {"_id": 0})]

# 页面解析函数
def extract_content(url):
    """Fetch a feed page and return "title\\ncontent\\ntags" as one string.

    Args:
        url: page URL to scrape.

    Returns:
        Combined title / body / tag text, or the sentinel "[处理失败]" on any
        network or parsing failure.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }

    try:
        response = requests.get(url, headers=headers, timeout=10)
        raw_content = response.content
        # chardet can report {"encoding": None} (e.g. empty body), in which
        # case dict.get's default never fires — use `or` for the fallback.
        detected = chardet.detect(raw_content)
        encoding = detected.get("encoding") or "utf-8"

        try:
            text = raw_content.decode(encoding, errors='replace')
        except Exception:
            text = raw_content.decode('utf-8', errors='replace')

        # Parse the chardet-decoded text. The original passed response.text,
        # which uses requests' own charset guess and silently discarded the
        # detection work above, risking mojibake on mislabeled pages.
        soup = BeautifulSoup(text, 'html.parser')
        title_div = soup.find('div', id='detail-title')
        title = remove_emojis(title_div.get_text(strip=True)) if title_div else "[无标题]"

        desc_div = soup.find('div', id='detail-desc')
        content = []
        tags = []
        if desc_div is not None:
            # Previously a missing detail-desc div raised AttributeError and
            # the whole page was reported as failed; degrade gracefully instead.
            content_spans = desc_div.select('.note-text span')
            content = [remove_emojis(span.get_text(strip=True)) for span in content_spans if not isinstance(span, Comment)]
            tags = [tag.get_text(strip=True) for tag in desc_div.select('a.tag')]

        content_str = '\n'.join(content)
        combined_content = f"{title}\n{content_str}\n{', '.join(tags)}"
        return combined_content

    except Exception as e:
        print(f"处理失败: {url} | 错误: {str(e)}")
        return "[处理失败]"

# 存储到MongoDB
def save_to_new_database(data):
    """Persist the processed documents into the bian.hot_feeds collection.

    Note: pymongo's insert_many mutates each dict in *data* by adding an _id.
    """
    mongo = pymongo.MongoClient("mongodb://172.20.50.49:27017/")
    target = mongo["bian"]["hot_feeds"]
    if data:
        target.insert_many(data)
    print(f"成功写入 {len(data)} 条记录")

# 存储到Qdrant
def save_to_qdrant(data):
    """Rebuild the 'hot_feeds' Qdrant collection and upsert one point per doc.

    Each document's `extracted_content` is embedded with the module-level
    BGE-M3 model (1024-dim) and stored with the remaining fields as payload.
    Point ids are assigned sequentially starting from the (post-rebuild)
    collection count.
    """
    client = QdrantClient("http://localhost:6333")

    # Rebuild from scratch. After delete_collection the collection can never
    # exist, so the original `collection_exists` guard was dead — create
    # unconditionally. Also removed the original `data[1].get(...)` line,
    # which was unused AND raised IndexError whenever len(data) < 2.
    client.delete_collection("hot_feeds")
    client.create_collection(
        collection_name="hot_feeds",
        vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
    )

    current_count = client.count(collection_name="hot_feeds", exact=True).count
    print(f"当前集合文档总数: {current_count}")

    for idx, doc in enumerate(data, 1):
        try:
            new_id = current_count + idx
            # Default to '' (not {}) so model.encode always receives a string.
            extracted_content = doc.get('extracted_content', '')
            vectors = model.encode([extracted_content])[0].tolist()

            payloads = {
                "author_name": doc.get('author_name', ''),
                "title": doc.get('title', ''),
                "platform": doc.get('platform', ''),
                "keywords": doc.get('keywords', []),
                "feed_url": doc.get('feed_url', ''),
                "profile_url": doc.get('profile_url', ''),
                "image_url": doc.get('image_url', ''),
                "video": doc.get('video', ''),
                "image_base64": doc.get('image_base64', ''),
                "like_count": doc.get('like_count', 0),
                "profile_likes_stats": doc.get('profile_likes_stats', ''),
                "createAt": doc.get('createAt', ''),
                "extracted_content": extracted_content,
                "status": doc.get('status', ''),
                "processing_time": doc.get('processing_time', ''),
            }

            client.upsert(
                collection_name="hot_feeds",
                points=[models.PointStruct(id=new_id, vector=vectors, payload=payloads)],
            )
        except Exception as e:
            print(f"处理文档 {doc.get('_id')} 时出错: {str(e)}")

# 搜索Qdrant
def search_from_qdrant(keyword, top_k=5):
    """Embed *keyword* with BGE-M3 and return the top_k nearest feeds.

    Args:
        keyword: query string to embed.
        top_k: maximum number of hits to return (default 5).

    Returns:
        List of dicts with "extracted_content" and "feed_url" keys.
    """
    query_vector = model.encode([keyword])[0].tolist()
    qdrant = QdrantClient("http://localhost:6333")

    hits = qdrant.search(
        collection_name="hot_feeds",
        query_vector=query_vector,
        limit=top_k,
        with_payload=True,
    )

    results = []
    for hit in hits:
        payload = hit.payload
        results.append({
            "extracted_content": payload.get("extracted_content", ""),
            "feed_url": payload.get("feed_url", ""),
        })
    return results

# 主处理流程
def main():
    """End-to-end pipeline: read feeds, scrape each URL, clean the text, then
    store results in MongoDB and index them in Qdrant."""
    original_docs = get_original_documents()
    print(f"发现 {len(original_docs)} 条原始数据")

    processed_data = []
    for idx, doc in enumerate(original_docs, 1):
        print(f"处理第 {idx}/{len(original_docs)} 条数据")
        extracted = extract_content(doc['feed_url'])
        # Pull hashtag lines up against the preceding text, then collapse any
        # remaining blank lines into a single newline.
        cleaned = re.sub(r'\n{2,}(#)', r'\n\1', extracted or '')
        extracted = re.sub(r'\n\s*\n+', '\n', cleaned).strip()

        new_doc = doc.copy()
        new_doc["extracted_content"] = extracted
        new_doc["status"] = "success" if extracted else "failed"
        new_doc["processing_time"] = datetime.datetime.now().isoformat()
        processed_data.append(new_doc)

    save_to_new_database(processed_data)
    print(f"最终存储 {len(processed_data)} 条处理结果到数据库")

    save_to_qdrant(processed_data)

if __name__ == "__main__":
    main()
    keyword = "瘦腿"
    results = search_from_qdrant(keyword, top_k=5)
    for i, item in enumerate(results, 1):
        print(f"\n第 {i} 条结果：")
        print("Feed URL:", item['feed_url'])
        print("内容摘要:\n", item['extracted_content'])
