import pymongo
import requests
from bs4 import BeautifulSoup, Comment
import chardet
import re
import datetime

class MongoDBHandler:
    """Helper that reads/writes MongoDB documents and extracts cleaned text
    from web pages.

    Responsibilities visible here: fetching documents, upserting processed
    results keyed by ``feed_url``, stripping emoji, and scraping a detail
    page (the ``detail-title`` / ``detail-desc`` selectors suggest a
    Xiaohongshu-style page — TODO confirm against the actual target site).
    """

    def __init__(self, url):
        # NOTE: MongoClient does not validate the URL eagerly; connection
        # errors only surface on the first real operation.
        self.client = pymongo.MongoClient(url)

    def get_documents(self, db_name, collection_name, query=None, projection=None):
        """Return all documents matching ``query`` (default: everything).

        ``_id`` is excluded by default; pass a custom ``projection`` to
        override that.
        """
        collection = self.client[db_name][collection_name]
        return list(collection.find(query or {}, projection or {"_id": 0}))

    def save_documents(self, doc, extracted, target_db, target_collection):
        """Upsert ``doc`` plus extraction metadata, keyed by ``feed_url``.

        Adds ``extracted_content``, a success/failed ``status``, and a
        ``processing_time`` timestamp. Documents without a ``feed_url`` are
        silently skipped (original behavior preserved).
        """
        new_doc = doc.copy()
        new_doc.update({
            "extracted_content": extracted,
            "status": "success" if extracted else "failed",
            "processing_time": datetime.datetime.now().isoformat(),
        })

        feed_url = new_doc.get("feed_url")
        if not feed_url:
            # No key to upsert on — nothing we can safely write.
            return

        collection = self.client[target_db][target_collection]

        # Atomic replace-or-insert instead of the original delete_one +
        # insert_one pair, which could lose the record if the process died
        # between the two calls.
        result = collection.replace_one({"feed_url": feed_url}, new_doc, upsert=True)
        if result.matched_count:
            print(f"已删除 feed_url 为 {feed_url} 的旧数据")
        print(f"成功写入 1 条记录")

    def remove_emojis(self, text):
        """Strip emoji and related pictographic characters from ``text``."""
        emoji_pattern = re.compile(
            "[" "\U0001F600-\U0001F64F" "\U0001F300-\U0001F5FF"
            "\U0001F680-\U0001F6FF" "\U0001F1E0-\U0001F1FF"
            "\U00002700-\U000027BF" "\U0001F900-\U0001F9FF"
            "\U00002600-\U000026FF" "\U00002B50"
            "\U0001FA70-\U0001FAFF" "\U0001F018-\U0001F270"
            "]+", flags=re.UNICODE
        )
        return emoji_pattern.sub('', text)

    def is_messy_code(self, text, threshold=0.3):
        """Heuristic garbled-text check used by :meth:`extract_content`.

        Returns True when more than ``threshold`` of the characters are the
        Unicode replacement character U+FFFD, which ``bytes.decode(...,
        errors='replace')`` inserts for undecodable bytes. (This method was
        referenced by extract_content but missing, so every extraction
        previously died with AttributeError.)
        """
        if not text:
            return False
        return text.count('\ufffd') / len(text) > threshold

    def extract_content(self, url):
        """Fetch ``url`` and return "title\\ncontent\\ntags" as cleaned text.

        Returns the literal string "[处理失败]" on any failure (network,
        decoding, parsing), matching the original error contract.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }
        try:
            response = requests.get(url, headers=headers, timeout=10)
            # Fail fast on HTTP errors instead of parsing an error page.
            response.raise_for_status()
            # Prevent requests from guessing the encoding; we decode the raw
            # bytes ourselves below.
            response.encoding = None
            raw_content = response.content
            detected = chardet.detect(raw_content)
            encoding = detected.get("encoding") or "utf-8"
            print(f"网页头部声明编码: {response.apparent_encoding}, chardet猜测编码: {encoding}")
            try:
                # First decode with the detected encoding; if the result
                # looks garbled, fall back to a utf-8 decode.
                text = raw_content.decode(encoding, errors='replace')
                if self.is_messy_code(text):
                    print("检测到乱码，重新以utf-8解码！")
                    text = raw_content.decode('utf-8', errors='replace')
            except Exception:
                text = raw_content.decode('utf-8', errors='replace')

            soup = BeautifulSoup(text, 'html.parser')
            title_div = soup.find('div', id='detail-title')
            title = self.remove_emojis(title_div.get_text(strip=True)) if title_div else "[无标题]"

            desc_div = soup.find('div', id='detail-desc')
            content_spans = desc_div.select('.note-text span') if desc_div else []
            content = [self.remove_emojis(span.get_text(strip=True)) for span in content_spans if not isinstance(span, Comment)]

            tags = [tag.get_text(strip=True) for tag in desc_div.select('a.tag')] if desc_div else []

            content_str = '\n'.join(content)
            tags_str = ', '.join(tags)
            extracted = f"{title}\n{content_str}\n{tags_str}"
            # Collapse blank-line runs; pull hashtag lines up against the
            # preceding text.
            extracted = re.sub(r'\n\s*\n+', '\n', re.sub(r'\n{2,}(#)', r'\n\1', extracted or '')).strip()
            # Drop stray BOM-plus-newline sequences.
            extracted = extracted.replace('\ufeff\n', '')
            return extracted
        except Exception as e:
            print(f"提取失败: {url} | 错误: {str(e)}")
            return "[处理失败]"
        

