import requests
from bs4 import BeautifulSoup
from typing import List, Dict
from dataclasses import dataclass
import re
from app.db.database import SessionLocal
from app.db.crud import create_tpost
from app.pacong.content_fetcher import ContentFetcher

@dataclass
class FastBullNewsItem:
    """Data holder for a single FastBull news item."""
    # Relative or absolute URL of the article.
    url: str
    # Article headline text.
    title: str
    # Publication time as an integer timestamp (presumably Unix epoch
    # milliseconds/seconds from the site's data-date attribute — TODO confirm).
    pub_date: int

class FastBullBaseCrawler:
    """Base class for FastBull crawlers; provides shared fetch and title-cleanup helpers."""

    def __init__(self):
        # Site root; subclasses compose page URLs from this.
        self.base_url = "https://www.fastbull.com"

    def my_fetch(self, url: str, timeout: float = 10.0) -> str:
        """
        Send an HTTP GET request and return the HTML body of the given URL.

        :param url: target URL
        :param timeout: seconds to wait for the server before aborting.
            The original call passed no timeout, so a stalled connection
            could hang the crawler indefinitely; the default keeps the
            call signature backward compatible.
        :return: response body as text
        :raises requests.HTTPError: if the server returns an error status
        :raises requests.Timeout: if no response arrives within ``timeout``
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(url, headers=headers, timeout=timeout)
        response.raise_for_status()
        return response.text

    def extract_title(self, title_text: str) -> str:
        """
        Extract the actual headline from a raw title string.

        Titles may arrive wrapped in CJK brackets, e.g. "【headline】".
        If a bracketed segment of at least 4 characters is found, return
        just that segment; otherwise return the input unchanged.

        :param title_text: raw title text
        :return: cleaned title
        """
        # NOTE(review): the greedy `(.+)` spans from the first 【 to the
        # last 】 when multiple bracket pairs occur — confirm whether a
        # non-greedy match is intended before changing it.
        match = re.search(r'【(.+)】', title_text)
        if match and len(match.group(1)) >= 4:
            return match.group(1)
        return title_text

class FastBullExpressCrawler(FastBullBaseCrawler, ContentFetcher):
    """Crawler that retrieves and parses express (flash) news from FastBull."""

    def __init__(self):
        """Initialize the express-news endpoint and content type."""
        super().__init__()
        self.express_url = f"{self.base_url}/cn/express-news"
        self.content_type = 9  # source_id assigned to FastBull express news

    def fetch_content(self) -> List[Dict]:
        """
        Download the express-news listing page and parse it.

        :return: list of item dicts with id, title, url, pub_date and extra
        """
        soup = BeautifulSoup(self.my_fetch(self.express_url), 'html.parser')

        parsed = []
        for node in soup.select('.news-list'):
            anchor = node.select_one('.title_name')
            if not anchor:
                continue

            href = anchor.get('href')
            raw_title = anchor.text.strip()
            stamp = node.get('data-date')

            # Skip entries missing any required field.
            if not (href and raw_title and stamp):
                continue

            parsed.append({
                "id": href,
                "title": self.extract_title(raw_title),
                "url": f"{self.base_url}{href}",
                "pub_date": int(stamp),
                "extra": {
                    "type": "express"
                }
            })

        return parsed

class FastBullNewsCrawler(FastBullBaseCrawler, ContentFetcher):
    """Crawler that retrieves and parses regular news articles from FastBull."""

    def __init__(self):
        """Initialize the news endpoint and content type."""
        super().__init__()
        self.news_url = f"{self.base_url}/cn/news"
        self.content_type = 10  # source_id assigned to FastBull news

    def fetch_content(self) -> List[Dict]:
        """
        Download the news listing page and parse it.

        :return: list of item dicts with id, title, url, pub_date and extra
        """
        soup = BeautifulSoup(self.my_fetch(self.news_url), 'html.parser')

        collected = []
        for entry in soup.select('.trending_type'):
            href = entry.get('href')
            title_node = entry.select_one('.title')
            date_node = entry.select_one('[data-date]')

            # Every field must be present (and truthy) to keep the entry.
            if not all([href, title_node, date_node]):
                continue

            collected.append({
                "id": href,
                "title": title_node.text.strip(),
                "url": f"{self.base_url}{href}",
                "pub_date": int(date_node.get('data-date')),
                "extra": {
                    "type": "news"
                }
            })

        return collected


# Example invocation: fetch and persist both the express feed and the news feed.
if __name__ == "__main__":
    for crawler_cls in (FastBullExpressCrawler, FastBullNewsCrawler):
        crawler_cls().fetch_and_save()