import requests
from bs4 import BeautifulSoup
from typing import List, Dict
from dataclasses import dataclass
import datetime
import pytz
from app.db.database import SessionLocal
from app.db.crud import create_tpost
from app.pacong.content_fetcher import ContentFetcher

@dataclass
class GelonghuiNewsItem:
    """A single news entry scraped from the Gelonghui news listing.

    NOTE(review): this dataclass is not referenced anywhere in the visible
    code — presumably consumed by callers elsewhere; confirm before removal.
    """
    url: str            # article URL path — presumably relative, joined with the site base URL by consumers; verify
    title: str          # headline text of the article
    info: str           # auxiliary info text shown next to the timestamp on the listing page
    relative_time: str  # relative publication time string, e.g. "2小时前"

class GelonghuiCrawler(ContentFetcher):
    """
    Crawler that fetches the Gelonghui news listing page and converts it
    into a list of normalized news dicts.
    """

    def __init__(self):
        """Initialize base URL, news-listing URL, and the source identifier."""
        self.base_url = "https://www.gelonghui.com"
        self.news_url = f"{self.base_url}/news/"
        self.content_type = 11  # source_id assigned to Gelonghui

    def my_fetch(self) -> str:
        """
        Send an HTTP GET request and return the HTML of the news listing page.

        :return: raw HTML text of the response
        :raises requests.HTTPError: if the server answers with an error status
        :raises requests.Timeout: if the server does not respond in time
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # A timeout is mandatory: without one a stalled server hangs the
        # crawler forever (requests has no default timeout).
        response = requests.get(self.news_url, headers=headers, timeout=15)
        response.raise_for_status()
        return response.text

    def parse_relative_date(self, relative_time: str) -> int:
        """
        Convert a Chinese relative-time string (e.g. "2小时前") into a UTC
        timestamp in milliseconds, anchored at the current Asia/Shanghai time.

        Unrecognized or malformed strings fall back to the current time
        rather than raising, so one bad listing entry cannot abort a crawl.

        :param relative_time: relative time string such as "5分钟前"
        :return: UTC timestamp in milliseconds
        """
        now = datetime.datetime.now(pytz.timezone('Asia/Shanghai'))
        # Suffix table: each supported unit mapped to its length in seconds.
        units = (
            ('秒前', 1),
            ('分钟前', 60),
            ('小时前', 3600),
            ('天前', 86400),
        )
        for suffix, seconds in units:
            if suffix in relative_time:
                try:
                    amount = int(relative_time.replace(suffix, ''))
                except ValueError:
                    break  # malformed number (e.g. "约2小时前") -> fall back to "now"
                return int((now.timestamp() - amount * seconds) * 1000)
        # Unrecognized format: use the current time as the best estimate.
        return int(now.timestamp() * 1000)

    def fetch_content(self) -> List[Dict]:
        """
        Fetch the news listing page and parse it into a list of news dicts.

        Each dict has keys: "id" (relative URL), "title", "url" (absolute),
        "pub_date" (UTC ms timestamp) and "extra" ({"date", "info"}).
        Items missing a link, title, or timestamp are skipped.

        :return: list of parsed news dicts
        """
        html_content = self.my_fetch()
        soup = BeautifulSoup(html_content, 'html.parser')

        result = []
        for item in soup.select('.article-content'):
            # Article link and headline.
            article_link = item.select_one('.detail-right > a')
            if not article_link:
                continue
            url = article_link.get('href')
            title_tag = article_link.select_one('h2')
            if not url or not title_tag:
                continue
            title = title_tag.text.strip()

            # Timestamp container: span 1 holds the info text, span 3 the
            # relative time (layout observed on the listing page).
            time_container = item.select_one('.time')
            if not time_container:
                continue
            info = time_container.select_one('span:nth-child(1)')
            relative_time = time_container.select_one('span:nth-child(3)')
            if not relative_time:
                continue
            info_text = info.text.strip() if info else ""

            # Parse once and reuse: the original computed this twice, so
            # "pub_date" and "extra.date" could differ by a few milliseconds.
            pub_date = self.parse_relative_date(relative_time.text.strip())

            result.append({
                "id": url,
                "title": title,
                "url": f"{self.base_url}{url}",
                "pub_date": pub_date,
                "extra": {
                    "date": pub_date,
                    "info": info_text
                }
            })

        return result


# Example entry point: crawl Gelonghui and persist the results.
if __name__ == "__main__":
    # Run the inherited fetch-and-persist pipeline on a fresh crawler.
    GelonghuiCrawler().fetch_and_save()