import requests
from typing import List, Dict
from dataclasses import dataclass
import datetime
import pytz
from app.db.database import SessionLocal
from app.db.crud import create_tpost
from concurrent.futures import ThreadPoolExecutor
from app.pacong.content_fetcher import ContentFetcher

@dataclass
class CankaoNewsItem:
    """Data class for one cankaoxiaoxi (Reference News) article.

    NOTE(review): this class is not referenced anywhere in this file;
    fetch_content() returns plain dicts instead. Verify whether callers
    elsewhere still use it, or whether it is dead code.
    """
    # Unique article id (string form, as delivered by the site's JSON feed).
    id: str
    # Article headline.
    title: str
    # URL of the article page.
    url: str
    # Publish timestamp as a string — presumably Beijing time in
    # '%Y-%m-%d %H:%M:%S' form like transform_to_utc expects; confirm.
    publish_time: str

class CankaoxiaoxiCrawler(ContentFetcher):
    """Crawler that fetches news listings from cankaoxiaoxi.com.

    Pulls the JSON channel listings for a fixed set of channels in
    parallel, normalizes each item, and returns them sorted by publish
    time, newest first.
    """

    # Seconds to wait for the remote server before giving up on a request.
    REQUEST_TIMEOUT = 10

    def __init__(self):
        """Initialize the crawler with its endpoint, channels and source id."""
        self.base_url = "https://china.cankaoxiaoxi.com/json/channel"
        # Channel slugs to crawl (domestic / opinion / international).
        self.channels = ["zhongguo", "guandian", "gj"]
        self.content_type = 6  # source_id assigned to cankaoxiaoxi

    def transform_to_utc(self, beijing_time: str) -> int:
        """Convert a Beijing-time string to a UTC timestamp.

        :param beijing_time: timestamp in '%Y-%m-%d %H:%M:%S' form,
            interpreted as Asia/Shanghai local time
        :return: UTC timestamp in milliseconds
        :raises ValueError: if the string does not match the format
        """
        beijing_tz = pytz.timezone('Asia/Shanghai')
        local_time = datetime.datetime.strptime(beijing_time, '%Y-%m-%d %H:%M:%S')
        # With pytz, localize() (not tzinfo=) is required for correct offsets.
        local_time = beijing_tz.localize(local_time)
        return int(local_time.timestamp() * 1000)

    def fetch_channel(self, channel: str) -> List[Dict]:
        """Fetch the news listing of a single channel.

        Best-effort: network or parse failures are logged and yield an
        empty list, so one bad channel does not abort the whole crawl.

        :param channel: channel slug, e.g. 'zhongguo'
        :return: list of raw news dicts (empty on failure)
        """
        url = f"{self.base_url}/{channel}/list.json"
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        try:
            # Fix: without a timeout a stalled server hangs the worker
            # thread (and thus fetch_content) indefinitely.
            response = requests.get(url, headers=headers, timeout=self.REQUEST_TIMEOUT)
            response.raise_for_status()
            data = response.json()
            return data.get('list', [])
        except (requests.RequestException, ValueError) as e:
            # RequestException covers connection/timeout/HTTP errors;
            # ValueError covers malformed JSON from response.json().
            print(f"获取频道 {channel} 数据失败: {str(e)}")
            return []

    def fetch_content(self) -> List[Dict]:
        """Fetch all channels in parallel and return normalized items.

        :return: list of dicts with keys id/title/url/pub_date/extra,
            sorted by publish time (milliseconds UTC), newest first.
            Items missing a required field, or with an unparseable
            publish time, are skipped.
        """
        # Threads suit this fan-out: the work is network-bound.
        with ThreadPoolExecutor(max_workers=len(self.channels)) as executor:
            channel_results = list(executor.map(self.fetch_channel, self.channels))

        all_news = []
        for channel_news in channel_results:
            for news in channel_news:
                news_data = news.get('data', {})
                if not all(key in news_data for key in ['id', 'title', 'url', 'publishTime']):
                    continue

                # Fix: convert once instead of twice per item, and skip
                # (rather than crash on) a malformed timestamp string.
                try:
                    pub_date = self.transform_to_utc(news_data['publishTime'])
                except ValueError:
                    continue

                all_news.append({
                    "id": news_data['id'],
                    "title": news_data['title'],
                    "url": news_data['url'],
                    "pub_date": pub_date,
                    "extra": {
                        "date": pub_date
                    }
                })

        # Newest first.
        all_news.sort(key=lambda x: x['extra']['date'], reverse=True)
        return all_news


# Example usage: run the crawler directly as a script.
if __name__ == "__main__":
    # fetch_and_save is presumably provided by the ContentFetcher base
    # class (not defined in this file) — it fetches and persists the data.
    CankaoxiaoxiCrawler().fetch_and_save()