import requests
from typing import List, Dict
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor
from app.db.database import SessionLocal
from app.db.crud import create_tpost
from app.pacong.content_fetcher import ContentFetcher

@dataclass
class V2exFeedItem:
    """A single entry from a V2EX JSON feed.

    Field names mirror the keys of the JSON Feed item objects returned by
    the V2EX ``/feed/<channel>.json`` endpoints.
    """
    url: str             # permalink of the topic
    date_modified: str   # last-modified timestamp (ISO-8601 string as delivered by the feed)
    content_html: str    # topic body as HTML
    date_published: str  # original publication timestamp
    title: str           # topic title
    id: str              # feed-provided unique identifier

@dataclass
class V2exFeed:
    """Top-level structure of a V2EX JSON feed document.

    Mirrors the JSON Feed envelope returned by the V2EX
    ``/feed/<channel>.json`` endpoints.
    """
    version: str               # JSON Feed version string
    title: str                 # feed title
    description: str           # feed description
    home_page_url: str         # URL of the channel's web page
    feed_url: str              # URL of this feed itself
    icon: str                  # URL of the large feed icon
    favicon: str               # URL of the small favicon
    items: List[V2exFeedItem]  # entries contained in the feed

class V2exCrawler(ContentFetcher):
    """Fetch and merge JSON feeds from several V2EX channels.

    Each channel is downloaded concurrently via a thread pool, the items are
    validated and normalized into plain dicts, and the combined list is
    returned sorted by publication time (newest first).
    """

    def __init__(self):
        """Initialize base URL, channel list, source id and request timeout."""
        self.base_url = "https://www.v2ex.com"
        # Channels to crawl
        self.channels = ["create", "ideas", "programmer", "share"]
        self.content_type = 13  # source_id assigned to V2EX
        # Per-request timeout in seconds. Without a timeout, requests.get can
        # block forever on a stalled connection, hanging the worker thread
        # (and therefore the whole executor.map in fetch_content).
        self.request_timeout = 10

    def fetch_channel(self, channel: str) -> Dict:
        """Fetch the JSON feed of a single channel.

        :param channel: channel name (e.g. ``"create"``)
        :return: the parsed feed document, or ``{"items": []}`` on any
                 network/HTTP/JSON failure so callers can proceed best-effort.
        """
        url = f"{self.base_url}/feed/{channel}.json"

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        try:
            response = requests.get(url, headers=headers, timeout=self.request_timeout)
            response.raise_for_status()
            return response.json()
        # RequestException covers connection errors, timeouts and the
        # HTTPError raised by raise_for_status(); ValueError covers a
        # response body that is not valid JSON.
        except (requests.RequestException, ValueError) as e:
            print(f"获取频道 {channel} 数据失败: {str(e)}")
            return {"items": []}

    def fetch_content(self) -> List[Dict]:
        """Fetch all channels in parallel and return the merged, sorted posts.

        :return: list of post dicts with keys ``id``, ``title``, ``url``,
                 ``pub_date``, ``content`` and ``extra``; items missing any
                 required field are skipped. Sorted newest-first by date.
        """
        # Fan out one request per channel; fetch_channel never raises, so
        # executor.map cannot be poisoned by a single failing channel.
        with ThreadPoolExecutor(max_workers=len(self.channels)) as executor:
            channel_results = list(executor.map(self.fetch_channel, self.channels))

        # Merge items from every channel into a flat list of post dicts.
        all_posts = []
        for feed in channel_results:
            for item in feed.get('items', []):
                # Prefer date_modified when present, else date_published.
                pub_date = item.get('date_modified') or item.get('date_published')
                if not all([item.get('id'), item.get('title'), item.get('url'), pub_date]):
                    # Skip incomplete items rather than storing partial rows.
                    continue

                all_posts.append({
                    "id": item['id'],
                    "title": item['title'],
                    "url": item['url'],
                    "pub_date": pub_date,
                    "content": item.get('content_html', ''),
                    "extra": {
                        "date": pub_date
                    }
                })

        # Sort by publication time, newest first. ISO-8601 strings sort
        # correctly as plain strings when they share a timezone format.
        all_posts.sort(key=lambda x: x['extra']['date'], reverse=True)
        return all_posts


# Example entry point: crawl all configured V2EX channels and persist
# the results (fetch_and_save is provided by the ContentFetcher base).
if __name__ == "__main__":
    v2ex_crawler = V2exCrawler()
    v2ex_crawler.fetch_and_save()