import requests
from typing import List, Dict, Optional
from app.db.database import SessionLocal
from app.db.crud import create_tpost
from app.pacong.content_fetcher import ContentFetcher

class ZhihuHotListFetcher(ContentFetcher):
    """
    Fetch the Zhihu hot list from its public API and normalize each entry
    into a flat dict suitable for persistence.
    """

    def __init__(self):
        """
        Initialize the fetcher with the hot-list endpoint and source id.
        """
        self.url = "https://www.zhihu.com/api/v3/feed/topstory/hot-lists/total"
        self.content_type = 14  # source_id assigned to Zhihu

    def my_fetch(self) -> Dict:
        """
        Send an HTTP GET request to the hot-list API.

        :return: the decoded JSON response body.
        :raises requests.HTTPError: if the server returns a 4xx/5xx status.
        :raises requests.RequestException: on connection errors or timeout.
        """
        params = {
            'limit': 20,
            'desktop': 'true'
        }

        # Browser-like headers; presumably the API rejects anonymous
        # clients without a UA/Referer — TODO confirm which are required.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Origin': 'https://www.zhihu.com',
            'Referer': 'https://www.zhihu.com/hot'
        }

        response = requests.get(
            self.url,
            params=params,
            headers=headers,
            timeout=10  # fail fast instead of hanging on a stalled connection
        )
        response.raise_for_status()
        return response.json()

    def process_icon_url(self, icon_url: Optional[str]) -> Optional[str]:
        """
        Normalize an icon URL.

        :param icon_url: raw icon URL from the API (may be None or empty).
        :return: the URL unchanged, or None for empty/missing input.
        """
        if not icon_url:
            return None
        return icon_url

    def fetch_content(self) -> List[Dict]:
        """
        Fetch the hot list and convert each item into a normalized dict.

        Items lacking an id or title are skipped.

        :return: list of dicts with id/title/url/pub_date/content/extra keys.
        """
        response_data = self.my_fetch()

        result = []
        for item in response_data.get('data', []):
            # Use `or {}` rather than a .get() default: the API may return
            # an explicit JSON null for these fields, which .get(key, {})
            # would pass through as None and crash the .get() calls below.
            target = item.get('target') or {}
            card_label = item.get('card_label') or {}

            if not target.get('id') or not target.get('title'):
                continue

            # Build the canonical question URL from the target id
            question_url = f"https://www.zhihu.com/question/{target['id']}"

            result.append({
                "id": str(target['id']),
                "title": target['title'],
                "url": question_url,
                "pub_date": target.get('created'),
                "content": target.get('excerpt', ''),
                "extra": {
                    "icon": self.process_icon_url(card_label.get('night_icon')),
                    "answer_count": target.get('answer_count'),
                    "follower_count": target.get('follower_count'),
                    "comment_count": target.get('comment_count')
                }
            })

        return result


# Example usage: fetch the hot list and persist it via the base-class helper.
if __name__ == "__main__":
    ZhihuHotListFetcher().fetch_and_save()