#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
话题数据解析脚本
调用查询话题接口，将所有"话题内容"解码出来，然后根据category，分别存入不同的csv文件中
"""

import csv
import json
import os
import time
import urllib.parse
from typing import Any, Dict, List, Optional

import requests


class TopicParser:
    """Fetch live-topic data from the API, decode topic descriptions, and
    export them into one CSV file per category."""

    def __init__(self):
        # Endpoint and paging/output configuration.
        self.base_url = "https://wanyinfm.com/api/livetopics/list"
        self.page_size = 200  # number of records requested per page
        self.output_dir = "topics_csv"  # directory where CSV files are written
        self.session = requests.Session()

        # Browser-like headers so the request is not trivially rejected.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive'
        })

        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        os.makedirs(self.output_dir, exist_ok=True)

    def fetch_topics_page(self, page: int = 0) -> Optional[Dict[str, Any]]:
        """Fetch a single page of topic data.

        Args:
            page: Zero-based page index.

        Returns:
            The parsed JSON response, or None when the HTTP request fails
            or the body is not valid JSON (errors are printed, not raised).
        """
        params = {
            'size': self.page_size,
            'page': page
        }

        try:
            response = self.session.get(self.base_url, params=params, timeout=30)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"请求第{page}页数据失败: {e}")
            return None
        except json.JSONDecodeError as e:
            print(f"解析第{page}页JSON数据失败: {e}")
            return None

    def decode_topic_description(self, encoded_description: str) -> str:
        """Decode a URL-encoded topic description.

        Args:
            encoded_description: The URL-encoded (percent-encoded) description.

        Returns:
            The decoded string; on any decoding error the original input is
            returned unchanged (best-effort behavior, error is printed).
        """
        try:
            return urllib.parse.unquote(encoded_description, encoding='utf-8')
        except Exception as e:
            print(f"解码话题描述失败: {encoded_description}, 错误: {e}")
            return encoded_description

    def fetch_all_topics(self) -> List[Dict[str, Any]]:
        """Fetch the topic data and decode each topic's description.

        NOTE: deliberately issues a single request for page 0 (no pagination);
        topics beyond the first `page_size` records are not retrieved.

        Returns:
            The list of topic dicts, each augmented with a
            'decodedDescription' key when 'topicDescription' was present.
            Empty list on any failure.
        """
        print("开始获取话题数据...")

        # Single request, page 0 only — pagination intentionally not handled.
        data = self.fetch_topics_page(0)
        if not data:
            print("获取话题数据失败")
            return []

        # API envelope: 'h' carries status, 'b' carries the payload.
        if not data.get('h', {}).get('success', False):
            print(f"API返回错误: {data.get('h', {}).get('msg', '未知错误')}")
            return []

        content = data.get('b', {}).get('content', [])
        if not content:
            print("没有获取到任何话题数据")
            return []

        # Attach the decoded description alongside the raw one.
        all_topics = []
        for topic in content:
            if 'topicDescription' in topic:
                topic['decodedDescription'] = self.decode_topic_description(topic['topicDescription'])
            all_topics.append(topic)

        print(f"总共获取到{len(all_topics)}条话题数据")
        return all_topics

    def group_topics_by_category(self, topics: List[Dict[str, Any]]) -> Dict[int, List[Dict[str, Any]]]:
        """Group topics by their 'category' field.

        Args:
            topics: List of topic dicts.

        Returns:
            Mapping from category (0 when the key is absent) to the topics
            in that category, preserving input order within each group.
        """
        grouped: Dict[int, List[Dict[str, Any]]] = {}
        for topic in topics:
            grouped.setdefault(topic.get('category', 0), []).append(topic)
        return grouped

    def save_topics_to_csv(self, topics: List[Dict[str, Any]], category: int):
        """Write one category's topics to a CSV file (decoded description only).

        Args:
            topics: Topic dicts for this category; a no-op when empty.
            category: Category id, used in the output filename.
        """
        if not topics:
            return

        filename = f"topics_category_{category}.csv"
        filepath = os.path.join(self.output_dir, filename)

        # Single column: the decoded topic text. utf-8-sig so Excel detects UTF-8.
        fieldnames = ['decodedDescription']

        try:
            with open(filepath, 'w', newline='', encoding='utf-8-sig') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()

                for topic in topics:
                    row = {'decodedDescription': topic.get('decodedDescription', '')}
                    writer.writerow(row)

            print(f"分类{category}的{len(topics)}条数据已保存到: {filepath}")

        except Exception as e:
            print(f"保存分类{category}的数据到CSV失败: {e}")

    def run(self):
        """Run the full pipeline: fetch, group by category, export CSVs."""
        print("=" * 50)
        print("话题数据解析程序启动")
        print("=" * 50)

        all_topics = self.fetch_all_topics()

        if not all_topics:
            print("没有获取到任何话题数据")
            return

        grouped_topics = self.group_topics_by_category(all_topics)

        print(f"\n发现{len(grouped_topics)}个不同的分类:")
        for category, topics in grouped_topics.items():
            print(f"  分类{category}: {len(topics)}条数据")

        print(f"\n开始保存数据到CSV文件...")
        for category, topics in grouped_topics.items():
            self.save_topics_to_csv(topics, category)

        print(f"\n所有数据已保存到目录: {os.path.abspath(self.output_dir)}")
        print("=" * 50)
        print("程序执行完成")
        print("=" * 50)


def main():
    """Program entry point: construct a TopicParser and run the pipeline."""
    TopicParser().run()


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()