#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
话题重复记录检查脚本
调用查询话题接口，将所有"话题内容"解码出来，然后检查重复记录
"""

import json
import urllib.parse
from collections import defaultdict
from typing import Any, Dict, List, Optional, Set

import requests


class TopicDuplicateChecker:
    """Fetch all topics from the live-topics API, URL-decode each topic's
    description, and report groups of records that share identical content."""

    def __init__(self):
        # Endpoint and oversized page so a single request returns every record.
        self.base_url = "https://wanyinfm.com/api/livetopics/list"
        self.page_size = 700
        self.session = requests.Session()

        # Browser-like headers so the endpoint treats us as a normal client.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive'
        })

    def fetch_topics_data(self) -> Optional[Dict[str, Any]]:
        """
        Fetch topic data with a single request (no pagination).

        Returns:
            The parsed JSON response, or None when the HTTP request fails
            or the body is not valid JSON.
        """
        params = {
            'size': self.page_size
        }

        try:
            response = self.session.get(self.base_url, params=params, timeout=30)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"请求话题数据失败: {e}")
            return None
        except json.JSONDecodeError as e:
            print(f"解析JSON数据失败: {e}")
            return None

    def decode_topic_description(self, encoded_description: str) -> str:
        """
        Decode a URL-encoded topic description.

        Args:
            encoded_description: The URL-encoded topic description.

        Returns:
            The decoded text; on any failure the input is returned
            unchanged (best-effort, logged to stdout).
        """
        try:
            return urllib.parse.unquote(encoded_description, encoding='utf-8')
        except Exception as e:
            print(f"解码话题描述失败: {encoded_description}, 错误: {e}")
            return encoded_description

    def get_all_topics(self) -> List[Dict[str, Any]]:
        """
        Fetch every topic and attach a decoded description to each.

        Returns:
            A list of topic dicts; each dict that carried a
            'topicDescription' gains a 'decodedDescription' key.
            Empty list on any failure.
        """
        print("开始获取话题数据...")

        data = self.fetch_topics_data()
        if not data:
            print("获取话题数据失败")
            return []

        # The API reports its status envelope under the 'h' key.
        if not data.get('h', {}).get('success', False):
            print(f"API返回错误: {data.get('h', {}).get('msg', '未知错误')}")
            return []

        # The topic records live under b.content.
        content = data.get('b', {}).get('content', [])
        if not content:
            print("没有获取到任何话题数据")
            return []

        # Decode each record's description in place.
        all_topics = []
        for topic in content:
            if 'topicDescription' in topic:
                topic['decodedDescription'] = self.decode_topic_description(topic['topicDescription'])
            all_topics.append(topic)

        print(f"总共获取到{len(all_topics)}条话题数据")
        return all_topics

    def find_duplicate_topics(self, topics: List[Dict[str, Any]]) -> Dict[str, List[int]]:
        """
        Group topics by decoded content and keep groups with more than one record.

        Args:
            topics: Topic dicts (reads 'decodedDescription' and 'topicId').

        Returns:
            Mapping from each duplicated content string to the list of
            topicIds that share it.
        """
        print("\n开始检查重复记录...")

        # Map each decoded content string to every topicId that uses it.
        content_to_ids = defaultdict(list)

        for topic in topics:
            decoded_content = topic.get('decodedDescription', '')
            topic_id = topic.get('topicId')

            # 'is not None' keeps a legitimate topicId of 0; a bare
            # truthiness test would silently drop that record.
            if decoded_content and topic_id is not None:
                content_to_ids[decoded_content].append(topic_id)

        # Only contents that occur more than once are duplicates.
        return {content: ids for content, ids in content_to_ids.items() if len(ids) > 1}

    def print_duplicate_results(self, duplicate_records: Dict[str, List[int]]):
        """
        Print a human-readable report of the duplicate groups.

        Args:
            duplicate_records: Mapping of duplicated content to topicId lists.
        """
        if not duplicate_records:
            print("\n✅ 没有发现重复的话题记录")
            return

        print(f"\n🔍 发现 {len(duplicate_records)} 组重复的话题记录:")
        print("=" * 80)

        total_duplicate_count = 0
        for i, (content, topic_ids) in enumerate(duplicate_records.items(), 1):
            # Count only the extra copies, not the original record.
            duplicate_count = len(topic_ids) - 1
            total_duplicate_count += duplicate_count

            print(f"\n第 {i} 组重复记录:")
            print(f"话题内容: {content}")
            print(f"重复次数: {len(topic_ids)} 次")
            print(f"涉及的话题ID: {', '.join(map(str, topic_ids))}")
            print("-" * 60)

        print(f"\n📊 统计结果:")
        print(f"重复记录组数: {len(duplicate_records)} 组")
        print(f"重复记录总数: {total_duplicate_count} 条")
        print("=" * 80)

    def run(self):
        """
        Run the full workflow: fetch, decode, detect, and report.
        """
        print("=" * 80)
        print("话题重复记录检查程序启动")
        print("=" * 80)

        # Fetch all topic data.
        all_topics = self.get_all_topics()

        if not all_topics:
            print("没有获取到任何话题数据，程序结束")
            return

        # Detect duplicate records.
        duplicate_records = self.find_duplicate_topics(all_topics)

        # Report the findings.
        self.print_duplicate_results(duplicate_records)

        print("\n程序执行完成")
        print("=" * 80)


def main():
    """Entry point: build a checker and execute the complete workflow."""
    TopicDuplicateChecker().run()


if __name__ == "__main__":
    # Run the duplicate check only when executed as a script, not on import.
    main()