import requests
import json
import time
from concurrent.futures import ThreadPoolExecutor
import os
from datetime import datetime


def crawl_single_page(page_num, cookies):
    """Fetch one page of the Zhihu recommend feed.

    Args:
        page_num: 1-based page index.
        cookies: dict of session cookies used to authenticate the request.

    Returns:
        The list under the response's ``data`` key, or an empty list on
        any failure (non-200 status, network error, malformed JSON).
    """
    # Pagination cursor: advances by 6 items per page, starting at 5.
    after_id = 5 + (page_num - 1) * 6

    url = f"https://www.zhihu.com/api/v3/feed/topstory/recommend?action=down&ad_interval=-10&after_id={after_id}&desktop=true&end_offset={after_id}&page_number={page_num + 1}&session_token=6b00af0d2d5094663f104de098b473ae"

    try:
        resp = requests.get(url, cookies=cookies, timeout=10)
        resp.encoding = 'utf-8'
        if resp.status_code != 200:
            return []
        payload = json.loads(resp.text)
    except Exception as exc:
        # Best-effort: report and fall through to the empty result.
        print(f"第 {page_num} 页爬取异常: {exc}")
        return []

    return payload.get('data', [])


def crawl_zhihu_fast(total_pages=10, cookies=None):
    """Crawl multiple pages of the Zhihu recommend feed concurrently.

    Args:
        total_pages: number of pages to fetch (default 10).
        cookies: optional cookie dict for authentication. When omitted,
            falls back to the built-in session cookies, keeping old
            callers working unchanged.

    Returns:
        Flat list of feed entries accumulated from all successful pages.
    """
    # SECURITY NOTE(review): hard-coded session credentials should live in
    # environment variables or a config file, not in source control. Kept
    # here only as the backward-compatible default.
    if cookies is None:
        cookies = {
            'sessionid': 'ymUTBvFG6PwdXGM4cdrYVIIsT7PKNwljkxkeJ0hFR1K',
            'z_c0': '2|1:0|10:1761571673|4:z_c0|92:Mi4xbjZUQkNBQUFBQUEta2RPYzB2YllHaVlBQUFCZ0FsVk5XY0hzYVFEMnVSSzJxcHJwa2F3M29USDJGYXN0N2tPMmlB|891c094e44db34691048fb925b74038feb3f77e06dcf633ee227b8e36beeff54;',
            'd_c0': 'PpHTnNL22BqPTkBpSKVIkW8pJxLO34shAUo=|1754016816; __snaker__id=fxPSbV8pszufjUR7;',
        }

    all_feeds = []

    print(f"开始快速爬取 {total_pages} 页数据...")

    # Thread pool: I/O-bound work, so threads overlap the network waits.
    with ThreadPoolExecutor(max_workers=3) as executor:
        futures = []

        for page in range(1, total_pages + 1):
            # Stagger submissions by 0.5 s to throttle request start times
            # and avoid hammering the server.
            time.sleep(0.5)
            future = executor.submit(crawl_single_page, page, cookies)
            futures.append((page, future))

        # Collect results in page order.
        for page, future in futures:
            try:
                page_data = future.result(timeout=10)
                if page_data:
                    all_feeds.extend(page_data)
                    print(f"✓ 第 {page} 页: {len(page_data)} 条")
                else:
                    print(f"✗ 第 {page} 页: 无数据")
            except Exception as e:
                print(f"✗ 第 {page} 页: 超时或错误 - {e}")

    return all_feeds


def save_data(feeds, filename=None):
    """Save feed data to JSON files under the ``data`` directory.

    Writes the full raw feed list plus a simplified companion file
    containing only the commonly inspected fields.

    Args:
        feeds: list of feed dicts from the Zhihu recommend API; each must
            contain a ``target`` dict.
        filename: optional name for the full JSON file; a timestamped name
            is generated when omitted.

    Returns:
        Tuple ``(full_json_path, simplified_json_path)``.
    """
    # BUG FIX: compute the timestamp unconditionally — the simplified
    # filename below needs it even when an explicit `filename` is given
    # (the original raised NameError in that case).
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    if not filename:
        filename = f"zhihu_data_{timestamp}.json"

    # Ensure the output directory exists.
    os.makedirs('data', exist_ok=True)
    filepath = os.path.join('data', filename)

    # Save the complete raw payload.
    with open(filepath, 'w', encoding='utf-8') as f:
        json.dump(feeds, f, ensure_ascii=False, indent=2)

    print(f"完整数据已保存到: {filepath}")

    # Also save a simplified view that is easier to eyeball.
    simplified_data = []
    for feed in feeds:
        target = feed['target']
        simplified = {
            'id': target.get('id'),
            'author': target.get('author', {}).get('name'),
            'question_title': target['question']['title'] if 'question' in target else '未知问题',
            'excerpt': target.get('excerpt'),
            'voteup_count': target.get('voteup_count'),
            'comment_count': target.get('comment_count'),
            'created_time': target.get('created_time')
        }
        simplified_data.append(simplified)

    simplified_filename = f"zhihu_simplified_{timestamp}.json"
    simplified_filepath = os.path.join('data', simplified_filename)
    with open(simplified_filepath, 'w', encoding='utf-8') as f:
        json.dump(simplified_data, f, ensure_ascii=False, indent=2)

    print(f"简化数据已保存到: {simplified_filepath}")

    return filepath, simplified_filepath


def analyze_data(feeds):
    """Print a statistics report for the crawled feeds.

    Reports totals/averages for votes and comments, a per-author count,
    the top-10 most-voted entries, and the overall time range.

    Args:
        feeds: list of feed dicts, each containing a ``target`` dict with
            ``voteup_count``, ``comment_count``, ``author`` and
            ``created_time``.

    Returns:
        None. Output goes to stdout only.
    """
    if not feeds:
        print("没有数据可分析")
        return

    print(f"\n=== 数据统计报告 ===")
    print(f"总数据量: {len(feeds)} 条")

    # Aggregate statistics.
    total_votes = sum(feed['target']['voteup_count'] for feed in feeds)
    total_comments = sum(feed['target']['comment_count'] for feed in feeds)
    avg_votes = total_votes / len(feeds)
    avg_comments = total_comments / len(feeds)

    print(f"总点赞数: {total_votes}")
    print(f"总评论数: {total_comments}")
    print(f"平均点赞: {avg_votes:.1f}")
    print(f"平均评论: {avg_comments:.1f}")

    # Per-author entry counts.
    authors = {}
    for feed in feeds:
        author = feed['target']['author']['name']
        authors[author] = authors.get(author, 0) + 1

    print(f"\n作者统计 (前10):")
    for author, count in sorted(authors.items(), key=lambda x: x[1], reverse=True)[:10]:
        print(f"  {author}: {count} 条")

    # Top 10 by vote count.
    print(f"\n点赞最多前10:")
    top_voted = sorted(feeds, key=lambda x: x['target']['voteup_count'], reverse=True)[:10]
    for i, feed in enumerate(top_voted, 1):
        target = feed['target']
        print(f"  {i}. {target['author']['name']} - {target['voteup_count']}赞")
        # BUG FIX: guard the 'question' lookup like save_data/save_to_csv do —
        # the original raised KeyError for entries without a question.
        question_title = target['question']['title'] if 'question' in target else '未知问题'
        print(f"     问题: {question_title[:30]}...")
        print(f"     内容: {target['excerpt'][:50]}...")

    # Time range of the crawled entries (epoch seconds -> local datetime).
    timestamps = [feed['target']['created_time'] for feed in feeds]
    min_time = min(timestamps)
    max_time = max(timestamps)
    print(f"\n时间范围: {datetime.fromtimestamp(min_time)} 到 {datetime.fromtimestamp(max_time)}")


def save_to_csv(feeds):
    """Export feed data to a timestamped CSV file under ``data``.

    Args:
        feeds: list of feed dicts, each containing a ``target`` dict.

    Returns:
        Path of the CSV file that was written.
    """
    import csv

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    csv_filename = f"zhihu_data_{timestamp}.csv"
    # BUG FIX: create the output directory — the original relied on
    # save_data() having been called first and failed when run standalone.
    os.makedirs('data', exist_ok=True)
    csv_filepath = os.path.join('data', csv_filename)

    with open(csv_filepath, 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        # Header row.
        writer.writerow(['ID', '作者', '问题标题', '内容摘要', '点赞数', '评论数', '创建时间'])

        # One row per feed entry.
        for feed in feeds:
            target = feed['target']
            writer.writerow([
                target['id'],
                target['author']['name'],
                target['question']['title'] if 'question' in target else '未知问题',
                target['excerpt'],
                target['voteup_count'],
                target['comment_count'],
                datetime.fromtimestamp(target['created_time']).strftime('%Y-%m-%d %H:%M:%S')
            ])

    print(f"CSV数据已保存到: {csv_filepath}")
    return csv_filepath


# Script entry point: crawl, persist (JSON + CSV), then report statistics.
if __name__ == "__main__":
    print("开始爬取知乎数据...")

    # Fetch the feed pages.
    feeds = crawl_zhihu_fast(total_pages=10)

    if not feeds:
        print("没有获取到数据")
    else:
        print(f"\n爬取完成! 共获取 {len(feeds)} 条数据")

        # Persist in all three formats.
        json_file, simplified_file = save_data(feeds)
        csv_file = save_to_csv(feeds)

        # Print the statistics report.
        analyze_data(feeds)

        print(f"\n=== 文件保存汇总 ===")
        print(f"完整JSON: {json_file}")
        print(f"简化JSON: {simplified_file}")
        print(f"CSV文件: {csv_file}")