# Import the comments module from the stock_stil package
from stock_stil import comments
import csv
from proxy_manager import init_proxy_pool, get_proxy, remove_proxy
import multiprocessing as mp
from typing import List
import time
import logging
import multiprocessing

# Module-level logging setup: INFO and above to stderr.
# NOTE(review): worker processes re-import this module, so they run this too.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def crawl_page(args) -> List:
    """Crawl a single page of posts for one stock, retrying with fresh proxies.

    Args:
        args: ``(stock_code, page)`` tuple — packed into one argument so the
            function can be used with ``Pool.imap_unordered``.

    Returns:
        The list of posts returned by ``comments.getEastMoneyPostList`` for
        the page, or ``[]`` if every retry fails.
    """
    stock_code, page = args
    max_retries = 3

    for attempt in range(1, max_retries + 1):
        # Keep `proxy` defined even if get_proxy() itself raises, so the
        # except-branch never hits a NameError (bug in the original code).
        proxy = None
        try:
            proxy = get_proxy()
            # Lazy %-formatting: only rendered if INFO is enabled.
            logger.info("使用代理%s", proxy)

            return comments.getEastMoneyPostList(
                stock_code=stock_code,
                page=page,
                proxies=proxy,
            )
        except Exception as e:
            # Use the module logger (not print) for consistency with the
            # rest of the file; include which attempt failed.
            logger.warning(
                "爬取页面 %s 时出错 (attempt %d/%d): %s",
                page, attempt, max_retries, e,
            )
            if proxy is not None:
                remove_proxy(proxy)  # drop the failing proxy from the pool
            time.sleep(1)  # back off briefly before retrying

    return []

def main():
    """Crawl every page of East Money posts for one stock and save to CSV."""
    stock_code = 'hk01810'
    max_pages = float("inf")  # no cap; set a finite number to limit crawling

    # Initialize the shared proxy pool before any requests go out.
    init_proxy_pool(max_use_count=8, min_pool_size=10)

    # Fetch the first page to discover the total page count.
    first_page = comments.getEastMoneyPostList(
        stock_code=stock_code,
        proxies=get_proxy()
    )
    if not first_page:
        # Guard: first_page[0] would raise IndexError on an empty result.
        logger.error("first page fetch returned no posts; aborting")
        return

    # int() keeps range() valid even if max_pages is set to a finite float.
    total_page = int(min(first_page[0].total_page, max_pages))
    print(total_page)

    # I/O-bound workload, so oversubscribe the CPUs.
    pool = mp.Pool(processes=2 * mp.cpu_count() + 1)
    args = [(stock_code, page) for page in range(1, total_page + 1)]

    # Fan out the pages across the worker pool; order of results is
    # irrelevant since every row carries its own metadata.
    results = []
    try:
        for post_list in pool.imap_unordered(crawl_page, args):
            if post_list:
                results.extend(post_list)
    finally:
        # Always release worker processes, even if iteration raises.
        pool.close()
        pool.join()

    # Fields to persist (attribute names on the post objects).
    useful_fields = [
        # Post basics
        "post_id",            # unique post id (dedup key)
        "post_title",         # post title (core text for sentiment analysis)
        "post_publish_time",  # publish time (time-series analysis)
        "post_url",           # original URL (data provenance)

        # Stock association
        "stockbar_code",      # stock code (e.g. 600598)
        "stockbar_name",      # stock-bar name

        # User info
        "user_id",            # user id (behavior analysis)
        "user_nickname",      # nickname (needs cleaning; may carry sentiment)
        "user_is_majia",      # sockpuppet flag (noise filtering)

        # Engagement
        "post_click_count",   # click count (topic heat)
        "post_comment_count"  # comment count (discussion intensity)
    ]

    # Write all collected posts to a UTF-8-BOM CSV (Excel-friendly).
    output_filename = f'stock_posts_{stock_code}_origin_data.csv'
    with open(output_filename, 'w', newline='', encoding='utf-8-sig') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=useful_fields)
        writer.writeheader()

        for post in results:
            # Missing attributes become empty cells rather than crashing.
            post_dict = {field: getattr(post, field, '') for field in useful_fields}
            writer.writerow(post_dict)

    print(f"数据已保存到 {output_filename}")
    print(f"共爬取 {len(results)} 条帖子数据")

# Entry-point guard — mandatory here: multiprocessing workers re-import this
# module, and without the guard each worker would recursively run main().
if __name__ == '__main__':
    main()




