# -*- coding: utf-8 -*-
# @File    : 5_muilt_get_pinglun.py
# 多Cookie并发爬取微博评论

import pandas as pd
import time
import datetime
import random
import requests
import json
import os
import threading
import concurrent.futures
from cookies_manager import CookiesManager
import traceback
from dateutil import parser
from queue import Queue

# Module-level shared state
failed_ids = []  # failed weibo IDs; NOTE(review): the class keeps its own self.failed_ids — this global appears unused by the functions below
location_data = []  # buffered geo-location records waiting to be flushed to CSV
location_data_lock = threading.Lock()  # guards all access to location_data

def load_progress():
    """
    Load the resume-crawl progress file.

    Returns:
        set: weibo IDs that have already been processed.
    """
    progress_file = 'Weibo/爬取进度/comment_crawl_progress.json'
    # No progress file yet means a fresh crawl.
    if not os.path.exists(progress_file):
        return set()
    with open(progress_file, 'r', encoding='utf-8') as f:
        return set(json.load(f))

def save_progress(processed_ids):
    """
    Persist the crawl progress so an interrupted run can resume later.

    Args:
        processed_ids (set): weibo IDs that have been processed.
    """
    progress_file = 'Weibo/爬取进度/comment_crawl_progress.json'
    # The progress directory may not exist on the very first run.
    os.makedirs('Weibo/爬取进度', exist_ok=True)
    with open(progress_file, 'w', encoding='utf-8') as f:
        json.dump(list(processed_ids), f)

def save_location_data(local_location_data=None):
    """
    Flush collected geo-location records to their dedicated CSV file.

    Args:
        local_location_data: records gathered by one worker thread; when
            None, only the records already buffered in the module-level
            ``location_data`` list are flushed.
    """
    # The global buffer is shared by all worker threads, so every access
    # happens under the lock.
    with location_data_lock:
        if local_location_data:
            location_data.extend(local_location_data)

        # Nothing buffered -> nothing to write.
        if not location_data:
            return

        os.makedirs('Weibo', exist_ok=True)
        out_path = 'Weibo/4_社交媒体特定表达_地理定位数据.csv'

        # Merge with rows already on disk so earlier runs are preserved.
        prior_rows = []
        if os.path.exists(out_path):
            try:
                prior_rows = pd.read_csv(out_path, encoding='utf-8-sig').to_dict('records')
                print(f"已加载 {len(prior_rows)} 条已有地理定位数据")
            except Exception as e:
                print(f"读取已有地理定位数据失败: {str(e)}")

        # Deduplicate on the unique weibo ID, keeping the first occurrence.
        seen_ids = set()
        merged_rows = []
        for row in prior_rows + location_data:
            row_id = row.get('微博唯一ID')
            if row_id in seen_ids:
                continue
            seen_ids.add(row_id)
            merged_rows.append(row)

        frame = pd.DataFrame(merged_rows)

        # Keep only the geo-location columns that are actually present.
        wanted_columns = ['微博唯一ID', '帖子的URL', '地理定位解析结果', '带时区时间格式']
        frame = frame[[col for col in wanted_columns if col in frame.columns]]

        frame.to_csv(out_path, index=False, encoding='utf-8-sig')
        print(f"\n地理定位数据已保存到 {out_path}")
        print(f"共收集了 {len(merged_rows)} 条地理定位数据（含已有数据）")

        # Drain the buffer so the same rows are not appended twice.
        location_data.clear()

def format_weibo_time(time_str):
    """
    Normalize a weibo API timestamp to a standard string.

    Args:
        time_str (str): raw timestamp, e.g. "Tue Dec 31 11:28:41 +0800 2024"

    Returns:
        str: formatted timestamp such as "2024-12-31 11:28:41"; an empty
            input yields "", and an unparsable input is returned unchanged.
    """
    if not time_str:
        return ""

    try:
        # dateutil copes with the various formats the API emits.
        return parser.parse(time_str).strftime("%Y-%m-%d %H:%M:%S")
    except Exception as e:
        print(f"时间格式转换错误: {str(e)}, 原始时间: {time_str}")
        return time_str  # fall back to the raw string on failure

def check_cookie_validity(cookie):
    """
    Probe weibo.com to see whether a cookie still authenticates.

    Args:
        cookie (str): raw Cookie header value.

    Returns:
        bool: True when the cookie is usable, False otherwise.
    """
    # Same probe endpoint as 4_muilt_get_username.py; any valid UID works.
    test_url = "https://weibo.com/ajax/profile/info?uid=1669879400"
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'accept-encoding': 'gzip, deflate, br',
        'cookie': cookie,
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'connection': 'keep-alive'
    }

    try:
        resp = requests.get(test_url, headers=headers, timeout=10)

        # Anything but HTTP 200 means the probe failed.
        if resp.status_code != 200:
            return False

        payload = resp.json()

        # An explicit ok=0 marks an expired login.
        if 'ok' in payload and payload['ok'] == 0:
            return False

        # A valid session always carries profile data.
        return 'data' in payload
    except Exception as e:
        print(f"检查cookie有效性时出错: {str(e)}")
        return False

class MultiThreadCommentCrawler:
    """
    Multi-threaded crawler that fetches weibo comments concurrently,
    running one worker thread per available cookie.
    """
    
    def __init__(self, input_csv='Weibo/1_社交媒体特定表达_帖子内容.csv'):
        """
        Initialize the multi-threaded comment crawler.
        
        Args:
            input_csv: path of the input CSV file containing the weibo IDs
        """
        self.input_csv = input_csv
        self.comments_file = 'Weibo/3_社交媒体特定表达_评论数据.csv'
        self.weibo_details_file = 'weibo_details.csv'
        self.cookies_manager = CookiesManager()
        self.processed_ids = set()  # weibo IDs already processed
        self.failed_ids = []  # weibo IDs that failed
        self.weibo_details = []  # collected weibo detail records
        self.details_lock = threading.Lock()  # guards weibo_details
        self.progress_lock = threading.Lock()  # guards processed_ids / progress file
        # BUGFIX: must be reentrant. worker() calls save_failed_ids() while
        # already holding fail_lock, and save_failed_ids() acquires fail_lock
        # again -- with a plain Lock every failure path self-deadlocked.
        self.fail_lock = threading.RLock()
        # BUGFIX: appends to the shared comments CSV come from several
        # threads; without serialization rows can interleave mid-record.
        self.comments_lock = threading.Lock()
        self.threads = []  # thread list
        self.progress_dir = 'Weibo/爬取进度'
        self.failed_ids_file = os.path.join(self.progress_dir, 'failed_comment_ids.json')
        
        # Make sure the output directories exist
        os.makedirs(self.progress_dir, exist_ok=True)
        os.makedirs('Weibo', exist_ok=True)
        
        # Load any existing data from previous runs
        self.initial_setup()
    
    def initial_setup(self):
        """
        Initial setup: load processed IDs, existing details and failed IDs.
        """
        # Load IDs handled by previous runs (resume support)
        self.processed_ids = load_progress()
        print(f"已加载 {len(self.processed_ids)} 个已处理的微博ID")
        
        # Load existing weibo detail records
        if os.path.exists(self.weibo_details_file):
            try:
                existing_df = pd.read_csv(self.weibo_details_file, encoding='utf-8-sig')
                self.weibo_details = existing_df.to_dict('records')
                print(f"已加载 {len(self.weibo_details)} 条微博详情")
            except Exception as e:
                print(f"加载现有微博详情失败: {str(e)}")
                self.weibo_details = []
        
        # Load previously failed weibo IDs
        if os.path.exists(self.failed_ids_file):
            try:
                with open(self.failed_ids_file, 'r', encoding='utf-8') as f:
                    self.failed_ids = json.load(f)
                print(f"已加载 {len(self.failed_ids)} 个失败微博ID")
            except Exception as e:
                print(f"加载失败微博ID失败: {str(e)}")
                self.failed_ids = []
        
        # Ensure the comments file exists with its header row, because
        # later appends are written with header=False.
        if not os.path.exists(self.comments_file):
            fieldnames = ['帖子的URL', '微博唯一ID', '带时区时间格式', '评论内容', 
                          '转发量', '评论量', '点赞量', '地理定位解析结果']
            with open(self.comments_file, 'w', encoding='utf-8-sig', newline='') as f:
                writer = pd.DataFrame(columns=fieldnames)
                writer.to_csv(f, index=False)
            print(f"已创建评论数据文件: {self.comments_file}")
    
    def save_failed_ids(self):
        """
        Persist the failed weibo IDs to disk.
        """
        # fail_lock is reentrant, so callers that already hold it may
        # invoke this method safely.
        with self.fail_lock:
            with open(self.failed_ids_file, 'w', encoding='utf-8') as f:
                json.dump(self.failed_ids, f)
    
    def save_weibo_details(self):
        """
        Persist the weibo detail records to CSV.
        """
        with self.details_lock:
            if self.weibo_details:
                result_df = pd.DataFrame(self.weibo_details)
                
                # Deduplicate on the unique weibo ID, keeping the first row
                if '微博唯一ID' in result_df.columns:
                    result_df.drop_duplicates(subset=['微博唯一ID'], keep='first', inplace=True)
                    
                result_df.to_csv(self.weibo_details_file, index=False, encoding='utf-8-sig')
                print(f"\n微博详情已保存到 {self.weibo_details_file}")
                print(f"共 {len(self.weibo_details)} 条微博详情")
    
    def split_weibo_ids(self):
        """
        Split the pending weibo IDs across the available cookies.
        
        Returns:
            Dict[str, List]: weibo ID list assigned to each cookie ID
        """
        # Read the input CSV to get all weibo IDs
        try:
            df = pd.read_csv(self.input_csv, encoding='utf-8-sig')
        except UnicodeDecodeError:
            try:
                df = pd.read_csv(self.input_csv, encoding='gbk')
            except Exception as e:
                print(f"读取CSV文件失败: {str(e)}")
                return {}
        
        # Coerce the comment-count column to numeric; unparsable cells
        # become NaN (dropped by the filter below) instead of crashing
        # the whole run.
        df['评价数'] = pd.to_numeric(df['评价数'], errors='coerce')
        # BUGFIX: keep only posts with at least one comment. The previous
        # condition was `>= 0`, which kept every post and contradicted the
        # ">=1" progress messages printed below.
        total_count = len(df)
        df = df[df['评价数'] >= 1]
        filtered_count = len(df)
        
        print(f"总微博数: {total_count}")
        print(f"评论数>=1的微博数: {filtered_count}")
        print(f"已过滤掉 {total_count - filtered_count} 条评论数<1的微博")
        
        # Normalize IDs to strings for consistent comparisons
        df['mblogid'] = df['mblogid'].astype(str)
        
        # Collect all distinct weibo IDs
        all_weibo_ids = df['mblogid'].unique().tolist()
        
        # Drop IDs that were already processed in earlier runs
        remaining_ids = [str(wid) for wid in all_weibo_ids if str(wid) not in self.processed_ids]
        print(f"未处理微博数: {len(remaining_ids)}")
        
        if not remaining_ids:
            print("所有微博都已处理完成！")
            return {}
        
        # Split the remaining IDs across cookies; the per-cookie task state
        # lives in the multi_pinglun_cookies sub-directory.
        os.makedirs(os.path.join(self.progress_dir, 'multi_pinglun_cookies'), exist_ok=True)
        return self.cookies_manager.split_tasks(remaining_ids, os.path.join(self.progress_dir, 'multi_pinglun_cookies'))
    
    def process_weibo(self, mblogid, cookie_id):
        """
        Process one weibo ID: fetch its detail record and its comments.
        
        Args:
            mblogid: weibo ID
            cookie_id: ID of the cookie assigned to this weibo
            
        Returns:
            dict: processing result, including status and data
        """
        try:
            # Get the request header bound to this cookie
            cookie_content, header = self.cookies_manager.get_cookie_with_header(cookie_id)
            
            # Bail out early when the cookie is unavailable
            if not cookie_content:
                return {'status': 'error', 'mblogid': mblogid, 'message': f"Cookie {cookie_id} 不可用"}
            
            # Double-check the cookie still authenticates
            if not check_cookie_validity(cookie_content):
                error_msg = f"Cookie {cookie_id} 已失效，请更新"
                print(f"\n[错误] {error_msg}")
                return {'status': 'error', 'mblogid': mblogid, 'message': error_msg}
            
            # Geo-location records collected for this weibo only
            local_location_data = []
            
            # Random delay to avoid hammering the API
            time.sleep(random.uniform(0.5, 0.7))
            
            # Weibo detail API
            test_url = f"https://weibo.com/ajax/statuses/show?id={mblogid}"
            response = requests.get(test_url, headers=header)
            
            if response.status_code != 200:
                return {'status': 'error', 'mblogid': mblogid, 'message': f"请求失败，状态码: {response.status_code}"}
            
            # Parse the JSON payload returned by the API
            try:
                response_json = response.json()
                print(f"成功获取微博基础数据: {mblogid}")
            except json.JSONDecodeError as e:
                return {'status': 'error', 'mblogid': mblogid, 'message': f"JSON解析错误: {str(e)}, 响应内容: {response.text[:200]}"}
            
            # Use the API response as the base detail record
            basic_info = response_json
            
            if basic_info:
                # URL of the weibo detail page
                weibo_url = f"https://weibo.com/detail/{mblogid}"
                
                # Assemble the detail record for this weibo
                detail = {
                    '帖子的URL': weibo_url,
                    '微博唯一ID': mblogid,
                    '带时区时间格式': format_weibo_time(basic_info.get('created_at', '')),
                    '评论内容': basic_info.get('text_raw', ''),
                    '转发量': basic_info.get('reposts_count', 0),
                    '评论量': basic_info.get('comments_count', 0),
                    '点赞量': basic_info.get('attitudes_count', 0),
                    '地理定位解析结果': basic_info.get('region_name', '')
                }
                
                # Append the detail to the shared list
                with self.details_lock:
                    self.weibo_details.append(detail)
                
                # Buffer the geo-location record locally
                location_info = {
                    '微博唯一ID': mblogid,
                    '帖子的URL': weibo_url,
                    '带时区时间格式': format_weibo_time(basic_info.get('created_at', '')),
                    '地理定位解析结果': basic_info.get('region_name', '')
                }
                local_location_data.append(location_info)
                
                # Random delay before the next request
                time.sleep(random.uniform(0.5, 0.7))
                
                # Fetch the comments of this weibo
                print(f"开始获取评论数据: {mblogid}")
                comments_url = f"https://weibo.com/ajax/statuses/buildComments?flow=0&is_reload=1&id={mblogid}&is_show_bulletin=2"
                try:
                    comments_response = requests.get(comments_url, headers=header, timeout=15)
                except requests.exceptions.RequestException as e:
                    print(f"获取评论请求失败: {str(e)}")
                    # Still persist the detail + geo data gathered so far
                    save_location_data(local_location_data)
                    return {
                        'status': 'success', 
                        'mblogid': mblogid, 
                        'detail': detail,
                        'comments_count': 0
                    }
                
                comments_list = []
                # Handle the comments payload
                if comments_response.status_code == 200:
                    try:
                        comments_data = comments_response.json()
                        comments_list = comments_data.get('data', [])
                    except json.JSONDecodeError as e:
                        print(f"解析评论JSON失败: {str(e)}")
                        comments_list = []
                    
                    if comments_list:
                        # Build one row per comment
                        comment_rows = []
                        for comment in comments_list:
                            comment_rows.append({
                                '帖子的URL': f"https://weibo.com/detail/{mblogid}",
                                '微博唯一ID': comment.get('id', ''),
                                '带时区时间格式': format_weibo_time(comment.get('created_at', '')),
                                '评论内容': comment.get('text_raw', ''),
                                '转发量': 0,  # comments have no repost count
                                '评论量': len(comment.get('comments', [])),  # number of sub-comments
                                '点赞量': comment.get('like_counts', 0),
                                '地理定位解析结果': comment.get('source', '').replace('来自', '')  # comment source region
                            })
                        
                        # BUGFIX: one batched, locked append. The old code
                        # appended row by row from multiple threads with no
                        # lock, which could interleave partial CSV records.
                        with self.comments_lock:
                            pd.DataFrame(comment_rows).to_csv(
                                self.comments_file, mode='a', header=False, index=False, encoding='utf-8-sig')
                        
                        print(f"已保存 {len(comments_list)} 条评论 - 微博ID: {mblogid}")
                    else:
                        print(f"微博 {mblogid} 没有评论数据")
                else:
                    print(f"获取评论失败，微博ID: {mblogid}, 状态码: {comments_response.status_code}")
                
                # Flush the geo-location data gathered for this weibo
                save_location_data(local_location_data)
                
                return {
                    'status': 'success', 
                    'mblogid': mblogid, 
                    'detail': detail,
                    'comments_count': len(comments_list)
                }
            else:
                return {'status': 'error', 'mblogid': mblogid, 'message': f"未能获取微博数据: {mblogid}"}
            
        except Exception as e:
            error_message = f"处理微博 {mblogid} 时发生错误: {str(e)}"
            print(f"\n[错误] {error_message}")
            traceback.print_exc()
            return {'status': 'error', 'mblogid': mblogid, 'message': error_message}
    
    def worker(self, cookie_id, weibo_ids):
        """
        Worker thread: process the weibo IDs assigned to one cookie.
        
        Args:
            cookie_id: cookie ID
            weibo_ids: weibo IDs this cookie is responsible for
        """
        total = len(weibo_ids)
        success_count = 0
        fail_count = 0
        consecutive_fails = 0  # consecutive-failure counter
        
        print(f"线程 {cookie_id} 开始处理 {total} 个微博")
        
        # Verify the cookie before doing any work
        cookie_content = self.cookies_manager.get_cookie(cookie_id)
        if not cookie_content:
            print(f"\n[错误] 线程 {cookie_id} 的Cookie无效，无法启动")
            # Move every task of this thread to the failed list.
            # fail_lock is an RLock, so the nested acquisition inside
            # save_failed_ids() is safe here.
            with self.fail_lock:
                for wid in weibo_ids:
                    if wid not in self.failed_ids:
                        self.failed_ids.append(wid)
                self.save_failed_ids()
            return
        
        # Further validate the cookie against the live site
        if not check_cookie_validity(cookie_content):
            print(f"\n[错误] 线程 {cookie_id} 的Cookie已失效，请更新")
            with self.fail_lock:
                for wid in weibo_ids:
                    if wid not in self.failed_ids:
                        self.failed_ids.append(wid)
                self.save_failed_ids()
            return
        
        # Process each assigned weibo ID
        for index, mblogid in enumerate(weibo_ids, 1):
            try:
                # Skip IDs another thread already finished
                if mblogid in self.processed_ids:
                    print(f"线程 {cookie_id} - 微博 {mblogid} 已被其他线程处理")
                    continue
                
                print(f"线程 {cookie_id} 处理 {index}/{total}: 微博ID {mblogid}")
                
                result = self.process_weibo(mblogid, cookie_id)
                
                if result['status'] == 'success':
                    # Success resets the consecutive-failure counter
                    consecutive_fails = 0
                    
                    with self.progress_lock:
                        self.processed_ids.add(mblogid)
                        # Flush progress after every success so an
                        # interrupted crawl can resume exactly here
                        save_progress(self.processed_ids)
                    
                    success_count += 1
                    
                    # Persist details every 10 successful weibos
                    if success_count % 10 == 0:
                        self.save_weibo_details()
                    
                else:
                    consecutive_fails += 1
                    fail_count += 1
                    
                    # Log the failure reason
                    print(f"\n[失败] 线程 {cookie_id} 处理微博 {mblogid} 失败: {result.get('message', '未知错误')}")
                    
                    with self.fail_lock:
                        if mblogid not in self.failed_ids:
                            self.failed_ids.append(mblogid)
                            self.save_failed_ids()
                    
                    # Too many consecutive failures usually means a dead cookie
                    if consecutive_fails >= 5:
                        print(f"\n[警告] 线程 {cookie_id} 连续失败 {consecutive_fails} 次，可能cookie已失效")
                        self.cookies_manager.mark_cookie_unavailable(cookie_id)
                        break
                    
                    # Back off longer after a failure
                    time.sleep(random.uniform(3, 5))
                
            except Exception as e:
                print(f"\n[异常] 线程 {cookie_id} 处理微博 {mblogid} 时发生异常: {str(e)}")
                traceback.print_exc()
                
                consecutive_fails += 1
                fail_count += 1
                
                with self.fail_lock:
                    if mblogid not in self.failed_ids:
                        self.failed_ids.append(mblogid)
                        self.save_failed_ids()
                
                # Back off longer after a failure
                time.sleep(random.uniform(3, 5))
                
                # Abort this worker after too many consecutive failures
                if consecutive_fails >= 5:
                    print(f"\n[警告] 线程 {cookie_id} 连续失败 {consecutive_fails} 次，退出")
                    break
        
        # Final flush for this thread
        self.save_weibo_details()
        print(f"线程 {cookie_id} 已完成处理，成功: {success_count}，失败: {fail_count}")
    
    def run(self):
        """
        Run the multi-threaded comment crawl.
        """
        # Distribute the pending weibo IDs across cookies
        task_splits = self.split_weibo_ids()
        
        if not task_splits:
            print("没有任务需要处理或所有微博都已处理完成")
            return
        
        # One worker thread per cookie
        with concurrent.futures.ThreadPoolExecutor(max_workers=len(task_splits)) as executor:
            # Submit one worker per cookie assignment
            futures = {executor.submit(self.worker, cookie_id, weibo_ids): cookie_id 
                      for cookie_id, weibo_ids in task_splits.items()}
            
            # Wait for every worker to finish
            for future in concurrent.futures.as_completed(futures):
                cookie_id = futures[future]
                try:
                    future.result()  # re-raises any worker exception
                except Exception as e:
                    print(f"\n[错误] 线程 {cookie_id} 执行失败: {str(e)}")
                    traceback.print_exc()
        
        # Persist final results once every thread has finished
        self.save_weibo_details()
        self.save_failed_ids()
        save_location_data()  # flush buffered geo-location data
        
        # Summary statistics (processed_ids includes IDs from earlier runs)
        success_count = len(self.processed_ids)
        failed_count = len(self.failed_ids)
        
        print("\n爬取完成！")
        print(f"成功获取: {success_count} 个微博评论")
        print(f"失败数量: {failed_count} 个微博")
        
        # Report where each result file lives
        print(f"\n微博详情已保存到: {self.weibo_details_file}")
        print(f"评论数据已保存到: {self.comments_file}")
        print(f"地理位置数据已保存到: Weibo/4_社交媒体特定表达_地理定位数据.csv")
        print(f"爬取进度已保存到: {os.path.join(self.progress_dir, 'comment_crawl_progress.json')}")
        print(f"失败ID已保存到: {self.failed_ids_file}")


if __name__ == "__main__":
    try:
        # Make sure every required output directory exists up front
        for required_dir in ('Weibo', 'Weibo/爬取进度'):
            os.makedirs(required_dir, exist_ok=True)

        # Build the multi-cookie crawler and start crawling
        crawler = MultiThreadCommentCrawler()
        crawler.run()
    except Exception as e:
        print(f"\n[错误] 程序执行出错: {str(e)}")
        traceback.print_exc()