#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
多线程管理器
调用 tg_scraper.py 的方法进行多线程处理
"""

import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
import logging

# 导入 tg_scraper 的方法
from tg_scraper import (
    get_group_links,
    get_username_from_link,
    try_get_group_data,
    submit_data,
    logger
)

class ThreadTGScraper:
    """Thread-pool manager that scrapes Telegram groups concurrently.

    The actual scraping/submission work is delegated to the functions
    imported from ``tg_scraper``; this class only coordinates worker
    threads and keeps thread-safe success/error statistics.
    """

    def __init__(self, max_workers=5):
        """
        Args:
            max_workers (int): maximum number of worker threads in the pool.
        """
        self.max_workers = max_workers
        # Shared counters mutated by worker threads -> guarded by stats_lock.
        self.stats = {
            'total_processed': 0,
            'success_count': 0,
            'error_count': 0,
            'start_time': None
        }
        self.stats_lock = threading.Lock()

    def _bump(self, counter):
        """Thread-safely increment one of the stats counters."""
        with self.stats_lock:
            self.stats[counter] += 1

    def process_single_group(self, group_info, index, total):
        """Scrape and submit a single group.

        Args:
            group_info (dict): expected to contain "id" and "groupLink".
            index (int): 1-based position inside the current batch (logging only).
            total (int): batch size (logging only).

        Returns:
            bool: True when data (real or private placeholder) was submitted,
            False when the username could not be extracted or an exception
            occurred.
        """
        # Resolve the id before entering the try-block so the except handler
        # below can always reference it (the original code raised NameError
        # there whenever group_info lacked an "id" key).
        group_id = group_info.get("id", "Unknown")
        try:
            group_link = group_info["groupLink"]

            logger.info(f"处理进度: {index}/{total} - ID:{group_id} - {group_link}")

            username = get_username_from_link(group_link)
            if not username:
                logger.warning(f"[ID:{group_id}] 无法提取用户名: {group_link}")
                # Fix: this failure path previously updated no counter, so
                # success_count + error_count never added up to the total.
                self._bump('error_count')
                return False

            data = try_get_group_data(username, group_id)
            if data:
                # Data retrieved -> this is a public group.
                data["is_public"] = True
                # Submit the data together with the id and original group info.
                result = submit_data(data, group_id, group_info)
                logger.info(f"[ID:{group_id}] 提交结果: {result}")
                logger.info(f"[ID:{group_id}] 成功处理: {username} - 公开群组")
                self._bump('success_count')
                return True

            # No data available -> treat as a private group and submit a
            # placeholder record so the backend still learns about it.
            private_data = {
                "source": "telegram_web",
                "title": "",
                "description": "",
                "member_count": 0,
                "avatar_url": "",
                "group_type": "private",
                "is_verified": False,
                "language": "",
                "is_public": False,
                "username": username,
                "scraped_at": datetime.now().isoformat()
            }
            result = submit_data(private_data, group_id, group_info)
            logger.info(f"[ID:{group_id}] 处理私有群组: {username} - 私有群组")
            logger.info(f"[ID:{group_id}] 提交结果: {result}")
            self._bump('success_count')
            return True

        except Exception as e:
            logger.error(f"[ID:{group_id}] 处理群组失败 {group_info.get('groupLink', 'Unknown')}: {e}")
            self._bump('error_count')
            return False

    def run_batch(self, group_info_list):
        """Process one batch of groups on the thread pool and log statistics.

        Args:
            group_info_list (list[dict]): group records as returned by
                ``get_group_links``.
        """
        logger.info(f"开始处理 {len(group_info_list)} 个群组")

        # Reset statistics for this batch.
        with self.stats_lock:
            self.stats['total_processed'] = len(group_info_list)
            self.stats['success_count'] = 0
            self.stats['error_count'] = 0
            self.stats['start_time'] = datetime.now()

        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            # Fan out one task per group; map futures back to their input so
            # failures can be attributed in the log.
            future_to_group = {
                executor.submit(self.process_single_group, group_info, i + 1, len(group_info_list)): group_info
                for i, group_info in enumerate(group_info_list)
            }

            for future in as_completed(future_to_group):
                group_info = future_to_group[future]
                try:
                    future.result()
                except Exception as e:
                    # process_single_group catches its own exceptions; this
                    # only fires on unexpected executor-level failures.
                    # Use .get to avoid a KeyError inside the error handler.
                    logger.error(f"群组 {group_info.get('id', 'Unknown')} 处理异常: {e}")

        # Log batch statistics.
        with self.stats_lock:
            elapsed_time = datetime.now() - self.stats['start_time']
            logger.info(f"批次处理完成:")
            logger.info(f"  总数量: {self.stats['total_processed']}")
            logger.info(f"  成功: {self.stats['success_count']}")
            logger.info(f"  失败: {self.stats['error_count']}")
            logger.info(f"  耗时: {elapsed_time.total_seconds():.2f}秒")
            if elapsed_time.total_seconds() > 0:
                logger.info(f"  平均速度: {self.stats['total_processed']/elapsed_time.total_seconds():.2f}个/秒")

def main_multithreaded(max_workers=5, batch_size=500):
    """Run the scraper forever: fetch a batch of group links, process it
    on a thread pool, repeat.

    Args:
        max_workers (int): thread-pool size handed to ThreadTGScraper.
        batch_size (int): number of group records requested per fetch.
    """
    logger.info("=== Telegram群组爬虫程序启动 (多线程版本) ===")
    logger.info(f"当前时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    logger.info(f"最大线程数: {max_workers}")
    logger.info(f"批处理大小: {batch_size}")

    # One manager instance is reused across all batches.
    scraper = ThreadTGScraper(max_workers)

    while True:
        try:
            batch = get_group_links(batch_size)
            logger.info(f"获取到 {len(batch)} 个群组信息")

            if batch:
                scraper.run_batch(batch)
                logger.info("准备获取下一批群组信息...")
            else:
                # Nothing to do right now; back off briefly before polling again.
                logger.info("没有更多群组信息，等待10秒后重新请求...")
                time.sleep(10)

        except Exception as e:
            # Keep the supervisor loop alive on any unexpected failure.
            logger.error(f"主循环异常: {e}")
            logger.info("等待30秒后重试...")
            time.sleep(30)

if __name__ == "__main__":
    import sys

    def _parse_int(raw, fallback, label):
        """Parse a CLI integer argument; warn and fall back on bad input."""
        try:
            return int(raw)
        except ValueError:
            logger.warning(f"无效的{label}参数: {raw}，使用默认值: {fallback}")
            return fallback

    # Defaults, optionally overridden by positional CLI arguments:
    #   argv[1] -> worker-thread count, argv[2] -> batch size.
    max_workers = 5
    batch_size = 500

    cli_args = sys.argv[1:]
    if len(cli_args) >= 1:
        max_workers = _parse_int(cli_args[0], max_workers, "线程数")
    if len(cli_args) >= 2:
        batch_size = _parse_int(cli_args[1], batch_size, "批处理大小")

    # Start the multithreaded scraper loop.
    main_multithreaded(max_workers, batch_size)
