#!/usr/bin/env python3
"""
Telegram群组信息获取器
此脚本使用Telegram API获取群组信息。

"""

import asyncio
import re
from telethon import TelegramClient
from telethon.errors import ChannelPrivateError, UsernameNotOccupiedError, FloodWaitError, UsernameInvalidError
from telethon.tl.types import Channel, Chat
import json
from datetime import datetime
import time
import logging
import os
import requests
from typing import List, Dict, Any


# NOTE(review): real Telegram API credentials and a phone number are committed
# in source control — rotate these and load them from environment variables or
# a secrets store instead of hard-coding them here.
API_ID = "14404091"
API_HASH = "a2e84cbbe86727180110b6e2eff92c7c"
PHONE_NUMBER = "+959650351486"


# Base URL of the backend service that supplies group links and receives results.
BASE_URL = "http://137.175.106.122:5001"

class TelegramGroupInfo:
    """Fetch Telegram group/channel metadata via Telethon and submit it to the
    backend service defined by ``BASE_URL``.

    Workflow: pull pending group links from the backend, resolve each link to a
    Telegram entity, extract metadata (title, member count, ...), archive the
    raw Telethon objects under ``raw_data/``, and POST the result back.
    """

    def __init__(self):
        # Reuses/creates 'session.session' in the working directory.
        self.client = TelegramClient('session', API_ID, API_HASH)
        self.setup_logging()

    def setup_logging(self):
        """Configure logging to a timestamped file plus the console, and
        ensure the ``logs/`` and ``raw_data/`` directories exist."""
        # Create the logs directory
        if not os.path.exists('logs'):
            os.makedirs('logs')

        # Log line format
        log_format = '%(asctime)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s'

        # Timestamped log file name
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        log_filename = f'logs/telegram_fetcher_{timestamp}.log'

        # Configure the root logger
        logging.basicConfig(
            level=logging.INFO,
            format=log_format,
            handlers=[
                logging.FileHandler(log_filename, encoding='utf-8'),
                logging.StreamHandler()  # also echo to the console
            ]
        )

        self.logger = logging.getLogger(__name__)
        self.logger.info(f"日志系统初始化完成，日志文件: {log_filename}")

        # Create the raw-data archive directory
        if not os.path.exists('raw_data'):
            os.makedirs('raw_data')
            self.logger.info("创建原始数据目录: raw_data/")

    # Step 1: fetch group links from the backend API.
    def get_group_links(self, size: int) -> List[Dict[str, Any]]:
        """Fetch up to *size* pending group links from the backend.

        Returns a list of ``{"id": ..., "groupLink": ...}`` dicts; returns an
        empty list on any network/parse error (errors are logged, not raised).
        """
        url = f"{BASE_URL}/tgGroup/getDataByNum?num={size}"
        self.logger.info(f"🌐 请求API获取群组链接: {url}")

        try:
            response = requests.get(url, timeout=30)
            self.logger.info(f"📡 API响应状态码: {response.status_code}")

            response.raise_for_status()
            data = response.json() or {}
            self.logger.info(f"📄 API返回数据结构: {list(data.keys()) if isinstance(data, dict) else type(data)}")

            result = data.get("result") or []
            self.logger.info(f"📋 result字段类型: {type(result)}, 长度: {len(result) if isinstance(result, (list, tuple)) else 'N/A'}")

            if not result:
                self.logger.warning(f"⚠️ API返回的result为空或None: {result}")
                self.logger.info(f"📄 完整API响应: {data}")
                return []

            group_info: List[Dict[str, Any]] = []
            valid_count = 0
            invalid_count = 0

            # Keep only well-formed items that carry both an id and a link.
            for i, item in enumerate(result):
                if not isinstance(item, dict):
                    invalid_count += 1
                    self.logger.debug(f"跳过非字典项 [{i}]: {type(item)} - {item}")
                    continue

                gid = item.get("id")
                glink = item.get("groupLink")

                if gid is None or not glink:
                    invalid_count += 1
                    self.logger.debug(f"跳过无效项 [{i}]: id={gid}, groupLink={glink}")
                    continue

                group_info.append({
                    "id": gid,
                    "groupLink": glink,
                })
                valid_count += 1

            self.logger.info(f"✅ 成功解析群组链接: 有效={valid_count}, 无效={invalid_count}, 总计={len(result)}")

            if valid_count == 0:
                self.logger.warning("⚠️ 没有找到有效的群组链接数据")

            return group_info

        except requests.exceptions.RequestException as e:
            self.logger.error(f"❌ 网络请求失败: {e}")
            return []
        except ValueError as e:
            self.logger.error(f"❌ JSON解析失败: {e}")
            return []
        except Exception as e:
            self.logger.error(f"❌ 获取群组链接时发生未知错误: {e}", exc_info=True)
            return []

    def submit_group_data(self, group_data: dict, group_id: int, group_link: str):
        """Submit one group's metadata to the backend.

        Returns the parsed JSON response on success, ``None`` on any failure
        (failures are logged, never raised).
        """
        url = f"{BASE_URL}/tgGroup/updateByGroupLink"

        # Build the submission payload from the fetched group data.
        submit_data = {
            "id": group_id,
            "title": group_data.get("title", ""),
            "groupLink": group_link,
            "groupUsername": group_data.get("username", ""),
            "memberCount": group_data.get("participants_count", 0),
            "description": group_data.get("description", "N/A"),
            "isVerified": 1 if group_data.get("is_verified", False) else 0,
            "rawData": json.dumps(group_data, ensure_ascii=False, default=str),  # default=str handles datetime values
            "language": "",  # language detection could be added later
            "isStock": 0,    # stock-related detection could be added later
            "isCrypto": 0,   # crypto-related detection could be added later
        }

        try:
            self.logger.info(f"提交群组数据到服务端: {group_link}")
            response = requests.post(url, json=submit_data, timeout=30)
            response.raise_for_status()

            result = response.json()
            self.logger.info(f"✅ 成功提交群组数据: {group_link} - 响应: {result}")
            return result

        except Exception as e:
            self.logger.error(f"❌ 提交群组数据失败: {group_link} - 错误: {e}")
            return None

    async def start_client(self):
        """Start the Telegram client and authenticate.

        Reuses an existing session file when present; otherwise Telethon
        prompts for the SMS verification code interactively. Re-raises any
        connection failure after logging it.
        """
        self.logger.info("开始连接Telegram客户端...")
        try:
            # An existing session file means we can skip phone verification.
            if os.path.exists('session.session'):
                self.logger.info("发现已存在的会话文件，尝试自动登录...")
                print("🔄 发现已存在的登录会话，尝试自动连接...")
            else:
                self.logger.info("首次登录，需要手机验证...")
                print("📱 首次登录需要手机验证，请准备接收验证码...")

            # start() logs in automatically with a session file, otherwise
            # it runs the interactive verification flow.
            await self.client.start(phone=PHONE_NUMBER)

            self.logger.info(f"✅ 成功连接到Telegram，使用手机号: {PHONE_NUMBER}")
            print("✅ 成功连接到Telegram")

        except Exception as e:
            self.logger.error(f"连接Telegram失败: {e}")
            print(f"❌ 连接失败: {e}")
            print("💡 提示：")
            print("   1. 确保API_ID和API_HASH正确")
            print("   2. 确保手机号格式正确（包含国家代码）")
            print("   3. 首次登录需要接收验证码")
            raise

    async def extract_username_from_url(self, url):
        """Extract a username, invite hash, or numeric channel ID from a
        Telegram URL. Returns the identifier string, or ``None`` when the URL
        matches no known t.me format."""
        self.logger.debug(f"解析URL: {url}")

        # Trim surrounding whitespace before matching.
        url = url.strip()

        # Supported URL shapes, most specific first.
        patterns = [
            r't\.me/c/(\d+)',   # https://t.me/c/1234567890 (channel ID takes priority)
            r't\.me/joinchat/([^/?]+)',  # https://t.me/joinchat/invite_hash
            r't\.me/([^/?]+)'   # https://t.me/username (matched last)
        ]

        for pattern in patterns:
            match = re.search(pattern, url)
            if match:
                username = match.group(1)
                self.logger.debug(f"从URL {url} 提取到标识符: {username}")
                return username

        self.logger.warning(f"无法从URL提取标识符: {url}")
        return None

    async def save_raw_data(self, url, raw_data, data_type="group_info"):
        """Archive a raw Telethon object as JSON under ``raw_data/``.

        Returns the written filename, or ``None`` when saving fails
        (failures are logged, not raised).
        """
        try:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]  # millisecond precision
            # Derive a filesystem-safe name from the URL.
            safe_name = re.sub(r'[^\w\-_\.]', '_', url.replace('https://t.me/', '').replace('/', '_'))
            filename = f'raw_data/{data_type}_{safe_name}_{timestamp}.json'

            # Convert the object into something JSON-serializable.
            if hasattr(raw_data, 'to_dict'):
                serializable_data = raw_data.to_dict()
            else:
                # Manually walk non-Telethon / plain objects.
                serializable_data = self._convert_to_serializable(raw_data)

            with open(filename, 'w', encoding='utf-8') as f:
                json.dump({
                    'url': url,
                    'timestamp': datetime.now().isoformat(),
                    'data_type': data_type,
                    'raw_data': serializable_data
                }, f, ensure_ascii=False, indent=2, default=str)

            # FIX: the log message previously contained a corrupted "(unknown)"
            # placeholder instead of the actual filename.
            self.logger.info(f"原始数据已保存到: {filename}")
            return filename
        except Exception as e:
            self.logger.error(f"保存原始数据失败: {e}")
            return None

    def _convert_to_serializable(self, obj):
        """Recursively convert an arbitrary object into JSON-friendly types,
        falling back to ``str()`` for anything that isn't a plain scalar,
        sequence, or attribute-bearing object."""
        if hasattr(obj, '__dict__'):
            result = {}
            for key, value in obj.__dict__.items():
                # Skip private/internal attributes.
                if key.startswith('_'):
                    continue
                try:
                    if hasattr(value, '__dict__'):
                        result[key] = self._convert_to_serializable(value)
                    elif isinstance(value, (list, tuple)):
                        result[key] = [self._convert_to_serializable(item) for item in value]
                    elif isinstance(value, (str, int, float, bool)) or value is None:
                        result[key] = value
                    else:
                        result[key] = str(value)
                except Exception:
                    result[key] = str(value)
            return result
        else:
            return str(obj)

    async def get_group_info(self, url):
        """Resolve a Telegram group URL and return a metadata dict.

        On success the dict has ``status == "success"``; otherwise it carries
        an ``error`` message and a machine-readable ``status`` describing the
        failure class (private, not found, rate-limited, ...).
        """
        self.logger.info(f"开始获取群组信息: {url}")
        try:
            username = await self.extract_username_from_url(url)
            if not username:
                error_msg = "无效的URL格式"
                self.logger.error(f"{error_msg}: {url}")
                return {"error": error_msg, "url": url}

            # Resolve the identifier to a Telegram entity. Resolution errors
            # (ValueError / UsernameNotOccupiedError / UsernameInvalidError)
            # propagate to the outer handlers below; the former try/except
            # wrapper that only re-raised them has been removed.
            if username.isdigit():
                # Numeric channel ID (from a t.me/c/... URL)
                channel_id = int(username)
                self.logger.info(f"使用频道ID获取实体: {channel_id}")
                entity = await self.client.get_entity(channel_id)
            else:
                # Username form
                self.logger.info(f"使用用户名获取实体: {username}")
                try:
                    entity = await self.client.get_entity(username)
                except (ValueError, UsernameNotOccupiedError, UsernameInvalidError):
                    # Retry once with an explicit @ prefix before giving up.
                    if not username.startswith('@'):
                        self.logger.info(f"尝试使用@前缀获取实体: @{username}")
                        entity = await self.client.get_entity(f"@{username}")
                    else:
                        raise

            # NOTE(review): get_entity(entity) re-resolves the same basic
            # entity; fields like 'about' require GetFullChannelRequest and
            # will therefore fall back to their getattr defaults below.
            self.logger.info(f"获取完整实体信息: {entity.id}")
            full_info = await self.client.get_entity(entity)

            # Archive the raw entity data.
            await self.save_raw_data(url, full_info, "entity_info")
            self.logger.debug(f"实体类型: {type(full_info).__name__}, ID: {full_info.id}")

            # Extract the fields the backend cares about.
            info = {
                "url": url,
                "id": full_info.id,
                "title": getattr(full_info, 'title', 'N/A'),
                "username": getattr(full_info, 'username', None),
                "type": "Channel" if isinstance(full_info, Channel) else "Chat",
                "is_private": not hasattr(full_info, 'username') or full_info.username is None,
                "is_megagroup": getattr(full_info, 'megagroup', False),
                "is_broadcast": getattr(full_info, 'broadcast', False),
                # FIX: previously never set, so submit_group_data's
                # "isVerified" field was always 0.
                "is_verified": getattr(full_info, 'verified', False),
                "participants_count": 0,
                "description": getattr(full_info, 'about', 'N/A'),
                "created_date": getattr(full_info, 'date', None),
                "access_hash": getattr(full_info, 'access_hash', None),
                "status": "success"
            }

            # Best-effort member count; failures are recorded, not raised.
            try:
                if isinstance(full_info, Channel):
                    self.logger.info(f"获取频道成员数量: {full_info.id}")
                    participants = await self.client.get_participants(full_info, limit=0)
                    info["participants_count"] = participants.total
                    # Archive the raw participants data.
                    await self.save_raw_data(url, participants, "participants_info")
                    self.logger.info(f"频道成员数量: {participants.total}")
                else:
                    # Plain chat group
                    self.logger.info(f"获取聊天群组成员: {full_info.id}")
                    participants = await self.client.get_participants(full_info)
                    info["participants_count"] = len(participants)
                    # Archive the raw participants data.
                    await self.save_raw_data(url, participants, "participants_list")
                    self.logger.info(f"聊天群组成员数量: {len(participants)}")
            except Exception as e:
                info["participants_count"] = "无法获取"
                info["participants_error"] = str(e)
                self.logger.warning(f"获取成员数量失败: {e}")

            self.logger.info(f"成功获取群组信息: {info.get('title', '未知')} (ID: {info['id']})")
            return info

        except ChannelPrivateError as e:
            error_msg = "频道是私有的，您没有访问权限"
            self.logger.warning(f"{error_msg}: {url} - {e}")
            return {
                "url": url,
                "error": error_msg,
                "status": "private_no_access"
            }
        except UsernameNotOccupiedError as e:
            error_msg = "用户名未找到或频道不存在"
            self.logger.warning(f"{error_msg}: {url} - {e}")
            return {
                "url": url,
                "error": error_msg,
                "status": "not_found"
            }
        except UsernameInvalidError as e:
            error_msg = "用户名格式无效或不可接受"
            self.logger.warning(f"🔍 {error_msg}: {url}")
            self.logger.debug(f"详细错误信息: {e}")
            return {
                "url": url,
                "error": error_msg,
                "status": "invalid_username"
            }
        except ValueError as e:
            # Telethon raises ValueError("No user has 'xxx' as username")
            # for unknown usernames; map that to "not found".
            if "No user has" in str(e) and "as username" in str(e):
                error_msg = "用户名未找到或频道不存在"
                self.logger.warning(f"🔍 {error_msg}: {url}")
                self.logger.debug(f"详细错误信息: {e}")  # full detail only at debug level
                return {
                    "url": url,
                    "error": error_msg,
                    "status": "not_found"
                }
            else:
                # Any other ValueError
                self.logger.error(f"获取群组信息时发生值错误: {url} - {e}")
                return {
                    "url": url,
                    "error": f"数据格式错误: {str(e)}",
                    "status": "error"
                }
        except FloodWaitError as e:
            error_msg = f"请求频率限制，请等待 {e.seconds} 秒"
            self.logger.warning(f"{error_msg}: {url}")
            return {
                "url": url,
                "error": error_msg,
                "status": "rate_limited",
                "wait_seconds": e.seconds
            }
        except Exception as e:
            self.logger.error(f"获取群组信息时发生未知错误: {url} - {e}", exc_info=True)
            return {
                "url": url,
                "error": str(e),
                "status": "error"
            }

    async def process_group_list(self, urls, delay=1):
        """Process a list of group URLs sequentially, sleeping *delay* seconds
        between requests to avoid rate limits. Returns one result dict per URL."""
        self.logger.info(f"开始处理 {len(urls)} 个群组URL，延迟: {delay}秒")
        results = []

        for i, url in enumerate(urls, 1):
            self.logger.info(f"处理进度: {i}/{len(urls)} - {url}")
            print(f"正在处理 {i}/{len(urls)}: {url}")

            result = await self.get_group_info(url)
            results.append(result)

            # Print an immediate per-URL summary.
            if result.get("status") == "success":
                print(f"✅ {result.get('title', '无')} - 成员数: {result.get('participants_count', '无')}")
            else:
                print(f"❌ 错误: {result.get('error', '未知错误')}")

            # Sleep between URLs (but not after the last one).
            if i < len(urls):
                self.logger.debug(f"等待 {delay} 秒后处理下一个URL")
                await asyncio.sleep(delay)

        self.logger.info(f"完成处理 {len(urls)} 个群组URL")
        return results

    async def process_groups_from_api(self, size: int = 10, delay: int = 1, use_test_data: bool = False):
        """Fetch *size* group links from the API, process each one, and submit
        successful results to the backend. Returns one result dict per group."""
        self.logger.info(f"开始从API获取 {size} 个群组链接")

        # 1. Fetch the group-link list.
        group_links = self.get_group_links(size)

        if not group_links and use_test_data:
            self.logger.info("🧪 API无数据，使用测试群组链接")
            # Fall back to a small fixed set of test groups.
            group_links = [
                {"id": 1001, "groupLink": "https://t.me/aryacam"},
                {"id": 1002, "groupLink": "https://t.me/xndjzjdjsI"},
                {"id": 1003, "groupLink": "https://t.me/arzdigitalo"},
            ]
            print("🧪 使用测试数据模式，处理3个测试群组")

        if not group_links:
            self.logger.warning("未获取到任何群组链接，且未启用测试模式")
            print("❌ 没有可处理的群组数据")
            print("💡 建议:")
            print("   1. 检查服务端是否有待处理的群组数据")
            print("   2. 联系管理员确认API接口状态")
            print("   3. 或者使用测试模式: process_groups_from_api(use_test_data=True)")
            return []

        self.logger.info(f"成功获取到 {len(group_links)} 个群组链接")

        results = []
        successful_count = 0

        # 2. Process each group.
        for i, group_info in enumerate(group_links, 1):
            group_id = group_info["id"]
            group_link = group_info["groupLink"]

            self.logger.info(f"处理进度: {i}/{len(group_links)} - ID: {group_id}, 链接: {group_link}")
            print(f"🔍 正在处理 {i}/{len(group_links)}: {group_link}")

            # 3. Fetch the group data through the Telegram client.
            group_data = await self.get_group_info(group_link)

            if group_data.get("status") == "success":
                # 4. Submit the data to the backend.
                submit_result = self.submit_group_data(group_data, group_id, group_link)

                if submit_result:
                    successful_count += 1
                    print(f"✅ {group_data.get('title', '未知')} - 成员数: {group_data.get('participants_count', '无')} - 已提交")
                else:
                    print(f"⚠️ {group_data.get('title', '未知')} - 获取成功但提交失败")

                # Record the submission outcome alongside the group data.
                group_data["submit_result"] = submit_result
            else:
                print(f"❌ 获取群组信息失败: {group_data.get('error', '未知错误')}")

            results.append({
                "group_id": group_id,
                "group_link": group_link,
                "telegram_data": group_data,
                "submitted": group_data.get("submit_result") is not None
            })

            # Sleep between groups to avoid rate limits.
            if i < len(group_links):
                self.logger.debug(f"等待 {delay} 秒后处理下一个群组")
                await asyncio.sleep(delay)

        self.logger.info(f"完成处理 {len(group_links)} 个群组，成功提交 {successful_count} 个")
        print(f"\n📊 处理完成: {successful_count}/{len(group_links)} 个群组成功提交到服务端")

        return results

    async def process_groups_in_batches(self, total_size: int = 500, batch_size: int = 50, delay: int = 2, use_test_data: bool = False):
        """Process up to *total_size* groups in batches of *batch_size*,
        with a fixed 5-second pause between batches. A failed batch is logged
        and skipped. Returns the combined per-group results."""
        self.logger.info(f"开始批量处理群组: 总数={total_size}, 批次大小={batch_size}")

        all_results = []
        total_successful = 0
        batch_count = (total_size + batch_size - 1) // batch_size  # ceiling division

        for batch_num in range(1, batch_count + 1):
            # Size of this batch (the last one may be smaller).
            current_batch_size = min(batch_size, total_size - (batch_num - 1) * batch_size)

            self.logger.info(f"🔄 开始处理第 {batch_num}/{batch_count} 批次，大小: {current_batch_size}")
            print(f"\n🔄 处理第 {batch_num}/{batch_count} 批次 ({current_batch_size} 个群组)")
            print("=" * 60)

            try:
                # Run this batch.
                batch_results = await self.process_groups_from_api(
                    size=current_batch_size,
                    delay=delay,
                    use_test_data=use_test_data and batch_num == 1  # test data only in the first batch
                )

                # Per-batch statistics.
                batch_successful = sum(1 for r in batch_results if r["submitted"])
                total_successful += batch_successful

                # Tag each result with its batch number.
                for result in batch_results:
                    result["batch_number"] = batch_num

                all_results.extend(batch_results)

                # Print batch statistics.
                batch_success_rate = (batch_successful / len(batch_results) * 100) if batch_results else 0
                print(f"✅ 第 {batch_num} 批次完成: {batch_successful}/{len(batch_results)} 成功 ({batch_success_rate:.1f}%)")

                self.logger.info(f"第 {batch_num} 批次完成: {batch_successful}/{len(batch_results)} 成功")

                # Inter-batch delay (avoids API rate limits).
                if batch_num < batch_count:
                    batch_delay = 5  # seconds between batches
                    print(f"⏳ 等待 {batch_delay} 秒后处理下一批次...")
                    self.logger.info(f"批次间延迟 {batch_delay} 秒")
                    await asyncio.sleep(batch_delay)

            except Exception as e:
                self.logger.error(f"第 {batch_num} 批次处理失败: {e}")
                print(f"❌ 第 {batch_num} 批次处理失败: {e}")
                continue

        # Final statistics.
        total_processed = len(all_results)
        overall_success_rate = (total_successful / total_processed * 100) if total_processed else 0

        self.logger.info(f"批量处理完成: 总处理={total_processed}, 总成功={total_successful}, 成功率={overall_success_rate:.1f}%")
        print(f"\n🎯 批量处理完成!")
        print(f"📊 总统计: {total_successful}/{total_processed} 成功 ({overall_success_rate:.1f}%)")

        return all_results

    async def process_groups_continuously(self, batch_size: int = 500, group_delay: int = 2, batch_delay: int = 10):
        """Process groups in an endless loop of rounds until interrupted
        (Ctrl+C). Each round's results are saved to a JSON file; returns the
        aggregate statistics when the loop ends."""
        self.logger.info(f"开始持续处理群组: 每批={batch_size}, 群组延迟={group_delay}秒, 批次延迟={batch_delay}秒")

        round_number = 1
        total_processed = 0
        total_successful = 0

        while True:
            try:
                self.logger.info(f"🔄 开始第 {round_number} 轮处理")
                print(f"\n🔄 第 {round_number} 轮处理 (每轮 {batch_size} 个群组)")
                print("=" * 70)

                # Run one round.
                round_results = await self.process_groups_in_batches(
                    total_size=batch_size,
                    batch_size=50,  # still processed in small internal batches
                    delay=group_delay,
                    use_test_data=False  # continuous mode never uses test data
                )

                # Per-round statistics.
                round_successful = sum(1 for r in round_results if r["submitted"])
                total_processed += len(round_results)
                total_successful += round_successful

                # Persist this round's results.
                round_filename = f"telegram_groups_round_{round_number}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
                round_data = {
                    "round_number": round_number,
                    "timestamp": datetime.now().isoformat(),
                    "total_groups": len(round_results),
                    "successful_submissions": round_successful,
                    "results": round_results
                }
                await self.save_results(round_data, round_filename)

                # Print round statistics.
                round_success_rate = (round_successful / len(round_results) * 100) if round_results else 0
                overall_success_rate = (total_successful / total_processed * 100) if total_processed else 0

                print(f"\n✅ 第 {round_number} 轮完成:")
                print(f"   本轮: {round_successful}/{len(round_results)} 成功 ({round_success_rate:.1f}%)")
                print(f"   累计: {total_successful}/{total_processed} 成功 ({overall_success_rate:.1f}%)")

                self.logger.info(f"第 {round_number} 轮完成: 本轮={round_successful}/{len(round_results)}, 累计={total_successful}/{total_processed}")

                # Back off when the API is running out of data.
                if len(round_results) == 0:
                    self.logger.warning("API返回空数据，等待更长时间后重试")
                    print("⚠️ API暂无数据，等待60秒后重试...")
                    await asyncio.sleep(60)
                elif len(round_results) < batch_size:
                    self.logger.info(f"获取到的数据少于预期({len(round_results)}<{batch_size})，可能API数据不足")
                    print(f"⚠️ 获取数据不足({len(round_results)}<{batch_size})，等待30秒后继续...")
                    await asyncio.sleep(30)
                else:
                    # Normal inter-round delay.
                    print(f"⏳ 等待 {batch_delay} 秒后开始下一轮...")
                    await asyncio.sleep(batch_delay)

                round_number += 1

            except KeyboardInterrupt:
                self.logger.info("收到中断信号，停止持续处理")
                print(f"\n🛑 处理被中断")
                break
            except Exception as e:
                self.logger.error(f"第 {round_number} 轮处理出错: {e}")
                print(f"❌ 第 {round_number} 轮出错: {e}")
                print("⏳ 等待30秒后重试...")
                await asyncio.sleep(30)
                continue

        # Final statistics.
        print(f"\n🎯 持续处理结束!")
        print(f"📊 总统计: 处理 {round_number-1} 轮，{total_successful}/{total_processed} 成功")
        self.logger.info(f"持续处理结束: {round_number-1}轮, {total_successful}/{total_processed}成功")

        return {
            "total_rounds": round_number - 1,
            "total_processed": total_processed,
            "total_successful": total_successful,
            "overall_success_rate": (total_successful / total_processed * 100) if total_processed else 0
        }

    async def save_results(self, results, filename=None):
        """Dump *results* to a JSON file (auto-named with a timestamp when
        *filename* is None). Returns the filename; re-raises on write failure."""
        if filename is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"telegram_groups_info_{timestamp}.json"

        # FIX: these three messages previously contained a corrupted
        # "(unknown)" placeholder instead of the actual filename.
        self.logger.info(f"保存结果到文件: {filename}")

        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(results, f, ensure_ascii=False, indent=2, default=str)

            self.logger.info(f"✅ 结果文件保存成功: {filename}")
            print(f"📁 结果已保存到 {filename}")
            return filename
        except Exception as e:
            self.logger.error(f"保存结果文件失败: {e}")
            raise

    async def close(self):
        """Disconnect the Telegram client; disconnect errors are logged only."""
        self.logger.info("关闭Telegram客户端连接")
        try:
            await self.client.disconnect()
            self.logger.info("✅ Telegram客户端已断开连接")
        except Exception as e:
            self.logger.error(f"关闭客户端时发生错误: {e}")

async def main():
    """Entry point: connect to Telegram, then fetch and process group links
    from the API continuously until interrupted."""
    # Initialize the fetcher (this also sets up logging).
    fetcher = TelegramGroupInfo()

    try:
        fetcher.logger.info("=" * 60)
        fetcher.logger.info("开始执行Telegram群组信息获取和提交任务")
        fetcher.logger.info("=" * 60)

        await fetcher.start_client()

        # Continuous processing loop.
        print("\n🔄 持续循环处理群组...")
        print("=" * 50)
        print("💡 提示: 按 Ctrl+C 停止处理")

        # Read batch parameters from an optional local config module.
        try:
            from batch_config import get_batch_config
            config = get_batch_config()
            batch_size = config.get('total_size', 500)
            group_delay = config.get('delay', 2)
            batch_delay = config.get('batch_delay', 10)
            print(f"📋 使用配置: 每轮={batch_size}个, 群组延迟={group_delay}秒, 轮次延迟={batch_delay}秒")
        except ImportError:
            # No config module available — fall back to defaults.
            batch_size = 500
            group_delay = 2
            batch_delay = 10
            print("📋 使用默认配置")

        # Process groups continuously.
        final_stats = await fetcher.process_groups_continuously(
            batch_size=batch_size,
            group_delay=group_delay,
            batch_delay=batch_delay
        )

        # Persist the final summary.
        final_summary = {
            "mode": "continuous_processing",
            "timestamp": datetime.now().isoformat(),
            "final_statistics": final_stats,
            "configuration": {
                "batch_size": batch_size,
                "group_delay": group_delay,
                "batch_delay": batch_delay
            }
        }

        summary_filename = f"continuous_processing_summary_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
        fetcher.logger.info("保存最终统计")
        await fetcher.save_results(final_summary, summary_filename)

        print(f"\n📊 最终统计已保存到: {summary_filename}")

        # Log the final statistics.
        fetcher.logger.info("=" * 60)
        fetcher.logger.info("持续处理任务结束 - 最终统计:")
        fetcher.logger.info(f"总轮次: {final_stats['total_rounds']}")
        fetcher.logger.info(f"总处理: {final_stats['total_processed']}")
        fetcher.logger.info(f"总成功: {final_stats['total_successful']}")
        fetcher.logger.info(f"总成功率: {final_stats['overall_success_rate']:.1f}%")
        fetcher.logger.info("=" * 60)

    except Exception as e:
        print(f"❌ 错误: {e}")
        # FIX: `fetcher` is bound before the try block, so the previous
        # "if 'fetcher' in locals()" guard was always true and is removed.
        fetcher.logger.error(f"主程序执行失败: {e}", exc_info=True)
    finally:
        await fetcher.close()


if __name__ == "__main__":
    # Run the async entry point; Ctrl+C stops the continuous processing loop.
    asyncio.run(main())
