#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Telegram group data crawler.

Fetches batches of group links from the backend API, scrapes each group's
public t.me page, classifies the group (crypto/stock/ad/spam, language,
region) and submits the enriched record back to the backend.
"""

import requests
import json
import time
import logging
import threading

import re
from typing import Dict, List, Optional, Any
from datetime import datetime
from urllib.parse import urlparse
from config import config


# Project-local keyword-matching helpers used for group classification.
from crypto_keywords import match_crypto_keywords
from stock_keywords import match_stock_keywords
from spam_keywords import match_spam_keywords
from language_keywords import (
    lang_map,
    match_language_string,
    get_language_cn_from_code,
    get_region_by_language_code,
)


# Backend API base URL (internal service).
BASE_URL = "http://137.175.106.122:5001"

# Logging configuration
import os
from datetime import datetime  # NOTE(review): duplicate of the top-level import

# Create the log folder if it does not exist yet.
log_dir = 'log'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
# 自定义按小时轮转的日志处理器（线程安全）
class HourlyRotatingFileHandler(logging.FileHandler):
    """File handler that switches to a new log file at the top of each hour.

    Thread-safe: a private lock serializes rotation and writes. The filename
    pattern avoids ':' (previously ``YYYYMMDD-HH:00.log``), which is an
    invalid filename character on Windows and made the handler fail there.
    """

    def __init__(self, log_dir):
        self.log_dir = log_dir
        # Hour stamp of the currently open file; None forces a rotation check
        # on the first emit.
        self.current_hour = None
        self._lock = threading.Lock()
        # Ensure the log directory exists before FileHandler opens the file.
        os.makedirs(log_dir, exist_ok=True)
        super().__init__(self._get_current_filename(), encoding='utf-8', mode='a')

    def _get_current_filename(self):
        """Return the log path for the current hour, e.g. log/20240101-13-00.log."""
        hour = datetime.now().strftime('%Y%m%d-%H')
        # '-00' instead of ':00' — ':' is not portable in filenames.
        return os.path.join(self.log_dir, f"{hour}-00.log")

    def emit(self, record):
        """Write the record, rotating to a new file when the hour changes."""
        with self._lock:  # serialize rotation + write across threads
            try:
                current_hour = datetime.now().strftime('%Y%m%d-%H')

                if self.current_hour != current_hour:
                    self.current_hour = current_hour
                    self.close()
                    # abspath matches FileHandler's own convention and keeps
                    # logging working even if the process changes cwd.
                    self.baseFilename = os.path.abspath(self._get_current_filename())
                    self.stream = self._open()

                super().emit(record)
                # Flush so log lines are visible immediately.
                if self.stream:
                    self.stream.flush()
            except Exception as e:
                # If the file write fails, at least surface it on the console.
                print(f"日志写入失败: {e}")
                print(f"日志内容: {record.getMessage()}")

# Logger setup: one named logger writing to the hourly file and the console.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)  # force INFO level

# Drop any pre-existing handlers so re-imports don't duplicate output.
for handler in logger.handlers[:]:
    logger.removeHandler(handler)

# Shared formatter for both handlers.
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

# File handler (rotates hourly).
file_handler = HourlyRotatingFileHandler(log_dir)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

# Console handler.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)

# Confirm the logging pipeline works.
logger.info("日志系统初始化完成")


# 1、获取群组链接
def get_group_links(size: int):
    """Fetch up to ``size`` pending group records from the backend.

    Returns a list of ``{"id": ..., "groupLink": ...}`` dicts; entries with a
    missing id or empty link are dropped. Returns [] on any request error.
    """
    url = f"{BASE_URL}/tgGroup/getDataByNum?num={size}"
    try:
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        payload = response.json() or {}
        records = payload.get("result") or []
        # Keep only well-formed entries carrying both an id and a link.
        return [
            {"id": entry.get("id"), "groupLink": entry.get("groupLink")}
            for entry in records
            if isinstance(entry, dict)
            and entry.get("id") is not None
            and entry.get("groupLink")
        ]
    except Exception as e:
        logger.error(f"获取群组链接失败: {e}")
        return []


# Classify a group's type and language from its title/description.
def analyze_group_content(title: str, description: str, language: str):
    """Classify a group and resolve its language/region.

    Returns a dict of '0'/'1' flag strings (isStock/isCrypto/isAd/isTrash)
    plus region, Chinese language name, and the raw language code.
    """
    text = f"{title} {description}"
    content = text.lower()
    # No language supplied: infer a code (e.g. 'zh', 'en') from the text.
    if not language:
        language = match_language_string(text) or ''

    # Keyword scores; two or more hits mark the corresponding category.
    is_trash = match_spam_keywords(content) >= 2
    is_crypto = match_crypto_keywords(content, language) >= 2
    is_stock = match_stock_keywords(content, language) >= 2

    # Advertising detection via a small inline keyword list.
    ad_keywords = ['ad', 'advertisement', 'promo', 'sponsor', '广告', '推广', '宣传', '代理', '引流']
    is_ad = sum(keyword in content for keyword in ad_keywords) >= 2

    # Map the language code to a region and a Chinese display name.
    lang_code = (language or "").lower()

    return {
        'isStock': '1' if is_stock else '0',
        'isCrypto': '1' if is_crypto else '0',
        'isAd': '1' if is_ad else '0',
        'isTrash': '1' if is_trash else '0',
        'region': get_region_by_language_code(lang_code),
        'language': get_language_cn_from_code(lang_code),
        'language_code': lang_code,
    }

def get_region_by_language(language: str):
    """Backward-compatibility shim for old callers.

    Delegates to ``language_keywords.get_region_by_language_code``, which is
    already imported at module level — the previous function-local re-import
    was redundant.
    """
    return get_region_by_language_code(language)

# 2、提交数据
def submit_data(scraped_data: dict, group_id: int, original_group_info: dict):
    """Analyze scraped group data and POST the update payload to the backend.

    Fixes: the payload variable no longer shadows the function's own name,
    and the HTTP request now has a timeout so a hung server cannot block
    the crawl loop indefinitely.

    Returns the backend's JSON response; request/JSON errors propagate.
    """
    url = f"{BASE_URL}/tgGroup/updateByGroupLink"

    title = scraped_data.get("title", "")
    description = scraped_data.get("description", "")
    language = scraped_data.get("language", "")

    # analysis['language'] is the Chinese display name of the language.
    analysis = analyze_group_content(title, description, language)
    language_cn = analysis.get('language', '')

    # Payload in the exact shape the update endpoint expects.
    payload = {
        "id": group_id,
        "title": title,
        "groupLink": original_group_info.get("groupLink", ""),
        "groupUsername": scraped_data.get("username", ""),
        "memberCount": scraped_data.get("member_count", 0),
        "description": description,
        "isVerified": 1 if scraped_data.get("is_verified", False) else 0,
        "rawData": json.dumps(scraped_data, ensure_ascii=False),  # keep the raw scrape
        "language": language_cn,
        "isStock": analysis['isStock'],
        "isCrypto": analysis['isCrypto'],
        "isPublic": '1' if scraped_data.get("is_public", True) else '0',
        "isAd": analysis['isAd'],
        "isTrash": analysis['isTrash'],
        "region": analysis['region'],
        "groupName": title,
        "update_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    }

    logger.info(f"[ID:{group_id}] 提交参数: {json.dumps(payload, ensure_ascii=False, indent=2)}")

    response = requests.post(url, json=payload, timeout=30)
    return response.json()

# 公用方法处理群组链接提取username
def get_username_from_link(link: str):
    """Extract the Telegram username (last path segment) from a group link.

    Fixes the naive ``split("/")[-1]``: a trailing slash no longer yields an
    empty string, and query strings / fragments are ignored, e.g. both
    'https://t.me/foo/' and 'https://t.me/foo?start=1' -> 'foo'.
    """
    path = urlparse(link.strip()).path
    return path.rstrip("/").split("/")[-1]

# 3、爬取群组数据方式一：combot.org (已注释 - API不可用)
# def get_group_data_by_combot(username: str):
#     url = f"https://combot.org/api/chats/{username}"
#     response = requests.get(url)
#     return response.json()

# 4、爬取群组数据方式二：telegramchannels.me (已注释 - API不可用)
# def get_group_data_by_telegramchannels(username: str):
#     url = f"https://telegramchannels.me/api/channel/info?channel={username}"
#     response = requests.get(url)
#     return response.json()

# 5、爬取群组数据方式三：tgstat.com (已注释 - API不可用)
# def get_group_data_by_tgstat(username: str):
#     url = f"https://tgstat.com/api/channel/info?channel={username}"
#     response = requests.get(url)
#     return response.json() 

# 6、爬取群组数据方式四：telegram-group.com (已注释 - API不可用)
# def get_group_data_by_telegramgroup(username: str):
#     url = f"https://api.telegram-group.com/api/channel/info?channel={username}"
#     response = requests.get(url)
#     return response.json()

# 7、爬取群组数据方式五：Telegram网页爬虫 (可用)
def _extract_og_meta(content: str, prop: str):
    """Return the value of an Open Graph <meta property="og:prop"> tag, or None if absent."""
    match = re.search(
        rf'<meta[^>]*property=["\']og:{prop}["\'][^>]*content=["\']([^"\']*)["\']',
        content, re.IGNORECASE)
    return match.group(1).strip() if match else None


def get_group_data_by_telegramweb(username: str, group_id: str = None):
    """Scrape https://t.me/<username> and parse the group's public metadata.

    Returns ``(data_dict, raw_html)``. Network errors propagate to the
    caller (handled in try_get_group_data). The repeated og:* regexes are
    factored into ``_extract_og_meta``, and the request now has a timeout.
    """
    url = f"https://t.me/{username}"
    # Timeout keeps a hung server from blocking the crawl loop.
    response = requests.get(url, timeout=30)
    content = response.text

    # Title: prefer og:title; fall back to <title> when the tag is missing.
    title = ''
    og_title = _extract_og_meta(content, 'title')
    if og_title is not None:
        title = og_title
        # "Telegram: View @user" / "Telegram: Contact @user" are placeholder
        # titles — try to dig the real group name out of the page body.
        if title.startswith('Telegram: View @') or title.startswith('Telegram: Contact @'):
            name_patterns = [
                r'<h1[^>]*>([^<]+)</h1>',
                r'<div[^>]*class=["\'][^"\']*title[^"\']*["\'][^>]*>([^<]+)</div>',
                r'<span[^>]*class=["\'][^"\']*title[^"\']*["\'][^>]*>([^<]+)</span>',
                r'<div[^>]*class=["\'][^"\']*name[^"\']*["\'][^>]*>([^<]+)</div>'
            ]
            for pattern in name_patterns:
                name_match = re.search(pattern, content, re.IGNORECASE)
                if name_match:
                    potential_title = name_match.group(1).strip()
                    if potential_title and not potential_title.startswith('Telegram:'):
                        title = potential_title
                        break
    else:
        title_match = re.search(r'<title[^>]*>([^<]+)</title>', content, re.IGNORECASE)
        if title_match:
            title = title_match.group(1).strip()

    # Description: prefer og:description, then <meta name="description">.
    og_desc = _extract_og_meta(content, 'description')
    if og_desc is not None:
        description = og_desc
    else:
        description = ''
        desc_match = re.search(r'<meta[^>]*name=["\']description["\'][^>]*content=["\']([^"\']*)["\']', content, re.IGNORECASE)
        if desc_match:
            description = desc_match.group(1).strip()

    # Member count: first "<N> members" occurrence; 0 when absent.
    member_count = 0
    member_match = re.search(r'(\d+)\s*members?', content, re.IGNORECASE)
    if member_match:
        member_count = int(member_match.group(1))

    # Avatar image from og:image.
    avatar_url = _extract_og_meta(content, 'image') or ''

    # Rough type detection from the page text; 'channel' wins over 'group'.
    lowered = content.lower()
    group_type = 'unknown'
    if 'channel' in lowered:
        group_type = 'channel'
    elif 'group' in lowered:
        group_type = 'group'

    # Verified-badge heuristic based on page text.
    is_verified = 'verified' in lowered or 'checkmark' in lowered

    # Language: <html lang="..">, else keyword-based inference from the text.
    language = ''
    lang_match = re.search(r'<html[^>]*lang=["\']([^"\']*)["\']', content, re.IGNORECASE)
    if lang_match:
        language = lang_match.group(1)
    if not language:
        language = match_language_string(f"{title} {description}") or ''

    data = {
        'source': 'telegram_web',
        'title': title,
        'description': description,
        'member_count': member_count,
        'avatar_url': avatar_url,
        'group_type': group_type,
        'is_verified': is_verified,
        'language': language,
        'is_public': True,
        'username': username,
        'scraped_at': datetime.now().isoformat()
    }

    logger.info(f"[ID:{group_id}] 爬取到的数据: {json.dumps(data, ensure_ascii=False, indent=2)}")
    return data, content  # also return the raw HTML

# 保存原始数据到文件
def save_raw_data_to_file(data: dict, username: str, html_content: str = None, group_id: str = None):
    """Persist a scraped-data dict (and optionally the raw HTML) to timestamped files.

    Files are written to the current working directory. Returns the JSON
    filename on success, or None on failure. The previous function-local
    ``import json`` / ``from datetime import datetime`` were redundant —
    both are already imported at module level.
    """
    try:
        # Timestamp keeps repeated scrapes of the same group from colliding.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Parsed data as pretty-printed JSON.
        json_filename = f"raw_data_{username}_{timestamp}.json"
        with open(json_filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

        # Raw HTML, only when the caller supplied it.
        if html_content:
            html_filename = f"raw_html_{username}_{timestamp}.html"
            with open(html_filename, 'w', encoding='utf-8') as f:
                f.write(html_content)
            logger.info(f"[ID:{group_id}] 原始HTML已保存到: {html_filename}")

        logger.info(f"[ID:{group_id}] 原始数据已保存到: {json_filename}")
        return json_filename

    except Exception as e:
        logger.error(f"[ID:{group_id}] 保存原始数据失败: {e}")
        return None

# 尝试爬取方法
def try_get_group_data(username: str, group_id: str = None):
    """Try to scrape the group via its public web page; return data or None."""
    try:
        data, _html = get_group_data_by_telegramweb(username, group_id)
    except Exception as e:
        logger.warning(f"[ID:{group_id}] telegram web 失败 {username}: {e}")
    else:
        # Only accept the result when at least a title was parsed.
        if data and data.get('title'):
            logger.info(f"[ID:{group_id}] 通过 telegram web 获取到数据: {username}")
            logger.info(f"[ID:{group_id}] telegram web 爬取到的数据: {json.dumps(data, ensure_ascii=False, indent=2)}")
            return data

    logger.error(f"[ID:{group_id}] 无法获取数据: {username}")
    return None

# 流程控制：
def main():
    """Main crawl loop: fetch batches of group links, scrape each, submit results.

    Runs forever; per-group failures are logged and skipped, batch-level
    failures back off 30 seconds. Fixes: ``group_id`` is now bound before
    the per-group try block, so the except handler can no longer hit an
    unbound variable (previously a NameError if the exception fired before
    the assignment).
    """
    logger.info("=== Telegram群组爬虫程序启动 ===")
    logger.info(f"当前时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    logger.info(f"日志目录: {os.path.abspath(log_dir)}")

    # Batch size requested from the backend per iteration.
    size = 500
    logger.info(f"每次获取群组数量: {size}")

    while True:
        try:
            group_info_list = get_group_links(size)
            logger.info(f"获取到 {len(group_info_list)} 个群组信息")

            if not group_info_list:
                logger.info("没有更多群组信息，等待10秒后重新请求...")
                time.sleep(10)
                continue

            for i, group_info in enumerate(group_info_list, 1):
                # Bind the id before the try so the except handler below can
                # always reference it.
                group_id = group_info.get("id", "Unknown")
                try:
                    group_link = group_info["groupLink"]
                    logger.info(f"处理进度: {i}/{len(group_info_list)} - ID:{group_id} - {group_link}")

                    username = get_username_from_link(group_link)
                    if not username:
                        logger.warning(f"[ID:{group_id}] 无法提取用户名: {group_link}")
                        continue

                    data = try_get_group_data(username, group_id)
                    if data:
                        # Data was retrievable -> public group.
                        data["is_public"] = True
                        result = submit_data(data, group_id, group_info)
                        logger.info(f"[ID:{group_id}] 提交结果: {result}")
                        logger.info(f"[ID:{group_id}] 成功处理: {username} - 公开群组")
                        logger.info(f"==========================================================================================================")
                    else:
                        # No data -> treat as a private group; submit defaults.
                        private_data = {
                            "source": "telegram_web",
                            "title": "",
                            "description": "",
                            "member_count": 0,
                            "avatar_url": "",
                            "group_type": "private",
                            "is_verified": False,
                            "language": "",
                            "is_public": False,
                            "username": username,
                            "scraped_at": datetime.now().isoformat()
                        }
                        result = submit_data(private_data, group_id, group_info)
                        logger.info(f"[ID:{group_id}] 处理私有群组: {username} - 私有群组")
                        logger.info(f"[ID:{group_id}] 私有群组数据: {json.dumps(private_data, ensure_ascii=False, indent=2)}")
                        logger.info(f"[ID:{group_id}] 提交结果: {result}")

                    # Small delay between groups to stay polite to t.me.
                    time.sleep(1)

                except Exception as e:
                    logger.error(f"[ID:{group_id}] 处理群组失败 {group_info.get('groupLink', 'Unknown')}: {e}")
                    continue

            logger.info("当前批次处理完成，准备获取下一批群组信息...")

        except Exception as e:
            logger.error(f"主循环异常: {e}")
            logger.info("等待30秒后重试...")
            time.sleep(30)



# Entry point guard: run the crawler only when executed as a script.
if __name__ == "__main__":
    main()