import os
import re
import time
from time import sleep
import glob
import pymysql
from datetime import datetime, timedelta
from pymysql.cursors import DictCursor
from custom_logger import PrefixDateLogger
import argparse
import subprocess
from check_log_sync import LogSyncChecker
# Database configuration.
# NOTE(review): credentials are hard-coded in source control — consider moving
# them to environment variables or a secrets store.
DB_CONFIG = {
    'host': 'rm-gw8a4drz5t1dde7g3.mysql.germany.rds.aliyuncs.com',
    'user': 'root',
    'password': 'suKeepgo123@cys',
    'database': 'cfx_t',
    'port': 3306,
    'charset': 'utf8mb4'
}

# OSS (object storage) configuration: ossutil config file, bucket, and the
# per-IP subdirectory that holds the line-count files.
OSS_CONFIG_FILE = "/home/cys/www/script/ch_recollect_log/ossutils_eu.config"
OSS_BUCKET = "oss://log-bdp-eu"
OSS_FILE_LINE_DIR = "file_line"
# Local staging directory that downloaded files are copied into.
LOCAL_TEMP_DIR = "/home/cys/www/script/ch_recollect_log/temp/file_line"

# Source-host IP addresses whose OSS folders are scanned.
IP_LIST = [
    "160.153.178.48",
    "172.29.151.172",
    "172.29.151.173",
    "172.29.151.174",
    "172.29.151.176",
    "172.29.151.179",
    "172.29.151.180",
    "184.168.29.103",
    "208.109.191.70",
    "208.109.233.234"
]
# Regex for the hour-bucket timestamp format (YYYY-MM-DD_HH).
# NOTE(review): not referenced anywhere else in this file — possibly dead.
TIME_PATTERN = r'\d{4}-\d{2}-\d{2}_\d{2}'

# Module-level logger (project-local date-prefixed logger).
logger = PrefixDateLogger("file_line_counter")


def get_target_hours(hour_num):
    """Build hour-bucket strings for the current hour and the hours before it.

    The original docstring claimed a fixed "current + previous 5 hours", but
    the window size is actually controlled by ``hour_num``.

    Args:
        hour_num: number of hourly buckets to return, counting backwards
            from (and including) the current hour.

    Returns:
        list[str]: ``YYYY-MM-DD_HH`` strings, newest first.
    """
    # Snapshot "now" once so every bucket is relative to the same instant.
    now = datetime.now()
    return [(now - timedelta(hours=i)).strftime("%Y-%m-%d_%H")
            for i in range(hour_num)]

def get_target_hours_by_date(date):
    """Return all 24 hour-bucket strings for a single day.

    Args:
        date: a ``YYYY-MM-DD`` date string (not validated here).

    Returns:
        list[str]: ``["<date>_00", "<date>_01", ..., "<date>_23"]``.
    """
    # Single f-string replaces the original's confusing adjacent-literal
    # concatenation (date+"_"f"{i:02d}").
    return [f"{date}_{hour:02d}" for hour in range(24)]

def get_db_connection():
    """Open a new pymysql connection from DB_CONFIG, using DictCursor rows."""
    params = dict(DB_CONFIG)
    params['cursorclass'] = DictCursor
    return pymysql.connect(**params)

def create_table_if_not_exists():
    """Ensure the file_line_counts_2 table exists (idempotent DDL)."""
    ddl = """
            CREATE TABLE IF NOT EXISTS file_line_counts_2 (
                id INT AUTO_INCREMENT PRIMARY KEY,
                ip VARCHAR(45) NOT NULL,
                app_id VARCHAR(100) NOT NULL,
                event_id VARCHAR(100) NOT NULL,
                date DATE NOT NULL,
                hour INT NOT NULL,
                line_count INT NOT NULL,
                created_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                UNIQUE KEY unique_record (ip, app_id, event_id, date, hour)
            ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
            """
    connection = get_db_connection()
    try:
        with connection.cursor() as cursor:
            cursor.execute(ddl)
        connection.commit()
    finally:
        connection.close()

def check_record_exists(ip, app_id, event_id, date, hour):
    """Return True when a row with this unique key is already stored."""
    query = """
            SELECT 1 FROM file_line_counts_2 
            WHERE ip = %s AND app_id = %s AND event_id = %s AND date = %s AND hour = %s
            """
    connection = get_db_connection()
    try:
        with connection.cursor() as cursor:
            cursor.execute(query, (ip, app_id, event_id, date, hour))
            row = cursor.fetchone()
        return row is not None
    finally:
        connection.close()

def insert_record(ip, app_id, event_id, date, hour, line_count):
    """Insert a single row into file_line_counts_2.

    Returns:
        bool: True on success; False when the insert failed (failure is
        logged and the transaction rolled back).
    """
    statement = """
            INSERT INTO file_line_counts_2 (ip, app_id, event_id, date, hour, line_count)
            VALUES (%s, %s, %s, %s, %s, %s)
            """
    connection = get_db_connection()
    try:
        with connection.cursor() as cursor:
            cursor.execute(statement, (ip, app_id, event_id, date, hour, line_count))
        connection.commit()
        return True
    except Exception as e:
        logger.info(f"插入数据失败: {e}")
        connection.rollback()
        return False
    finally:
        connection.close()

def upsert_batch(data_list):
    """Bulk insert/update rows in file_line_counts_2.

    Fixes vs. original: the docstring was placed after the first statement
    (making it a dead string expression), and the cursor was never closed.

    Args:
        data_list: list of row dicts, e.g.
            [{'ip': 'x.x.x.x', 'app_id': 'xxx', 'event_id': 'xxx',
              'date': '2025-10-16', 'hour': 8, 'line_count': 100}, ...]

    Raises:
        pymysql.Error: re-raised after rollback when the batch fails.
    """
    if not data_list:
        return

    start_time = time.perf_counter()

    # ON DUPLICATE KEY: only rewrite line_count/created_time when the incoming
    # value differs, so unchanged rows are left untouched.
    sql = """
        INSERT INTO file_line_counts_2 (ip, app_id, event_id, date, hour, line_count, created_time) 
        VALUES (%s, %s, %s, %s, %s, %s, NOW())
        ON DUPLICATE KEY UPDATE 
            line_count = IF(VALUES(line_count) != line_count, VALUES(line_count), line_count),
            created_time = IF(VALUES(line_count) != line_count, NOW(), created_time)
        """

    # Flatten dicts into parameter tuples, in column order.
    values = [
        (
            item['ip'],
            item['app_id'],
            item['event_id'],
            item['date'],
            item['hour'],
            item['line_count']
        ) for item in data_list
    ]

    connection = None
    try:
        connection = get_db_connection()
        # Context manager closes the cursor deterministically (was leaked).
        with connection.cursor() as cursor:
            cursor.executemany(sql, values)
            affected = cursor.rowcount
        connection.commit()

        duration = time.perf_counter() - start_time
        logger.info(f"批量插入完成，影响行数: {affected}，执行耗时: {duration:.6f} 秒")

    except pymysql.Error as e:
        logger.info(f"数据库错误: {e}")
        if connection:
            connection.rollback()
        raise
    finally:
        if connection:
            connection.close()


def count_lines(file_path):
    """Return the number of lines in *file_path*, or 0 on any read error."""
    try:
        total = 0
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as fh:
            for _ in fh:
                total += 1
        return total
    except Exception as e:
        logger.info(f"统计文件 {file_path} 行数失败: {e}")
        return 0

def download_oss_file_line_logs(target_hours):
    """Fetch per-hour line-count files from OSS for every configured IP.

    Args:
        target_hours: iterable of ``YYYY-MM-DD_HH`` strings.

    Returns:
        list[tuple[str, str]]: (ip, local_file_path) pairs for each file
        successfully downloaded; missing files are logged and skipped.
    """
    os.makedirs(LOCAL_TEMP_DIR, exist_ok=True)
    fetched = []

    for ip in IP_LIST:
        logger.info(f"处理IP: {ip}")
        local_dir = os.path.join(LOCAL_TEMP_DIR, ip)
        os.makedirs(local_dir, exist_ok=True)

        for hour_str in target_hours:
            # File naming scheme: log_file_line_2025-11-21_15.txt
            file_name = f"log_file_line_{hour_str}.txt"
            oss_path = f"{OSS_BUCKET}/{ip}/{OSS_FILE_LINE_DIR}/{file_name}"
            local_file = os.path.join(local_dir, file_name)

            logger.info(f"  尝试从LOSS下载: {oss_path}")

            try:
                # Invoke ossutil as an argument list (no shell quoting issues).
                completed = subprocess.run(
                    ["ossutil",
                     "--config-file", OSS_CONFIG_FILE,
                     "cp",
                     oss_path,
                     local_file],
                    capture_output=True,
                    text=True,
                )
                if completed.returncode == 0 and os.path.exists(local_file):
                    logger.info(f"  下载成功: {local_file}")
                    fetched.append((ip, local_file))
                else:
                    logger.info(f"  未找到文件: {file_name} (IP: {ip})")
            except Exception as e:
                logger.info(f"  下载文件 {file_name} 失败 (IP: {ip}): {e}")

    return fetched

def parse_file_line_log(file_path, ip):
    """Parse a log_file_line file into database row dicts.

    Each valid line looks like:
        AnySelectedNew/widget_text_click_detail.2025-11-21_15.0.txt/3592
    i.e. ``app_id/event_id.YYYY-MM-DD_HH.0.txt/line_count``.

    Args:
        file_path: path to the downloaded log_file_line file.
        ip: source-host IP recorded on every parsed row.

    Returns:
        list[dict]: rows with keys ip, app_id, event_id, date, hour,
        line_count. Malformed lines are logged and skipped; an unreadable
        file yields an empty list.
    """
    data_list = []
    # Compile once, outside the per-line loop (was re.match per line).
    # Matches "event_id.YYYY-MM-DD_HH.0.txt".
    name_pattern = re.compile(r'^(.+)\.(\d{4}-\d{2}-\d{2})_(\d{2})\.0\.txt$')

    try:
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
            for line_num, raw in enumerate(f, 1):
                line = raw.strip()
                if not line:
                    continue

                # Expected shape: app_id/event_id.date_hour.0.txt/line_count
                parts = line.split('/')
                if len(parts) != 3:
                    logger.info(f"跳过无效行 ({file_path}:{line_num}): {line}")
                    continue

                app_id, filename_with_ext, line_count_str = parts

                match = name_pattern.match(filename_with_ext)
                if not match:
                    logger.info(f"无法解析文件名 ({file_path}:{line_num}): {filename_with_ext}")
                    continue

                event_id, date, hour_str = match.groups()

                try:
                    line_count = int(line_count_str)
                except ValueError:
                    logger.info(f"无效的行数 ({file_path}:{line_num}): {line_count_str}")
                    continue

                data_list.append({
                    'ip': ip,
                    'app_id': app_id,
                    'event_id': event_id,
                    'date': date,
                    'hour': int(hour_str),
                    'line_count': line_count
                })

    except Exception as e:
        logger.info(f"解析文件 {file_path} 失败: {e}")

    return data_list
def process_oss_file_line_logs(target_hours):
    """Download, parse and persist the line-count files for *target_hours*."""
    logger.info(f"处理目标时间范围: {target_hours}")

    # Step 1: pull the files down from OSS.
    downloaded_files = download_oss_file_line_logs(target_hours)
    if not downloaded_files:
        logger.info("没有下载到任何文件")
        return

    logger.info(f"总共下载 {len(downloaded_files)} 个文件")

    # Step 2: parse every file and upsert the rows in batches.
    total_records = 0
    pending = []

    for ip, file_path in downloaded_files:
        logger.info(f"处理文件: {file_path}, IP: {ip}")

        records = parse_file_line_log(file_path, ip)
        logger.info(f"从 {file_path} 解析出 {len(records)} 条记录")

        total_records += len(records)
        pending.extend(records)

        # Flush once at least 200 rows have accumulated, to bound the
        # size of each database round-trip.
        if len(pending) >= 200:
            upsert_batch(pending)
            pending.clear()

    # Flush whatever is left over.
    if pending:
        upsert_batch(pending)
        pending.clear()

    logger.info(f"处理完成，总记录数: {total_records}")

if __name__ == "__main__":
    # Command-line interface: either a rolling window (--hour_num) or a
    # whole calendar day (--date, which takes precedence).
    parser = argparse.ArgumentParser(description='count file line')
    parser.add_argument('--hour_num', default=6, help='统计现在往前的小时数')
    parser.add_argument('--date', help='统计日期，格式YYYY-MM-DD')
    args = parser.parse_args()

    try:
        # Make sure the destination table exists before any inserts.
        create_table_if_not_exists()

        target_hours = (
            get_target_hours_by_date(args.date)
            if args.date
            else get_target_hours(int(args.hour_num))
        )
        logger.info(f"target_hours:{target_hours}")

        # Download from OSS, parse, and persist.
        process_oss_file_line_logs(target_hours)
        logger.info("处理完成")

    except ValueError as e:
        logger.info(f"错误: {str(e)}")
    except Exception as e:
        logger.info(f"处理过程中发生错误: {str(e)}")

    # Run the log-sync check regardless of the outcome above.
    checker = LogSyncChecker()
    checker.run_check()
