import os
import re
import time
from time import sleep
import glob
import pymysql
from datetime import datetime, timedelta
from pymysql.cursors import DictCursor
from custom_logger import PrefixDateLogger
import argparse
from check_log_sync import LogSyncChecker
# Database configuration.
# NOTE(review): credentials are hard-coded in source — consider moving them
# to environment variables or a secrets store.
DB_CONFIG = {
    'host': 'rm-gw8a4drz5t1dde7g3.mysql.germany.rds.aliyuncs.com',
    'user': 'root',
    'password': 'suKeepgo123@cys',
    'database': 'cfx_t',
    'port': 3306,
    'charset': 'utf8mb4'
}

# Root directory that is scanned for log files.
ROOT_DIR = '/home/cys/www/data'
#ROOT_DIR ='D:[\\/]workspace[\\/]test_log'
# Regex matching the timestamp embedded in log file names (YYYY-MM-DD_HH).
TIME_PATTERN = r'\d{4}-\d{2}-\d{2}_\d{2}'

# Module-level logger (project-local helper; writes date-prefixed entries).
logger = PrefixDateLogger("file_line_counter")


def get_target_hours(hour_num):
    """Return ``hour_num`` hourly time strings, newest first.

    The list starts with the current hour and walks backwards one hour at a
    time, each formatted as ``YYYY-MM-DD_HH``.

    :param hour_num: number of hourly time points to generate
    :return: list of ``hour_num`` formatted time strings
    """
    now = datetime.now()
    return [
        (now - timedelta(hours=offset)).strftime("%Y-%m-%d_%H")
        for offset in range(hour_num)
    ]

def get_target_hours_by_date(date):
    """Return the 24 hourly time strings for one calendar day.

    :param date: date string in ``YYYY-MM-DD`` format
    :return: list ``["<date>_00", ..., "<date>_23"]``
    """
    # Zero-padded hour suffix for each of the day's 24 hours.
    return [f"{date}_{hour:02d}" for hour in range(24)]

def get_db_connection():
    """Open and return a new pymysql connection.

    Connection parameters come from the module-level ``DB_CONFIG``; rows
    are returned as dictionaries via ``DictCursor``.
    """
    params = dict(DB_CONFIG, cursorclass=DictCursor)
    return pymysql.connect(**params)

def create_table_if_not_exists():
    """Create the ``file_line_counts`` table when it is missing (idempotent DDL)."""
    ddl = """
            CREATE TABLE IF NOT EXISTS file_line_counts (
                id INT AUTO_INCREMENT PRIMARY KEY,
                ip VARCHAR(45) NOT NULL,
                app_id VARCHAR(100) NOT NULL,
                event_id VARCHAR(100) NOT NULL,
                date DATE NOT NULL,
                hour INT NOT NULL,
                line_count INT NOT NULL,
                created_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                UNIQUE KEY unique_record (ip, app_id, event_id, date, hour)
            ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
            """
    conn = get_db_connection()
    try:
        with conn.cursor() as cursor:
            cursor.execute(ddl)
        conn.commit()
    finally:
        # Always release the connection, even if the DDL fails.
        conn.close()

def check_record_exists(ip, app_id, event_id, date, hour):
    """Return True when a row with the given unique key already exists."""
    query = """
            SELECT 1 FROM file_line_counts 
            WHERE ip = %s AND app_id = %s AND event_id = %s AND date = %s AND hour = %s
            """
    conn = get_db_connection()
    try:
        with conn.cursor() as cursor:
            cursor.execute(query, (ip, app_id, event_id, date, hour))
            row = cursor.fetchone()
        return row is not None
    finally:
        conn.close()

def insert_record(ip, app_id, event_id, date, hour, line_count):
    """Insert a single line-count row into ``file_line_counts``.

    :return: True on success. On a database error (e.g. duplicate key) the
             transaction is rolled back, the error is logged, and False is
             returned so the caller can treat it as a soft failure.
    """
    conn = get_db_connection()
    try:
        with conn.cursor() as cursor:
            sql = """
            INSERT INTO file_line_counts (ip, app_id, event_id, date, hour, line_count)
            VALUES (%s, %s, %s, %s, %s, %s)
            """
            cursor.execute(sql, (ip, app_id, event_id, date, hour, line_count))
        conn.commit()
        return True
    except pymysql.Error as e:
        # Narrowed from bare ``Exception`` (consistent with upsert_batch):
        # DB-level failures still yield False, but programming errors are
        # no longer silently swallowed.
        logger.info(f"插入数据失败: {e}")
        conn.rollback()
        return False
    finally:
        conn.close()

def upsert_batch(data_list):
    """Bulk insert/update line-count rows.

    :param data_list: list of dicts, each like
                      {'ip': 'x.x.x.x', 'app_id': 'xxx', 'event_id': 'xxx',
                       'date': '2025-10-16', 'hour': 8, 'line_count': 100}
    :raises pymysql.Error: re-raised after rollback so callers can react.
    """
    if not data_list:
        return

    start_time = time.perf_counter()

    # BUGFIX: ``created_time`` must be assigned BEFORE ``line_count``.
    # MySQL evaluates ON DUPLICATE KEY UPDATE assignments left to right, and
    # later assignments see the already-updated column value — with the old
    # order, once line_count was overwritten the comparison
    # VALUES(line_count) != line_count was always false, so created_time was
    # never refreshed on changed rows.
    sql = """
        INSERT INTO file_line_counts (ip, app_id, event_id, date, hour, line_count, created_time) 
        VALUES (%s, %s, %s, %s, %s, %s, NOW())
        ON DUPLICATE KEY UPDATE 
            created_time = IF(VALUES(line_count) != line_count, NOW(), created_time),
            line_count = IF(VALUES(line_count) != line_count, VALUES(line_count), line_count)
        """

    # Flatten the dicts into parameter tuples in column order.
    values = [
        (
            item['ip'],
            item['app_id'],
            item['event_id'],
            item['date'],
            item['hour'],
            item['line_count']
        ) for item in data_list
    ]

    connection = None
    try:
        connection = get_db_connection()
        # ``with`` guarantees the cursor is closed (the old code leaked it).
        with connection.cursor() as cursor:
            cursor.executemany(sql, values)
            affected = cursor.rowcount
        connection.commit()

        duration = time.perf_counter() - start_time
        logger.info(f"批量插入完成，影响行数: {affected}，执行耗时: {duration:.6f} 秒")

    except pymysql.Error as e:
        logger.info(f"数据库错误: {e}")
        if connection:
            connection.rollback()
        raise
    finally:
        if connection:
            connection.close()


def count_lines(file_path):
    """Count the number of lines in *file_path*.

    Bytes that cannot be decoded as UTF-8 are ignored. Any failure to read
    the file is logged and reported as 0 — callers therefore cannot
    distinguish an empty file from an unreadable one.
    """
    try:
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as fh:
            total = 0
            for _ in fh:
                total += 1
        return total
    except Exception as e:
        logger.info(f"统计文件 {file_path} 行数失败: {e}")
        return 0

def parse_file_info(file_path):
    """Parse a log file path into its components.

    Expected layout (separator may be ``/`` or ``\\``):
    ``/home/cys/www/data/<ip>/<app_id>/<event_id>.<YYYY-MM-DD>_<HH>.0.txt``

    :param file_path: absolute path of a candidate log file
    :return: dict with keys ``ip``, ``app_id``, ``event_id``, ``date`` and
             ``hour`` (hour as int), or None when the path does not match.
    """
    pattern = r'^(/home/cys/www/data)[\\/]([\d.]+)[\\/]([^\\/]+)[\\/](.+)\.(\d{4}-\d{2}-\d{2})_(\d{2})\.0\.txt$'
    match = re.match(pattern, file_path)
    if match is None:
        return None

    # Group 1 is the fixed root directory; skip it.
    parts = match.groups()
    return {
        'ip': parts[1],
        'app_id': parts[2],
        'event_id': parts[3],
        'date': parts[4],
        'hour': int(parts[5]),
    }
def traverse_directory2(root_dir,target_hours):
    """Glob-based scan: for each target-hour pattern, find matching files
    under *root_dir*, count their lines and upsert results in batches of 200.

    :param root_dir: root of the directory tree to scan
    :param target_hours: iterable of ``YYYY-MM-DD_HH`` strings to match
    """
    logger.info(f"处理目标时间范围: {target_hours}")

    # Rows pending batch upsert.
    # NOTE(review): name looks like a typo for "insert_data".
    insert_date=[]
    total_count=0
    start_time = time.time()
    for pattern in target_hours:
        logger.info(f"遍历处理时间: {pattern}")
        # Build the full glob pattern for this hour.

        search_pattern = os.path.join(root_dir, f"**/*{pattern}*")
        # recursive=True lets ** match files in nested subdirectories.
        files = glob.glob(search_pattern, recursive=True)
        for file_path in files:
            total_count+=1
            # Parse ip / app_id / event_id / date / hour out of the path.
            file_info = parse_file_info(file_path)
            if not file_info:
                logger.info(f"无法解析文件路径: {file_path}")
                continue

            # Count lines; 0 also covers unreadable files (see count_lines).
            line_count = count_lines(file_path)
            if line_count == 0:
                logger.info(f"文件 {file_path} 行数为0，跳过")
                continue
            # Throttle: pause 2s after every 100 files examined.
            if total_count%100==0:
                sleep(2)
                logger.info(f"已处理文件数: {total_count}")
            # Accumulate rows and flush in batches instead of per-row inserts.
            insert_date.append({
                'ip': file_info['ip'],
                'app_id': file_info['app_id'],
                'event_id': file_info['event_id'],
                'date': file_info['date'],
                'hour': file_info['hour'],
                'line_count': line_count
            })
            if len(insert_date)>=200:
                upsert_batch(insert_date)
                insert_date.clear()
    # Flush any remaining partial batch.
    if insert_date:
        upsert_batch(insert_date)
        insert_date.clear()
    end_time = time.time()
    logger.info(f"处理完成，已处理文件数: {total_count} ,cost:{end_time - start_time:.3f} s")

def traverse_directory(root_dir,target_hours):
    """os.walk-based scan: visit every file under *root_dir*, select the
    ``.txt`` files whose name contains one of *target_hours*
    (``YYYY-MM-DD_HH``), count their lines and upsert results in batches
    of 200.

    :param root_dir: root of the directory tree to scan
    :param target_hours: collection of ``YYYY-MM-DD_HH`` strings to accept
    """
    logger.info(f"处理目标时间范围: {target_hours}")

    # Rows pending batch upsert.
    # NOTE(review): name looks like a typo for "insert_data".
    insert_date=[]
    total_count=0
    parse_count=0
    for dirpath, _, filenames in os.walk(root_dir):

        for filename in filenames:
            total_count+=1
            if total_count%1000==0:
                logger.info(f"扫描1000个文件循环暂停5秒，total_count: {total_count}")
                time.sleep(5)  # throttle: pause after every 1000 scanned files
            # Only consider .txt files whose name carries a timestamp.
            if filename.endswith('.txt'):
                # Extract the YYYY-MM-DD_HH part of the file name.
                time_match = re.search(TIME_PATTERN, filename)
                if time_match and time_match.group() in target_hours:
                    file_path = os.path.join(dirpath, filename)
                    parse_count+=1
                    # Parse ip / app_id / event_id / date / hour from the path.
                    file_info = parse_file_info(file_path)
                    if not file_info:
                        logger.info(f"无法解析文件路径: {file_path}")
                        continue
                    
                    # Count lines; 0 also covers unreadable files (see count_lines).
                    line_count = count_lines(file_path)
                    if line_count == 0:
                        logger.info(f"文件 {file_path} 行数为0，跳过")
                        continue
                    if parse_count%200==0:
                        logger.info(f"已处理文件数: {parse_count}, 总文件数: {total_count}")
                    # Accumulate rows and flush in batches (this replaced the
                    # earlier per-row check_record_exists/insert_record flow).
                    insert_date.append({
                        'ip': file_info['ip'],
                        'app_id': file_info['app_id'],
                        'event_id': file_info['event_id'],
                        'date': file_info['date'],
                        'hour': file_info['hour'],
                        'line_count': line_count
                    })
                    if len(insert_date)>=200:
                        upsert_batch(insert_date)
                        insert_date.clear()
    # Flush any remaining partial batch.
    if insert_date:
        upsert_batch(insert_date)
        insert_date.clear()
    logger.info(f"处理完成，已处理文件数: {parse_count}, 总文件数: {total_count}")

if __name__ == "__main__":
    # Set up command-line argument parsing.
    parser = argparse.ArgumentParser(description='count file line')
    # --hour_num: how many hours back from now to process (default 6).
    # NOTE(review): no type=int, so a CLI-supplied value arrives as str;
    # it is cast with int() below.
    parser.add_argument('--hour_num', default=6, help='统计现在往前的小时数')
    # --date: process one whole day (24 hours) instead of a recent window.
    parser.add_argument('--date', help='统计日期，格式YYYY-MM-DD')

    # Parse command-line arguments.
    args = parser.parse_args()

    try:
    
        """主函数"""
        # Ensure the target table exists before any inserts.
        create_table_if_not_exists()
        if args.date:
            target_hours = get_target_hours_by_date(args.date)
        else:
            # int() handles the str that argparse delivers for CLI values.
            target_hours = get_target_hours(int(args.hour_num))
        logger.info(f"target_hours:{target_hours}")
        # Walk the data tree and record line counts.
        traverse_directory(ROOT_DIR,target_hours)
        logger.info("处理完成")

    except ValueError as e:
        logger.info(f"错误: {str(e)}")
    except Exception as e:
        logger.info(f"处理过程中发生错误: {str(e)}")

    # Run the log-sync check regardless of how the scan above ended.
    checker = LogSyncChecker()
    checker.run_check()
