## Read today's date folder, traverse every user directory, and process each file under it

from datetime import datetime,timedelta
import os
import json
import re
import json
import re
import pandas as pd
import time
from sqlalchemy import create_engine,text

# Shared SQLAlchemy engine for the bz_timecheck MySQL database.
# NOTE(review): credentials are hardcoded in source — consider moving them to
# environment variables or a config file.
engine = create_engine(
    'mysql+mysqlconnector://root:Bz_202501@bj-cdb-ckq2r8ro.sql.tencentcdb.com:25622/bz_timecheck',
    pool_recycle=3600,  # recycle pooled connections hourly to avoid stale-connection drops
    echo=False,  # echo changed from True to False to disable SQL statement logging
    isolation_level="READ COMMITTED",
    pool_pre_ping=True  # validate connections before each checkout
)

def extract_timestamps_from_filenames(data):
    """
    Extract the epoch timestamp embedded in each entry's file_name and
    format it as a local-time string.

    Parameters:
        data (dict): parsed JSON object with a "files" list; each item is a
            dict whose "file_name" looks like ``ael_<digits>.ott`` where the
            digits are a microsecond-resolution epoch timestamp.

    Returns:
        list[str]: one 'YYYY-MM-DD HH:MM:SS' string (local time) per matching
        file name, in input order. Non-matching names are skipped.
    """
    # Compile once instead of re-scanning the pattern on every iteration.
    pattern = re.compile(r'ael_(\d+)\.ott')
    timestamps = []
    for file_info in data["files"]:
        match = pattern.search(file_info["file_name"])
        if match:
            # The first 10 digits of the microsecond timestamp are the
            # whole-second epoch value; the remainder is sub-second noise.
            epoch_seconds = int(match.group(1)[:10])
            timestamps.append(
                datetime.fromtimestamp(epoch_seconds).strftime('%Y-%m-%d %H:%M:%S')
            )
    return timestamps


def insert_to_mysql(result):
    """
    Bulk-insert (profile_id, dateflag, hourflag) rows into
    bz_timecheck.ott_time_analyse, ignoring rows whose key already exists.

    Parameters:
        result (pd.DataFrame): columns 'profile_id', 'dateflag', 'hourflag'.
            'dateflag' may be any pandas-parseable date value; it is
            normalized to a ``datetime.date`` before insertion.

    Returns:
        None. Database errors are printed and swallowed so the caller's
        polling loop keeps running.
    """
    # Check for emptiness first — nothing to normalize or insert.
    if result.empty:
        return
    # Work on a copy so the caller's DataFrame is not mutated in place.
    result = result.copy()
    result['dateflag'] = pd.to_datetime(result['dateflag']).dt.date
    table = 'bz_timecheck.ott_time_analyse'
    try:
        with engine.begin() as conn:  # begin() commits automatically on success
            # Idempotent batch upsert: the no-op ON DUPLICATE KEY assignment
            # makes this behave like "insert or ignore".
            sql = text(f"""
                INSERT INTO {table} (profile_id, dateflag, hourflag)
                VALUES (:profile_id, :dateflag, :hourflag)
                ON DUPLICATE KEY UPDATE profile_id=profile_id
            """)
            # Passing a list of dicts triggers an executemany-style batch.
            conn.execute(sql, result.to_dict(orient='records'))
    except Exception as e:
        print(f"[ERROR] 数据库操作失败：{str(e)}")

def main():
    """
    Scan today's directory under /mnt/ott_analysis, walk every user
    (profile_id) subdirectory, parse each metadata file, and persist one
    (profile_id, dateflag, hourflag) row per extracted timestamp.

    Only files whose embedded timestamp ('<prefix>_<microsec_epoch>.<ext>')
    is within the last 24 hours are processed. Malformed file names or
    unreadable/invalid JSON files are skipped instead of crashing the
    daemon loop.
    """
    root_path = '/mnt/ott_analysis'
    now_date = datetime.now().strftime('%Y-%m-%d')
    date_path = os.path.join(root_path, now_date)
    # ISO-formatted timestamps compare correctly as strings, so plain string
    # comparison below is a valid chronological comparison.
    latest_time = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S')
    if not os.path.exists(date_path):
        print(f"[INFO] {now_date} 数据目录不存在，跳过处理")
        return
    data_list = []
    for profile_id in os.listdir(date_path):
        user_path = os.path.join(date_path, profile_id)
        # Skip stray non-directory entries; only user directories hold data.
        if not os.path.isdir(user_path):
            continue
        for file_name in os.listdir(user_path):
            try:
                # File names carry a microsecond epoch: '<prefix>_<usec>.<ext>'.
                file_epoch_us = int(file_name.split('_')[1].split('.')[0])
            except (IndexError, ValueError):
                continue  # unexpected file name; skip rather than kill the run
            file_time = datetime.fromtimestamp(file_epoch_us / 1000000).strftime('%Y-%m-%d %H:%M:%S')
            if file_time < latest_time:
                continue
            file_path = os.path.join(user_path, file_name)
            try:
                with open(file_path, 'r') as f:
                    data = json.load(f)
            except (OSError, json.JSONDecodeError) as e:
                print(f"[ERROR] 读取文件失败 {file_path}: {e}")
                continue
            for str_time in extract_timestamps_from_filenames(data):
                data_list.append({
                    'profile_id': profile_id,
                    'dateflag': str_time[:10],         # 'YYYY-MM-DD'
                    'hourflag': int(str_time[11:13]),  # hour of day, 0-23
                })
    if data_list:
        result = pd.DataFrame(data_list).drop_duplicates(
            subset=['profile_id', 'dateflag', 'hourflag'])
    else:
        # Empty frame with the expected schema; nothing to deduplicate.
        result = pd.DataFrame(columns=['profile_id', 'dateflag', 'hourflag'])
    insert_to_mysql(result)
if __name__ == '__main__':
    # Simple polling daemon: process today's data, then sleep 5 minutes, forever.
    while True:
        
        main()
        print(f"[INFO] {datetime.now()},数据处理完成")
        time.sleep(5*60)