# code_sample/pidtimer-log2json.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import re
import json
import argparse
import subprocess
from multiprocessing import Pool
import sys 

sys.path.append("..")
from package.utility import ensure_exists
from package.utility import find_targets
from package.utility import scan_runcases

"""
功能: 将每个进程的 TIMER 日志文件转换成可自动加载的 json 文件
输出格式:
{
    "timer_name_XXX1" : [level_id1, time1],
    "timer_name_XXX2" : [level_id2, time2],
}
实现思路:
1. 通过"find . -name \"reapet*\" -xtype d"找到所有的运行算例所在目录
2. 以运行算例为单位, 搜索 pid*.log 日志文件
3. 将搜素到的 pid*.log 日志文件通过多进程并行, 映射到进程池 Pool
4.timerlog2json 是具体的处理函数

日期: 2023-01-13T15:51
作者: 田鸿运
"""
def make_parser():
    """Build the command-line parser for the timer-log-to-json tool.

    Returns:
        argparse.ArgumentParser: parser exposing the LOGDIR positional
        argument plus the -p/--proc and --clean-cache options.
    """
    parser = argparse.ArgumentParser(
        prog="jxpamg-predo-timerlog2json",
        description="jxpamg timer data preprocess tool, transform log to json")
    parser.add_argument(
            "LOGDIR",
            metavar="LOGDIR",
            help="Destination log directories."
        )
    parser.add_argument(
            "-p", "--proc",
            dest="num_proc",
            # type=int so args.num_proc is an int, not a str; callers doing
            # int(args.num_proc) keep working (int(int) is a no-op).
            type=int,
            default=14,
            help="Number of processes used to run the task, default:%(default)s"
        )
    parser.add_argument(
            "--clean-cache",
            dest="clean_cache",
            action="store_true",
            default=False,
            help="Remove json data before transform."
        )
    return parser

def split_line2words(logline):
    """Parse one timer-log line into (level_id, timer_name, time_string).

    The line must contain exactly 4 whitespace-separated fields; level_id
    is the character width of the first field (an indentation marker).

    Args:
        logline (str): one stripped line from a pid*.log timer file.

    Returns:
        tuple: (level_id, timer_name, time_string) where time_string is
        the 4th field, still unconverted.

    Raises:
        ValueError: if the line does not split into exactly 4 fields.
    """
    words = logline.split()
    if len(words) != 4:
        # `assert` is stripped under `python -O`; raise so malformed input
        # always fails loudly regardless of interpreter flags.
        raise ValueError(f"unexpected line: {logline}")
    level_id = len(words[0])
    return (level_id, words[1], words[3])

def timerlog2json(args):
    """Convert one pid*.log timer file into a sibling .json file.

    Worker function for the multiprocessing Pool (hence the single packed
    argument).

    Args:
        args (tuple): (timerlog, clean_cache) — path to the timer log and
            whether to delete a pre-existing json before converting.

    The output file sits next to the input, with ".log" replaced by ".json",
    and maps timer_name -> [level_id, rounded_time].
    """
    timerlog, clean_cache = args
    timerlog = os.path.abspath(timerlog)
    logdir = os.path.dirname(timerlog)
    basename = os.path.basename(timerlog)
    output = os.path.join(logdir, basename.replace(".log", ".json"))

    # Remove a stale output first when the user asked for a clean rebuild.
    if clean_cache and os.path.exists(output):
        os.remove(output)

    # The json acts as a cache: skip logs that were already converted.
    if os.path.exists(output):
        return

    ensure_exists(timerlog)
    jsondata = {}
    # Stream line by line instead of materializing the file with readlines().
    with open(timerlog, "r", encoding="utf-8") as fp:
        for logline in fp:
            logline = logline.strip()
            if not logline:
                continue  # tolerate blank lines instead of raising on them
            level_id, keywords, val = split_line2words(logline)
            # NOTE(review): duplicate timer names overwrite earlier entries;
            # an accumulation scheme for Solve_Cycle_Relax_* keys existed
            # only as commented-out code and was dropped — restore it here
            # if summing repeated timers is ever needed.
            jsondata[keywords] = [level_id, round(float(val), 6)]

    print(f">> dump to {output}")
    with open(output, "w", encoding="utf-8") as fp:
        json.dump(jsondata, fp, indent=2, sort_keys=True)
    # TODO: deleting the source log is still undecided, but supercomputers
    # such as Tianhe impose file-count limits, so it may become necessary.

def main():
    """Entry point: scan run cases under LOGDIR and convert every pid*.log
    in parallel with a process pool."""
    parser = make_parser()
    args = parser.parse_args()

    runcases = scan_runcases(args.LOGDIR)

    # Use the pool as a context manager so it is terminated/joined
    # deterministically (the original never closed it).
    with Pool(int(args.num_proc)) as pool:
        for runcase in runcases:
            # tag is the run case's path relative to LOGDIR, for log readability.
            tag = os.path.relpath(runcase, args.LOGDIR)
            print(f">> scan timer logs under {tag} ...")
            timerlogs = find_targets(runcase, "pid*.log")
            timerlog_num = len(timerlogs)
            print(f">> scan timer logs under {tag} ... [{timerlog_num} logs]")
            # Pack (path, clean_cache) pairs — Pool.map passes one argument
            # per task, so the worker unpacks the tuple itself.
            tasks = [(i, args.clean_cache) for i in timerlogs]
            pool.map(timerlog2json, tasks)

if __name__ == "__main__":
    main()
