#! python
from email.policy import default
import os
from typing import Any, Callable, Dict, List, Tuple
from matplotlib.pyplot import tight_layout
import pandas as pd
import re
import scipy as sp


class NameFilter(object):
    """Cleans up (demangled) CUDA kernel names through three rule stages:

    1. drop    -- regex patterns deleted from the name,
    2. sep     -- separator-joined chains (e.g. ``a::b::c``) collapsed,
    3. rewrite -- regex (pattern, replacement) substitutions.
    """

    def __init__(self, drop_rule: List[str],
                       sep_rule: List[Tuple[str, str, Any]],
                       rewrite_rule: List[Tuple[str, str]]):
        super().__init__()
        # sep rules: (separator, token-regex, selector) where selector is an
        # int position, a regex string, or a callable(string) -> replacement
        self.sep = sep_rule
        self.drop = drop_rule          # regexes removed outright
        self.rewrite = rewrite_rule    # (pattern, replacement) pairs

    def _drop(self, string):
        """Delete every occurrence of each drop pattern, stripping whitespace."""
        for pattern in self.drop:
            string = re.sub(pattern, "", string).strip()
        return string

    def _sep(self, string):
        """Collapse chains like ``a::b::c`` according to the rule's selector."""
        for sep, token, selector in self.sep:
            chain = re.compile("(({0}){1})+({0})".format(token, sep))
            while True:
                match = chain.search(string)
                if match is None:
                    break
                start, end = match.span()
                prefix, suffix = string[:start], string[end:]
                entries = string[start:end].split(sep)
                target = "**"
                if callable(selector):            # custom filter gets the whole string
                    target = selector(string)
                elif isinstance(selector, int):   # keep only the entry at this position
                    target = entries[selector]
                elif isinstance(selector, str):   # keep every entry matching the regex
                    # BUGFIX: the joined matches used to be assigned to `string`
                    # and then immediately clobbered by the literal "**" below.
                    target = sep.join(e for e in entries if re.search(selector, e))
                replaced = prefix + target + suffix
                if replaced == string:            # no progress -> avoid an infinite loop
                    break
                string = replaced
        return string

    def _rewrite(self, string):
        """Apply every (pattern, replacement) substitution in order."""
        for pattern, repl in self.rewrite:
            string = re.sub(pattern, repl, string)
        return string

    def _demangling(self, string):
        """Demangle Itanium-mangled C++ names by shelling out to ``c++filt``.

        NOTE(review): the name is interpolated into a shell command via
        os.popen -- acceptable for trusted profiler output, not for
        untrusted input.
        """
        if string[:2] == '_Z':
            string = os.popen(f"c++filt -n {string}").read().strip()
        return string

    def filt(self, string):
        """Run the full pipeline: demangle -> drop -> sep -> rewrite."""
        string = self._demangling(string)
        string = self._drop(string)
        string = self._sep(string)
        return self._rewrite(string)

# Aggressive kernel-name canonicaliser used for table indices.
# NOTE: the two rules containing \w are now raw strings -- the previous
# non-raw form triggered an invalid-escape SyntaxWarning on modern Python
# (the resulting regex is identical).
name_filter = NameFilter(
    drop_rule=[r"\(anonymous namespace\)::", r"::operator.{1,2}\(.*\)", "void", "Functor", "int=", 
                "bool=", "_kernel", "Kernel","cunn_Class", "volta_", r", \w*Calculator<.*>",
                r" ,\w*Cast"],
    # keep only the last identifier of an ``a::b::c`` chain
    sep_rule=[("::",r"[_a-zA-Z]\w*", -1)],
    # abbreviation substitutions applied last
    rewrite_rule=[(r">\(.*\)", ">"), 
                  ("elementwise", "elewise"), ("vectorized", "vec"), ("unrolled","url"), 
                  ("unsigned ","u"), ("int","i"), ("long","l"), ("float","f"), ("char", "c"),
                  ("Device", "Dev"), ("device", "dev"), ("_agent", "kern_agent"), 
                  ("with", "w/"), ("index", "idx")]
)

# Looser variant of ``name_filter`` intended for short display labels:
# drops the whole template-argument tail ("<...") instead of selectively
# rewriting it.
label_filter = NameFilter(
    drop_rule=[r"<.*", "void", "_kernel", "Kernel", "cunn_Class", "volta_"],
    # keep only the last identifier of an ``a::b::c`` chain
    sep_rule=[("::", r"[_a-zA-Z]\w*", -1)],
    # abbreviation substitutions applied last
    rewrite_rule=[(r">\(.*\)", ">"), 
                  ("elementwise", "elewise"), ("vectorized", "vec"), ("unrolled","url"), 
                  ("unsigned ","u"), ("int","i"), ("long","l"), ("float","f"), ("char", "c"),
                  ("Device", "Dev"), ("device", "dev"), ("_agent", "kern_agent"), 
                  ("with", "w/"), ("index", "idx")]
)


# Scale factors for unit suffixes found in profiler tables: time prefixes are
# normalised to milliseconds (s -> 1e3, m -> 1, u -> 1e-3, n -> 1e-6) and
# size prefixes to bytes (B -> 1, K -> 1e3, M -> 1e6, G -> 1e9).
_VALUE_PARSE_MAP = {'s':1e3, 'm':1, 'u':1e-3, 'n':1e-6, 'B':1, 'K':1e3, 'M':1e6, 'G':1e9}

def parse_data(val_str):
    """Parse one profiler table cell into a number.

    Handles comma-grouped integers and floats, percentages
    ("12.5%" -> 0.125), byte sizes ("2.5GB" -> 2.5e9) and time values
    ("1.2ms" -> 1.2) -- times are normalised to milliseconds and sizes to
    bytes via ``_VALUE_PARSE_MAP``.

    Raises:
        ValueError: if the string matches none of the known formats.
    """
    plain = val_str.replace(",", "")
    if plain.isdigit():
        return int(plain)
    # BUGFIX: must be a *full* match -- re.match() anchors only at the start,
    # so "1.2ms" / "50.0%" matched here and then crashed inside float().
    if re.fullmatch(r'\d+\.\d+', plain) is not None:
        return float(plain)
    if '%' in val_str:
        return float(val_str[:-1]) * 1e-2
    # unit-suffixed values: find the unit anchor ('B' for sizes, 's' for
    # times), walk back to the last digit; the character right after it is
    # the scale prefix (e.g. the 'G' in "2.5GB/s", the 'u' in "1.2us").
    for unit_anchor in ('B', 's'):
        loc = val_str.find(unit_anchor)
        if loc == -1:
            continue
        last_char = None
        while not val_str[loc].isdigit():
            last_char = val_str[loc]
            loc -= 1
        return float(val_str[:loc + 1]) * _VALUE_PARSE_MAP[last_char]
    raise ValueError(f"Could not parse to value {val_str}")


def name_filter_naive(str):
    """Crude kernel-name cleanup: strip template arguments, leading
    namespaces, "void ", and the common ``_ZN..at..native..`` mangling
    prefix plus the template-parameter suffix of mangled names.

    NOTE(review): the parameter shadows the builtin ``str``; kept as-is for
    interface compatibility.
    """
    angle = str.find('<')
    if angle != -1:
        str = str[:angle]
    scope = str.rfind("::")
    if scope != -1:
        str = str[scope + 2:]
    if "void " in str:
        str = str.replace("void ", "")

    # strip the at::native mangling prefix if still present (mangled input)
    prefix = re.search(r'_ZN\d*at\d*native\d*', str)
    if prefix:
        str = str[prefix.end():]
    # cut the mangled template-parameter block off the tail
    params = re.search(r'I[a-zA-Z0-9]{1,20}NS\d', str)
    if params:
        str = str[:params.start()]

    return str



def parse_timing_table(stream, do_filter = True):
    """Parse the "GPU activities" section of nvprof summary output.

    Args:
        stream: file-like object positioned anywhere before the section.
        do_filter: pass kernel names through the module-level ``name_filter``.

    Returns:
        dict mapping kernel name -> {"time%", "avg_time", "calls"}.
        Distinct kernels that fold onto one name after filtering are
        accumulated (and reported on stdout).
    """
    tab = {}
    line = stream.readline()
    # skip ahead to the section header
    while line and line.find("GPU activities:") == -1:
        line = stream.readline()

    # the first data row shares the line with the header; strip the label
    line = line[line.find(':')+1:]
    while line and line.find("API calls") == -1:
        parts = line.split()
        if not parts:  # tolerate stray blank lines (would IndexError below)
            line = stream.readline()
            continue
        raw_name = " ".join(parts[6:])
        fname = name_filter.filt(raw_name) if do_filter else raw_name

        time_p = parse_data(parts[0])
        avg_t = parse_data(parts[3])
        calls = parse_data(parts[2])

        if fname in tab:
            print(f"Entry folding detected on name: {fname}")
            tab[fname]["time%"] += time_p
            tab[fname]["avg_time"] += avg_t
            tab[fname]["calls"] += calls
        else:
            tab[fname] = {"time%": time_p,
                          "avg_time": avg_t,
                          "calls": calls}
        line = stream.readline()

    return tab


def parse_timing_table_nsys(stream, do_filter = True):
    """Parse the "CUDA Kernel Statistics" section of ``nsys stats`` output.

    Same result layout as ``parse_timing_table``: kernel name ->
    {"time%", "avg_time", "calls"}; names folding together after filtering
    are accumulated.
    """
    tab = {}
    line = stream.readline()
    while line and line.find("CUDA Kernel Statistics:") == -1:
        line = stream.readline()

    stream.readline() # \n
    stream.readline() # header
    stream.readline() # ---sep---
    line = stream.readline()

    # rows end at a blank line; BUGFIX: also stop at EOF -- readline()
    # returns "" forever there, and "" != '\n' used to spin indefinitely.
    while line and line != '\n':
        parts = line.split()
        raw_name = " ".join(parts[6:])
        fname = name_filter.filt(raw_name) if do_filter else raw_name

        time_p = parse_data(parts[0])
        avg_t = parse_data(parts[3])
        calls = parse_data(parts[2])

        if fname in tab:
            print(f"Entry folding detected on name: {fname}")
            tab[fname]["time%"] += time_p
            tab[fname]["avg_time"] += avg_t
            tab[fname]["calls"] += calls
        else:
            tab[fname] = {"time%": time_p,
                          "avg_time": avg_t,
                          "calls": calls}
        line = stream.readline()

    return tab


def parse_metrics_table(stream, do_filter = True):
    """Parse ``nvprof --metrics`` output into {kernel: {metric: value, ...}}.

    A "Kernel:" line opens a new entry; each numbered line below it carries
    one metric (the invocation count in column 0, the averaged value in the
    last column). ``run_cnt`` records the invocation count.
    """
    tab = {}
    cur_dict = {}
    line = stream.readline()
    while line:
        parts = line.split()
        if not parts:
            # BUGFIX: blank lines split to [], so parts[0] raised IndexError
            line = stream.readline()
            continue
        fn = " ".join(parts[1:])
        if do_filter:
            fn = name_filter.filt(fn)

        if parts[0] == 'Kernel:':
            cur_dict = {}
            tab[fn] = cur_dict
        if parts[0].isdigit():
            cur_dict['run_cnt'] = int(parts[0])
            cur_dict[parts[1]] = parse_data(parts[-1])
        line = stream.readline()

    return tab


def parse_trace_table(stream, do_filter = True):
    """Parse an nvprof GPU-trace table into per-launch entries and section
    boundaries.

    Returns:
        (trace, section): ``trace`` is a list of dicts with 'start',
        'duration', 'fn' plus either memory-API fields ('size',
        'throughput') or kernel geometry ('block_size', 'grid_size');
        ``section`` lists boundary indices (including 0 and len(trace))
        split at long gaps between consecutive launches.
    """
    trace = []
    section = [0]
    # skip the header; BUGFIX: also stop at EOF, otherwise a file without
    # "DstMemType" made readline() return "" forever and the loop spin.
    line = stream.readline()
    while line and line.find("DstMemType") == -1:
        line = stream.readline()
    line = stream.readline()
    # parse rows until a blank line or EOF (same EOF guard as above)
    while line and line != '\n':
        parts = line.split()
        entry = {}
        entry['start'] = parse_data(parts[0])
        entry['duration'] = parse_data(parts[1])
        if parts[2] == '-': # memory api row (no launch geometry)
            raw = " ".join(parts[15:])
            entry['fn'] = name_filter.filt(raw) if do_filter else raw
            entry['size'] = parse_data(parts[7])
            entry['throughput'] = parse_data(parts[8])
        else: # kernel launch row
            raw = " ".join(parts[19:-1])
            entry['fn'] = name_filter.filt(raw) if do_filter else raw
            entry['block_size'] = {'x':int(parts[2][1:]), 'y':int(parts[3]), 'z':int(parts[4][:-1])}
            entry['grid_size'] = {'x':int(parts[5][1:]), 'y':int(parts[6]), 'z':int(parts[7][:-1])}
        trace.append(entry)
        line = stream.readline()

    for i in range(1, len(trace)):
        # FIXME: heuristic only -- relies on the measured runs spacing
        # sections >= 2 s apart while every kernel finishes in < 1 s
        if trace[i]['start'] - trace[i-1]['start'] > 2000:
            section.append(i)
    section.append(len(trace))

    return trace, section


def reduce_keyword_in_index(df, col_pat, idx_key = None, reduction = None):
    """Select rows whose index labels contain keyword groups and columns
    matching a regex, optionally reducing the slice to a scalar.

    Args:
        df: source DataFrame.
        col_pat: regex matched (re.search) against each column name.
        idx_key: comma-separated groups of '&'-joined keywords; a row is
            kept when, for at least one group, every keyword occurs
            case-insensitively in its index label. None keeps all rows.
        reduction: name of a DataFrame reduction method ("sum", "mean", ...)
            applied to the selection; None returns the sub-frame instead.

    Returns:
        The selected sub-DataFrame, or float(result) when ``reduction`` is
        given.
    """
    key_grp = []
    if idx_key is not None:
        for grp in idx_key.split(","):   # renamed: loop var shadowed builtin `str`
            key_grp.append(grp.split("&"))

    idx = [label for label in df.index
           if not key_grp
           or any(all(label.lower().find(k.lower()) != -1 for k in kg) for kg in key_grp)]

    col = [c for c in df.columns if re.search(col_pat, c)]

    if reduction is None:
        return df.loc[idx][col]
    # getattr instead of eval(): same result, without executing a code string
    return float(getattr(df.loc[idx][col], reduction)())


def trace_check_pattern_gat(trace):
    """Classify a GAT section by its kernel names: an sddmm kernel seen
    before any spmm means "forward", an sddmm after an spmm means
    "backward", and no sddmm at all means "other"."""
    spmm_seen = False
    for name in trace:
        lowered = name.lower()
        if "spmm" in lowered:
            spmm_seen = True
        if "sddmm" in lowered:
            return "backward" if spmm_seen else "forward"
    return "other"

def trace_check_pattern_gcn(trace):
    """Classify a GCN section by its kernel names: a criterion kernel
    preceded by an spmm means "forward", a criterion without any spmm
    means "backward", and no criterion kernel at all means "other"."""
    spmm_seen = False
    for name in trace:
        lowered = name.lower()
        if "spmm" in lowered:
            spmm_seen = True
        if "criterion" in lowered:
            return "forward" if spmm_seen else "backward"
    return "other"


def trace_section_induction(trace, section):
    """Deduplicate trace sections (as tuples of kernel names) and bucket the
    unique patterns by the GCN forward/backward classifier."""
    unique = {tuple(entry['fn'] for entry in trace[lo:hi])
              for lo, hi in zip(section[:-1], section[1:])}

    group = {"forward": [], "backward": [], "other": []}
    for pattern in unique:
        # FIXME: switch classifier (gat/gcn) to match the profiled model
        group[trace_check_pattern_gcn(pattern)].append(pattern)
    return group


class FakeFile(object):
    """Minimal read-only stand-in for a text file: ``readline`` walks a list
    of pre-supplied strings and returns "" once exhausted, like a real file."""

    def __init__(self, strings) -> None:
        self.strings = strings  # lines to serve, in order
        self.cnt = 0            # index of the next line to return

    def reset(self):
        """Rewind to the first stored line."""
        self.cnt = 0

    def readline(self):
        """Return the next stored line, or "" at end-of-input."""
        if self.cnt < len(self.strings):
            ret = self.strings[self.cnt]
        else:
            ret = ""
        self.cnt += 1
        return ret

def parseTimingTable(stream, keys, reduction):
    """Parse an nsys timing table (names unfiltered) and reduce the 'time%'
    column for each keyword expression in ``keys``."""
    df = pd.DataFrame.from_dict(parse_timing_table_nsys(stream, False), 'index')
    return {key: reduce_keyword_in_index(df, 'time%', key, reduction)
            for key in keys}

if __name__ == '__main__':
    import argparse
    import os.path as osp
    import matplotlib.pyplot as plt
    import numpy as np

    parser = argparse.ArgumentParser()
    parser.add_argument("--name", required=True)
    parser.add_argument("--file", default=None)  # optional override for the trace file
    args = parser.parse_args()

    # profiler dumps produced by separate nvprof/nsys runs of <name>
    tr = open(osp.join('workspace', "trace_" + args.name + ".txt"), "r")
    met = open(osp.join('workspace', "profile_" + args.name + ".txt"), "r")
    tim = open(osp.join('workspace', "out_" + args.name + ".txt"), "r")
    
    #  ------------- print kernel time consumption proportions ---------------
    ret = pd.DataFrame.from_dict(parse_timing_table_nsys(tim, False), 'index') 
    for key in ["cusparse", "sgemm", "elementwise,elewise", "reduce", "criterion", "softmax", "sort,sweep,agent"]:
        print("collected {} : {}".format(key, reduce_keyword_in_index(ret, "time%", key, "sum")))
    # NOTE(review): this early exit makes everything below unreachable; the
    # later sections are alternative analyses toggled by moving/removing it.
    exit(0)

    #  ------------- print float/int kernel time consumption -----------------
    # metrics = pd.DataFrame.from_dict(parse_metrics_table(met), 'index')
    # int_metrics = metrics[metrics["flop_count_sp"] == 0]
    # float_metrics = metrics[metrics["flop_count_sp"] > 0]

    # timing = pd.DataFrame.from_dict(parse_timing_table(tim, False), 'index')
    # int_timing = reduce_keyword_in_index(timing.loc[timing.index.intersection(int_metrics.index)], "time%")
    # float_timing = reduce_keyword_in_index(timing.loc[timing.index.intersection(float_metrics.index)], "time%")
        
    # print(f"timing split i:{int_timing} f:{float_timing}")
    # exit(0)

    #  ------------- print interesting kernel metrics ------------------------
    # metrics = pd.DataFrame.from_dict(parse_metrics_table(met, False), 'index')
    # timing = pd.DataFrame.from_dict(parse_timing_table(tim, False), 'index')
    # major_index = timing[timing['time%'] > 0.1].index

    # diff = timing.index.intersection(metrics.index).difference(timing.index)

    # ret = reduce_keyword_in_index(metrics.loc[metrics.index.intersection(major_index)], "stall", "element&addf&float,spmm,element&mulf&float,sgemm")
    # ret.index = [name_filter.filt(x) for x in ret.index]
    # print(ret)
    # ret.to_excel(f"{args.name}_stall_statistics.xlsx")
    # exit(0)

    # ------------- check forward & backward trace --------------------------
    if args.file is not None:
        tr.close()
        tr = open(args.file)
    t, s = parse_trace_table(tr, False)
    pat = trace_section_induction(t, s)
    fp = list(pat['forward'])
    bp = list(pat['backward'])

    name_trace = [x['fn'] for x in t]

    for i in range(len(s)-1):
        if trace_check_pattern_gcn(name_trace[s[i]:s[i+1]]) == 'other':
            continue
        
        try:
            bidx = -1
            fidx = fp.index(tuple(name_trace[s[i]:s[i+1]]))
        except ValueError:
            # not a forward pattern -> must be one of the backward patterns
            # (BUGFIX: was a bare except, which also swallowed KeyboardInterrupt)
            fidx = -1
            bidx = bp.index(tuple(name_trace[s[i]:s[i+1]]))

        if fidx >= 0:
            print(f"forward {fidx} at line {s[i]}~{s[i+1]}")
        elif bidx >= 0:
            print(f"backward {bidx} at line {s[i]}~{s[i+1]}")
        
        for l in name_trace[s[i]:s[i+1]]:
            # print(name_filter.filt(l))
            print(l)
        print("\n")
    # while a_cursor < len(a) or b_cursor < len(b):
    #     print(f"kernel {a_cursor} vs {b_cursor}:")
    #     print(a[a_cursor] if a_cursor < len(a) else " - ")
    #     print(b[b_cursor] if b_cursor < len(b) else " - ")
    #     print()
    #     if a[a_cursor] != b[b_cursor]:
    #         c = input()
    #         if c == 'a':
    #             a_cursor += 1
    #         if c == 'b':
    #             b_cursor += 1
    #     else:
    #         a_cursor += 1
    #         b_cursor += 1
    # exit(0)
    
    # -------------- draw flop efficiency timeline -------------------------

    # metrics = pd.DataFrame.from_dict(parse_metrics_table(met, False), 'index')
    # trace, sect = parse_trace_table(tr, False)

    # trace_df = pd.DataFrame(trace).join(metrics, on="fn").fillna(0)

    # cnt = {"backward" : 0, "forward" : 0}
    # figures = {"backward" : plt.figure("backward", figsize=[16.4,4.8], tight_layout=True)}
    # plt.xlabel('time: ms')
    # plt.ylabel('floating point efficiency: %')
    # plt.yscale('log')
    # figures["forward"] = plt.figure("forward",figsize=[16.4,4.8],tight_layout=True)
    # plt.xlabel('time: ms')
    # plt.ylabel('floating point efficiency: %')
    # plt.yscale('log')

    # for i in range(len(sect)-1):
    #     pat = trace_check_pattern( [x['fn'] for x in trace[ sect[i]:sect[i+1] ]])
    #     if pat in cnt.keys():
    #         cnt[pat] += 1
    #         plt.figure(pat)
    #         sect_df = trace_df.iloc[sect[i]:sect[i+1]][['start', 'duration', 'fn', 'flop_sp_efficiency', 'gld_throughput']]
    #         base_time = sect_df.iloc[0]['start']
    #         sect_df['start'] -= base_time
    #         sect_df.index = range(len(sect_df))
    #         x = []
    #         y = []
    #         if cnt[pat] == 3:
    #             for i in sect_df.index:
    #                 s = sect_df.loc[i]["start"]
    #                 d = sect_df.loc[i]["duration"]
    #                 eff = sect_df.loc[i]["flop_sp_efficiency"]
    #                 # start time
    #                 x.append(s)
    #                 y.append(0)
    #                 x.append(s)
    #                 y.append(eff)
    #                 # duration
    #                 x.append(s+d)
    #                 y.append(eff)
    #                 if eff > 0.00001:
    #                     if eff < 0.05:
    #                         rot = 60
    #                         h_align = "left"
    #                     else:
    #                         rot = 0
    #                         h_align = 'right'
    #                     plt.text(x=s+d/2, y=eff, s=label_filter.filt(sect_df.loc[i]['fn']), fontsize=10, 
    #                             verticalalignment='bottom', horizontalalignment=h_align, rotation=rot)
    #                 # end
    #                 x.append(s+d)
    #                 y.append(0)

    #         xarr,yarr = np.array(x),np.array(y)
    #         plt.plot(xarr, yarr)

    # figures["forward"].savefig(f"{args.name}_trace_forward.png")
    # figures["backward"].savefig(f"{args.name}_trace_backward.png")