import argparse
import csv
import os
import shutil
import subprocess
from itertools import product

# Path to the benchmark binary and the generated parameter header, both
# relative to the directory this script is launched from.
benchamx = "../bin/benchamx.out"
params = "../include/params.h"

# Perf events to collect: {CSV column name: perf event selector string}
pmc_dict = {"l1d_miss": "mem_load_retired.l1_miss", 
            "l1d_hit": "mem_load_retired.l1_hit",
            "l2_miss": "mem_load_retired.l2_miss", 
            "l2_hit": "mem_load_retired.l2_hit",
            "l3_miss": "mem_load_retired.l3_miss", 
            "l3_hit": "mem_load_retired.l3_hit",   
            "l2_request.all": "L2_REQUEST.ALL", 
            "l1d.replacement": "L1D.REPLACEMENT", 
            "all_loads": "MEM_INST_RETIRED.ALL_LOADS", 
            "exe.amx_busy": "EXE.AMX_BUSY", 
            "cycles": "cycles"}

# Metric names scraped (by substring match) from benchamx's stdout.
metrics = ["MAC_Utilization"]

# Matrix shapes to benchmark, each as [M,N,K].
# Commented-out entries below are alternative shape sets kept for reference.
# shapes = [[32,1024,1024], 
#           [64,1024,1024], 
#           [128,128,4096], 
#           [256,256,2048], 
#           [512,512,1024]]
shapes = [[128,128,4096]]

# Design space to sweep: each key becomes a #define in params.h; the
# *_cache values are multipliers of the tile dimensions (TILE_M/TILE_K/TILE_N).
design_space = {"pft_dist": [0,1,2,4,8], 
                "m_cache": [2], 
                "k_cache": [4], 
                "n_cache": [2]}

# Alternative (smaller) design space kept for quick experiments.
# design_space = {"pft_dist": [0], 
#                 "m_cache": [2], 
#                 "k_cache": [8], 
#                 "n_cache": [2]}


class PerfEvent:
    """
    A single perf event identified by a human-readable name plus its perf
    selector (a plain event string today; a {"ucode": ..., "umask": ...}
    dict form is planned but not yet handled).
    """

    def __init__(self, name, event):
        self.name = name
        self.event = event
        # Measured counter values keyed by iteration count: {niters: value}
        self.values = {}

    @property
    def code(self):
        """Perf-CLI encoding of this event."""
        # [TODO: wxc/06/04/2025] handle the complex dict type event ({"ucode": ucode, "umask": umask})
        return self.event

    def set_value(self, niters, value):
        """Record the counter value observed for a run of `niters` iterations."""
        self.values[niters] = value

    def get_value(self, niters):
        """Look up the counter value recorded for `niters` iterations."""
        return self.values[niters]

    def print_event(self):
        """Print "name: code" for debugging."""
        print(f"{self.name}: {self.code}")

class PerfAnalyzer:
    """
    Sweeps the design space: for each design point it regenerates params.h,
    rebuilds benchamx, runs it under `perf stat` for every shape, and writes
    the raw PMC counts, derived cache miss rates, and benchmark-reported
    metrics to a per-design-point CSV file.
    """

    def __init__(self):
        # {shape_key: {metric name: value scraped from benchamx stdout}}
        self.metrics_of_shapes = {}
        # {shape_key: {event name: PerfEvent}}
        self.perf_events_of_shapes = {}

        for shape in shapes:
            key = self._shape_key(shape)
            self.perf_events_of_shapes[key] = {
                name: PerfEvent(name, event) for name, event in pmc_dict.items()
            }
            self.metrics_of_shapes[key] = dict.fromkeys(metrics)

    @staticmethod
    def _shape_key(shape):
        """Canonical dict/CSV key for an [M,N,K] shape, e.g. "[128,128,4096]"."""
        return "[" + ",".join(str(x) for x in shape) + "]"

    def _run_perf(self, shape, bench, loop_order, warmups, iterations, core,
                  perf_events, metrics_of_shape):
        """
        Run benchamx once under `perf stat` pinned to `core` and record:
          - benchmark metrics (stdout) into `metrics_of_shape`
          - raw event counts (stderr) into each PerfEvent, keyed by
            `iterations` so a warmup-only run (iterations == 0) can later
            be subtracted by _exclude_warmup.
        """
        events = ",".join(e.code for e in perf_events.values())

        args = ["perf", "stat",
                "-e", events,
                "-C", core,
                "taskset", "-c", core,
                benchamx, bench, warmups, iterations,
                shape[0], shape[1], shape[2],
                loop_order]
        args = [str(x) for x in args]

        print(args)

        result = subprocess.run(args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                text=True)

        # benchamx stdout: metric lines of the form "<name> <value> ...",
        # matched by substring against the configured metric names.
        for line in result.stdout.splitlines():
            for metric in metrics:
                if metric in line:
                    metrics_of_shape[metric] = line.split()[1]

        # perf stat output (stderr): counter lines of the form
        # "<count> <event> ...", skipping perf's header/footer lines.
        for line in result.stderr.splitlines():
            if line and "Performance" not in line and "seconds" not in line:
                fields = line.split()
                value = float(fields[0].replace(',', ''))
                event = fields[1]
                for e in perf_events.values():
                    if event == e.code:
                        e.set_value(iterations, value)

    def _exclude_warmup(self, perf_events, iterations):
        """Subtract the warmup-only run (stored at key 0) from the full run."""
        for e in perf_events.values():
            e.set_value(iterations, e.get_value(iterations) - e.get_value(0))

    def _calc_miss_rate(self, miss_name, hit_name, perf_events, iterations):
        """Return miss / (miss + hit) for the named event pair."""
        miss = perf_events[miss_name].get_value(iterations)
        hit = perf_events[hit_name].get_value(iterations)
        return miss / (miss + hit)

    def _parse_perf_events(self, output, iterations):
        """Write one CSV row per shape: raw counts, miss rates, then metrics."""
        # newline="" is required by the csv module for writer files
        with open(output, "w", newline="") as f:
            writer = csv.writer(f)

            header = ["[M,N,K]"] + list(pmc_dict.keys())
            header += ["l1d_miss_rate", "l2_miss_rate", "L3_miss_rate"]
            header += metrics
            writer.writerow(header)

            for shape in shapes:
                key = self._shape_key(shape)
                perf_events = self.perf_events_of_shapes[key]
                metrics_of_shape = self.metrics_of_shapes[key]

                row = [key]
                row.extend(e.get_value(iterations) for e in perf_events.values())

                for miss_name, hit_name in (("l1d_miss", "l1d_hit"),
                                            ("l2_miss", "l2_hit"),
                                            ("l3_miss", "l3_hit")):
                    row.append(self._calc_miss_rate(miss_name, hit_name,
                                                    perf_events, iterations))

                row += list(metrics_of_shape.values())

                writer.writerow(row)

    def _run_shapes(self, bench, loop_order, exclude, warmups, iterations, core, output):
        """Measure every shape; optionally subtract warmup-only statistics."""
        for shape in shapes:
            key = self._shape_key(shape)
            print(key)
            perf_events = self.perf_events_of_shapes[key]
            metrics_of_shape = self.metrics_of_shapes[key]

            if exclude:
                # A warmup-only run (0 iterations) measures the warmup cost,
                # which _exclude_warmup then subtracts from the full run.
                self._run_perf(shape, bench, loop_order, warmups, 0, core, perf_events, metrics_of_shape)
                self._run_perf(shape, bench, loop_order, warmups, iterations, core, perf_events, metrics_of_shape)
                self._exclude_warmup(perf_events, iterations)
            else:
                self._run_perf(shape, bench, loop_order, warmups, iterations, core, perf_events, metrics_of_shape)

        self._parse_perf_events(output, iterations)

    def _gen_params(self, design_point):
        """Emit params.h with one #define per design-space knob; the cache
        block sizes are expressed as multiples of the tile dimensions."""
        tile_macro = {"m_cache": "TILE_M", "k_cache": "TILE_K", "n_cache": "TILE_N"}

        with open(params, 'w') as f:
            f.write("// Generated C header file containing matrix multiplication parameters.\n")
            f.write("// Do not edit manually.\n")

            for define, value in design_point.items():
                if define in tile_macro:
                    value = "({}*{})".format(tile_macro[define], value)
                f.write(f"#define {define.upper()} {value}\n")

    def _design_point_name(self, design_point):
        """Underscore-joined name, e.g. "pft_dist_0_m_cache_2_k_cache_4_n_cache_2"."""
        return "_".join(f"{define}_{value}" for define, value in design_point.items())

    def _rename_params(self, design_point):
        """Archive the generated params.h under a design-point-specific name."""
        shutil.copy(params, "../include/params_" + self._design_point_name(design_point) + ".h")

    def run(self, bench, loop_order, exclude, warmups, iterations, core):
        """Full sweep: for every design point, regenerate params.h, rebuild
        benchamx, measure all shapes, and archive the results."""
        for values in product(*design_space.values()):
            design_point = dict(zip(design_space.keys(), values))
            self._gen_params(design_point)

            # rebuild benchamx against the freshly generated params.h
            os.system("cd ../ && make clean && make build")

            # run benchamx, writing "<design-point-name>.csv"
            self._run_shapes(bench, loop_order, exclude, warmups, iterations, core,
                             self._design_point_name(design_point) + ".csv")
            self._rename_params(design_point)
            

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Sweep the design space and collect perf statistics for benchamx.")

    parser.add_argument("-b",
                        "--bench",
                        type=str,
                        default="l1",
                        help="benchmark name")

    # Defaults are proper ints instead of strings: with type=int argparse
    # happens to parse string defaults, but relying on that is fragile.
    parser.add_argument("-w",
                        "--warmups",
                        type=int,
                        default=100,
                        help="number of warmups")

    parser.add_argument("-i",
                        "--iterations",
                        type=int,
                        default=1000,
                        help="number of iterations")

    parser.add_argument("-e",
                        "--exclude",
                        action='store_true',
                        help="excluding warmup statistics or not")

    parser.add_argument("-c",
                        "--core",
                        type=int,
                        default=0,
                        help="core on which to run")

    parser.add_argument("-l",
                        "--loop_order",
                        type=str,
                        default="ijk",
                        help="loop order")

    args = parser.parse_args()

    perf_analyzer = PerfAnalyzer()
    perf_analyzer.run(args.bench,
                      args.loop_order,
                      args.exclude,
                      args.warmups,
                      args.iterations,
                      args.core)