# -*- coding: utf-8 -*-
import os
import re
import argparse
from utils import load_json_file, load_csv_file, dump_csv_file


class DataTransformer():
    """
    Convert TensorRT (trtexec) profiling records into NPU-profiling-style
    CSV rows and fuzzy-match them against NPU op records by node name.
    """
    def __init__(self, ratio=1, match_method='norm'):
        """
        @param      ratio: GPU/NPU op-time ratio used to normalize the
                    'TRT normalized op time(us)' column (e.g. T4/310 = 4)
                    match_method: name of the matching strategy; only 'norm'
                    is currently implemented
        @raise      NotImplementedError: if match_method is unknown
        """
        # Output CSV column names, positionally aligned with trt_key_list.
        self.out_key_list = ['TRT Op Name', 'TRT op time(us)', 'TRT normalized op time(us)']
        # Columns additionally scaled by `ratio`.
        self.ratio_keys = {'TRT normalized op time(us)'}
        # Columns converted from milliseconds to microseconds.
        self.convert_keys = {'TRT op time(us)', 'TRT normalized op time(us)'}
        # Source keys in the trtexec JSON records.
        self.trt_key_list = ['name', 'averageMs', 'averageMs']
        self.ratio = ratio
        # BUG FIX: the original referenced self._norm_match, which does not
        # exist (the method is named norm_match), so constructing the class
        # raised AttributeError before anything else could run.
        self.match_methods = {
            'norm': self.norm_match
        }

        # Internal invariant: the two key lists are hard-coded and must stay
        # positionally aligned.
        assert len(self.out_key_list) == len(self.trt_key_list)
        if match_method not in self.match_methods:
            raise NotImplementedError('Not supported method: {} in {}'.format(
                match_method, self.match_methods.keys()
            ))
        # [[master_key, match_key], ...] column-name pairs compared when
        # matching an NPU record against a TRT record.
        self.match_keys = [['Op Name', 'TRT Op Name']]
        self._match = self.match_methods.get(match_method)

    @staticmethod
    def _token_match(master_name, match_name):
        """
        Return True when any camel-case token of master_name is a substring
        of any whitespace token of the cleaned match_name.
        """
        # Master node names are split at uppercase letters, e.g.
        # 'ConvRelu' -> ['Conv', 'Relu'].
        master_tokens = re.findall('[A-Z][^A-Z]*', master_name)
        # Candidate names are stripped of 'PWN' markers and parentheses,
        # then of ' + ' joiners and commas, and split on spaces.
        cleaned = re.sub("PWN|[()]", "", match_name)
        cleaned = cleaned.replace(' + ', '').replace(',', '')
        return any(
            m_tok in c_tok
            for c_tok in cleaned.split(' ')
            for m_tok in master_tokens
        )

    def norm_match(self, master_data, match_data_list):
        """
        @des        Fuzzy-match master_data against match_data_list by node
                    name. A candidate matches when, for every key pair in
                    self.match_keys, some master token occurs inside some
                    candidate token (see _token_match). Among multiple
                    matches the candidate with the shortest matched name
                    wins; ties keep the earliest candidate.
        @param      master_data: {column_name: value, ...} single record
                    match_data_list: [{column_name: value, ...}, ...]
        @return     best-matching candidate dict, or None if nothing matched
        """
        candidates = [
            match_data for match_data in match_data_list
            if all(
                self._token_match(master_data[master_key], match_data[match_key])
                for master_key, match_key in self.match_keys
            )
        ]

        if not candidates:
            return None

        # Shortest matched name == tightest match; min() is stable, so ties
        # keep the earliest candidate (same semantics as stable sort + [0]).
        main_key = self.match_keys[0][1]
        return min(candidates, key=lambda data: len(data[main_key]))

    def convert_to_csv(self, input_data_list):
        """
        @des        Map trtexec JSON records onto output CSV rows.
        @param      input_data_list: [{'name': ..., 'averageMs': ...}, ...]
        @return     list of dicts keyed by self.out_key_list; time columns
                    are converted ms -> us, normalized columns also scaled
                    by self.ratio
        """
        out_data_list = []
        for input_data in input_data_list:
            out_data = {}
            for out_key, trt_key in zip(self.out_key_list, self.trt_key_list):
                value = input_data[trt_key]
                if out_key in self.convert_keys:
                    value *= 1000  # ms -> us
                if out_key in self.ratio_keys:
                    value *= self.ratio  # GPU -> NPU normalization
                out_data[out_key] = value
            out_data_list.append(out_data)
        return out_data_list

    def match_data(self, master_list, match_list):
        """
        @des        match data by node name
        @param      master_list: master (NPU) records to match against
                    match_list: candidate (TRT) records
        @return     list parallel to master_list; unmatched entries get a
                    dict with every key of a candidate record set to None.
                    Each unmatched entry is a fresh copy (the original
                    appended one shared dict, so mutating one row would
                    have leaked into all unmatched rows).
        """
        # Template for unmatched rows: same columns, all values None.
        default_data = dict.fromkeys(match_list[0])

        out_list = []
        for master_data in master_list:
            match_data = self._match(master_data, match_list)
            if match_data is not None:
                out_list.append(match_data)
            else:
                out_list.append(default_data.copy())
        return out_list

    @staticmethod
    def filter(input_data, key_list):
        """Project each dict in input_data onto key_list, as a list of rows."""
        return [[data[key] for key in key_list] for data in input_data]


def parser_func(argv=None):
    """
    Build the CLI parser and parse arguments.

    @param      argv: optional list of argument strings; None (the default)
                keeps the original behavior of reading sys.argv[1:].
                Passing a list makes the function unit-testable.
    @return     argparse.Namespace with the parsed options
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_csv_file', required=True,
                        help="input summary csv file for om model profiling")
    parser.add_argument('-j', '--input_json_file', required=True,
                        help="input profiling json file for gpu model")
    parser.add_argument('-o', '--output_csv_file', required=True,
                        help="output compared result csv file")
    parser.add_argument('-r', '--ratio', type=float, default=1.0,
                        help="GPU/NPU算子耗时比例，如T4/310单卡=4")
    parser.add_argument('-s', '--save_trt_csv', action='store_true',
                        help="flag for save csv file for trt profiling file")
    args = parser.parse_args(argv)
    return args


def merge_list_dicts(input_data_list):
    """
    Merge dicts position-wise across parallel lists.

    input_data_list: [[dic1_1, dic1_2, ...], [dic2_1, dic2_2, ...], ...]
    return: [dic1_1+dic2_1+..., dic1_2+dic2_2+..., ...]
    On duplicate keys, later lists win (dict.update semantics).

    Raises ValueError when the inner lists differ in length. (The original
    used `assert`, which is silently stripped under `python -O`.)
    """
    num_dic = len(input_data_list[0])
    if any(len(data) != num_dic for data in input_data_list):
        raise ValueError("Num of dic should be same for all input data!")

    out_data_list = []
    for idx_dic in range(num_dic):
        # Start from a copy of the first list's dict so inputs stay untouched.
        out_dic = input_data_list[0][idx_dic].copy()
        for other_list in input_data_list[1:]:
            out_dic.update(other_list[idx_dic])
        out_data_list.append(out_dic)
    return out_data_list


def compare_profiling():
    """
    Compare TRT (GPU) profiling against NPU profiling and dump a merged CSV.

    Reads the module-level globals `args` and `transformer` set up in the
    `__main__` block. Loads the trtexec JSON (skipping its first entry) and
    the NPU summary CSV, optionally saves the converted TRT rows as
    '<json_name>_ori.csv' next to the output file, then writes the NPU rows
    merged with their name-matched TRT rows to args.output_csv_file.
    """
    # Load inputs; the first JSON entry is skipped (not a per-op record).
    trt_rows = transformer.convert_to_csv(load_json_file(args.input_json_file)[1:])
    npu_rows = load_csv_file(args.input_csv_file)

    if args.save_trt_csv:
        # Save the raw converted TRT rows beside the output CSV.
        stem = os.path.splitext(os.path.basename(args.input_json_file))[0]
        trt_csv_path = os.path.join(
            os.path.dirname(args.output_csv_file), stem + '_ori.csv'
        )
        dump_csv_file(trt_rows, trt_csv_path)

    # Pair each NPU row with its best-matching TRT row, then merge columns.
    paired_rows = transformer.match_data(npu_rows, trt_rows)
    merged = merge_list_dicts([npu_rows, paired_rows])
    dump_csv_file(merged, args.output_csv_file)


if __name__ == '__main__':
    # Parse CLI options and build the transformer as module-level globals:
    # compare_profiling() reads `args` and `transformer` directly, so these
    # assignments must happen before it is called.
    args = parser_func()
    transformer = DataTransformer(ratio=args.ratio)
    compare_profiling()
