# from icecream import ic
import json
import os
import sys
import pandas as pd
import requests

from get_sql_res import gen_run_detail, gen_data_cluster
from get_response import gen_wafermap_config_mapping, gen_wafermap_config
from gen_info_dic import gen_info_dic

# Resolve the character-mapping table relative to this script's own
# directory so the module loads regardless of the current working dir.
directory_path = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(directory_path, 'cfgs/mapping_char.json')

# Loaded once at import time; shared by the summary-building code below.
with open(file_path, encoding='utf-8') as cfg_file:
    mapping_char = json.load(cfg_file)


def _format_rule_detail(rule_key, config):
    """Return the human-readable 'Rule Detail' string for one rule run.

    ``config`` is the already-parsed JSON config of a single rule run.
    Rule types without a dedicated format (and SYL, which never reaches
    here) yield an empty string.
    """
    param = config['param']
    if rule_key == 'GDBN':
        return (
            f"上限阈值:{param['threshold']}, "
            f"边缘上限阈值:{param['edgeThreshold']}, "
            f"边缘圈数:{param['edgeTurns']}"
        )
    if rule_key in ('SPAT', 'DPAT'):
        # 'BySite' is optional in the config; default display is 'no'.
        return (
            f"DataLevel:{mapping_char[param['dataLevel']]}, "
            f"BySite:{param.get('BySite', 'no')}, "
            f"LimitMethod:{mapping_char[param['limitMethod']]}"
        )
    if rule_key == 'ZPAT':
        limit = param['limitInfoParamDTO']
        return (
            f"MinWaferCount:{param['minWaferCount']}, "
            f"MinWaferYield:{param['minWaferYield']}, "
            f"Limit:{mapping_char[limit['limitType']]}({limit['fixedValue']})"
        )
    if rule_key in ('CORRELATION_RULE', 'CPAT'):
        # Both rule types share an identical detail format.
        return (
            f"DataLevel:{mapping_char[param['dataLevel']]}, "
            f"LimitMethod:{mapping_char[param['limitMethod']]}"
        )
    if rule_key == 'CLUSTER':
        return (
            f"最少连续点:{param['minContinuousPoint']}, "
            f"最大连续点:{param['maxContinuousPoint']}, "
            f"Ink圈数:{param['inkTurns']}"
        )
    if rule_key == 'WAFER_EDGE':
        return f"边缘圈数:{param['inkTurns']}"
    if rule_key == 'MergeBin':
        inputs = ','.join(d['name'] for d in param['needMergedDatasets'])
        return (
            f"Merge输入:{inputs}, "
            f"Merge输出:{param['toDataset']['name']}"
        )
    return ''


def gen_RsRuleSummary(info_dic, df_run_detail):
    """Build the 'RsRuleSummary' overview table, one row per rule.

    Parameters
    ----------
    info_dic : dict
        Must provide 'rule_map' (alias -> rule key), 'test_stage',
        'input_dict_for_mapping', 'output_dict_for_mapping' and
        'df_info'. Mutated: a 'RsRuleSummary' entry with the resulting
        column/row counts is written into ``info_dic['df_info']``.
    df_run_detail : pandas.DataFrame
        One row per rule run; needs columns 'rule_alias', 'rule_key',
        'config' (JSON string) and 'input_virtual_ids' (comma-separated
        integer ids).

    Returns
    -------
    pandas.DataFrame
        Summary rows numbered from 1 in column 'No'; SYL rules excluded.
    """
    input_dict = info_dic['input_dict_for_mapping']
    output_dict = info_dic['output_dict_for_mapping']

    rows = []
    for rule_alia, rule_key in info_dic['rule_map'].items():
        if rule_key == 'SYL':  # SYL rules are not part of this summary
            continue
        rule_one = df_run_detail[df_run_detail['rule_alias']
                                 == rule_alia].iloc[0]
        config = json.loads(rule_one['config'])

        # A low limit of '0' is deliberately shown without a '%' suffix.
        low, high = config['holdLowLimit'], config['holdHighLimit']
        if low == '0':
            hold_limit = f"[{low}, {high}%]"
        else:
            hold_limit = f"[{low}%, {high}%]"

        # NOTE(review): dataset ids are looked up by rule_key, not by
        # rule_alias — if several aliases share one rule_key they all get
        # the first match's datasets. Confirm this is intended.
        tmp_df = df_run_detail[df_run_detail['rule_key'] == rule_key]
        input_virtual_id = tmp_df['input_virtual_ids'].iloc[0]
        # The single-id case is just the comma-separated case with one
        # element, so both are handled by one code path.
        ids = [int(i) for i in input_virtual_id.split(',')]

        rows.append({
            'Rule Type': rule_one['rule_key'],
            'Rule Name': rule_alia,
            'Rule Detail': _format_rule_detail(rule_key, config),
            'Hold Limit': hold_limit,
            'Stage': info_dic['test_stage'],
            'Output Data Source': mapping_char[output_dict[ids[0]]],
            'Input Virtual DataSet': ','.join(input_dict[i] for i in ids),
            'Output Virtual DataSet': input_dict[ids[0]],
        })

    res_df = pd.DataFrame(rows)
    res_df.insert(0, 'No', range(1, len(res_df) + 1))
    info_dic['df_info']['RsRuleSummary'] = {
        'col_num': len(res_df.columns),
        'row_num': len(res_df) + 1,  # +1 accounts for the header row
    }
    return res_df


if __name__ == '__main__':
    # Standalone entry point: read the run configuration, fetch the rule
    # run details from the database layer, and build the summary table.
    with open('./cfgs/input.json', 'r') as file:
        config = json.load(file)

    rule_run_record_id = config['recordId']
    sql_info = {
        'mysqlInfo': config['mysqlInfo'],
        'ckInfo': config['ckInfo'],
        'dasHost': config['dasHost'],
    }
    re_file_path = config['fileFullPath']

    df_run_detail = gen_run_detail(rule_run_record_id, sql_info)
    info_dic = gen_info_dic(df_run_detail, sql_info)

    res_df = gen_RsRuleSummary(info_dic, df_run_detail)
