import argparse
import importlib
import json
import os
import subprocess
import sys

try:
    import numpy as np
except:
    os.system("pip install --upgrade " + "numpy")

try:
    import pandas as pd
except:
    os.system("pip install --upgrade " + "pandas")

try:
    import yaml
except:
    os.system("pip install --upgrade " + "pyyaml")

def parse_args():
    parser = argparse.ArgumentParser("集中化IT智能决策平台统一AI平台测试接口POST数据生成脚本")

    # 配置文件
    parser.add_argument('-c',
                        '--config',
                        type=str,
                        default='./configs/config.yaml',
                        help='配置文件的路径')

    # # 提交数据的时间戳
    # parser.add_argument('-t',
    #                     '--time',
    #                     type=str,
    #                     default='2021/03/01 08:10:00',
    #                     help='测试数据的时间戳，格式为:年/月/日 时/分/秒; 如2021/03/01 08:10:00')
    
    # # 数据库维度的数据
    # parser.add_argument('--db',
    #                     type=str,
    #                     default='./data/1-数据库健康度数据.csv',
    #                     help='包含数据库维度的所有资源指标的csv文件路径')
    
    # # 业务维度的数据
    # parser.add_argument('--crm',
    #                     type=str,
    #                     default='./data/2-业务健康度数据.csv',
    #                     help='包含业务维度的所有资源指标的csv文件路径')
    
    # # 应用维度的数据
    # parser.add_argument('--app',
    #                     type=str,
    #                     default='./data/3-应用健康度数据.csv',
    #                     help='包含应用维度的所有资源指标的csv文件路径')
    
    # # 中间件维度的数据
    # parser.add_argument('--kafka',
    #                     type=str,
    #                     default='./data/4-中间件健康度数据.csv',
    #                     help='包含中间件维度的所有资源指标的csv文件路径')
    
    # # 主机维度的数据
    # parser.add_argument('--host',
    #                     type=str,
    #                     default='./data/5-主机健康度数据.csv',
    #                     help='包含主机维度的所有资源指标的csv文件路径')

    # # 生成的post data和curl.sh的保存文件
    # parser.add_argument('-o',
    #                     '--output',
    #                     type=str,
    #                     default='./post_data',
    #                     help='生成的post body的json文件和curl.sh文件的保存目录')

    args = parser.parse_args()
    return args

def genetate_curl_sh(out_path, post_data, api):
    shell_script = 'curl --location --request POST' + ' '
    shell_script += "\'" + api + "\'"
    shell_script += " " + "\\" +"\n"
    shell_script += "--header \'Content-Type: application/json\'" + " " + "\\" + "\n"
    shell_script += '--data-raw' + ' '
    shell_script += "\'" + json.dumps(post_data, indent=4) + "\'"

    with open(out_path, 'w') as f:
        f.write(shell_script)
    f.close()

def process(timestamp, config):
    

    #获取生成文件的保存目录
    out_dir = config['output']
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    post_data = {}
    post_data['time'] = timestamp

    # 获取请求结果的对比数据
    response_data = {}
    for key, dim_file_path in config['dim_files'].items():
        
        dim_df = pd.read_csv(dim_file_path)
        dim_df = dim_df[dim_df['时间'] == timestamp]
        dim_values = dim_df.values

        # 每个维度的response_data
        dim_response_data_id = dim_values[:, 0]
        dim_response_data_health = dim_values[:, 6]
        dim_response_data_id = dim_response_data_id[:, np.newaxis]
        dim_response_data_health = dim_response_data_health[:, np.newaxis]
        dim_response_data_array = np.concatenate([dim_response_data_id, dim_response_data_health], axis=1)
        dim_response_data = []
        for item in dim_response_data_array:
            item_dict = {}
            item_dict['id'] = item[0]
            item_dict['score'] = item[1]
            dim_response_data.append(item_dict)
        response_data[key] = dim_response_data

        dim_values_1_2 = dim_values[:, 0:2]
        # print(dim_values_1)
        if key in ['host', 'app', 'crm', 'db']:
            dim_values_3 = dim_values[:, 3]
            dim_values_4 = dim_values[:, 4]
            dim_values_5 = dim_values[:, 5]
        if key in ['kafka']: # 需要交换一下列的次序
            dim_values_3 = dim_values[:, 5]
            dim_values_4 = dim_values[:, 4]
            dim_values_5 = dim_values[:, 3]
        
        dim_values_3 = dim_values_3[:, np.newaxis]
        dim_values_4 = dim_values_4[:, np.newaxis]
        dim_values_5 = dim_values_5[:, np.newaxis]
        # print(dim_values_1_2.shape, dim_values_3.shape, dim_values_4.shape, dim_values_5.shape)
        dim_values = np.concatenate([dim_values_1_2, dim_values_3, dim_values_4, dim_values_5], axis=1)

        post_data[key] = dim_values.tolist()

    # timestamp由于有'\'需要替换一下
    timestamp_str = timestamp.replace("/", "-")
    timestamp_str = timestamp_str.replace(" ", "-")
    timestamp_folder = os.path.join(out_dir, timestamp_str)
    if not os.path.exists(timestamp_folder):
        os.makedirs(timestamp_folder)
    
    post_body_data_file_path = os.path.join(timestamp_folder, "post_body_data_" + timestamp_str + ".json")
    with open(post_body_data_file_path, 'w', encoding='utf8') as f:
        # json_str = json.dump(post_data, ensure_ascii=False, indent=4).encode('utf8')
        json.dump(post_data, f, ensure_ascii=False, indent=4)
    f.close()

    # 生成curl脚本
    curl_file_path = os.path.join(timestamp_folder, "curl_" + timestamp_str + ".sh")
    genetate_curl_sh(curl_file_path, post_data, config['api'])

    # 读取系统总体健康度的文件
    system_health_df = pd.read_csv(config['sys'])
    system_health_df.set_index("时间", inplace=True)
    sys_health = system_health_df.loc[timestamp, '系统健康度']
    response_data['system'] = sys_health
    # 对key进行重新排序
    key_list = ['app', 'crm', 'db', 'host', 'kafka', 'system']
    temp_dict = response_data
    response_data = {}
    response_data['code'] = 1
    response_data['data'] = {}
    for key in key_list:
        response_data['data'][key] = temp_dict[key]
    response_data['msg'] = 'success'
    response_data_file_path = os.path.join(timestamp_folder, "response_data_" + timestamp_str + ".json")
    with open(response_data_file_path, 'w', encoding='utf8') as f:
        json.dump(response_data, f, ensure_ascii=False, indent=4)
    f.close()

def main():
    # 定义参数
    args = parse_args()
    with open(args.config, 'r') as fc:
        config = yaml.load(fc, Loader=yaml.FullLoader)
    fc.close()
    for timestamp in config['times']:
        process(timestamp, config)

if __name__ == "__main__":
    main()