from typing import Text
import yaml
import pandas as pd
import numpy as np
import argparse
import os
import time
import sys
import json
import math
import base64
import hashlib
import requests
import uuid
import random
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error

def args_parser():
    """Parse the command-line arguments of the evaluation script.

    Returns:
        argparse.Namespace with `config` (test config YAML path) and
        `times` (optional modified-times YAML path).
    """
    parser = argparse.ArgumentParser("集中化统IT智能决策系统模型评估脚本")
    parser.add_argument(
        "-c", "--config",
        type=str,
        default="./configs/evaluation_test.yaml",
        help="模型测试配置文件",
    )
    parser.add_argument(
        "-t", "--times",
        default="./configs/times.yaml",
    )
    return parser.parse_args()

def get_request_timestamp(timestamps):
    """Expand the configured `times` list into the timestamps to evaluate.

    - 0 or 1 entries: returned unchanged (single-timestamp test).
    - exactly 2 entries: treated as [start, end] and expanded to an
      inclusive range at 5-minute steps, formatted 'YYYY/MM/DD HH:MM:SS'.
    - 3 or more entries: treated as an explicit user-supplied list and
      returned unchanged.
    """
    if len(timestamps) == 2:
        begin, finish = timestamps
        rng = pd.date_range(start=begin, end=finish, freq='5T')
        return [ts.strftime("%Y/%m/%d %H:%M:%S") for ts in rng]
    return timestamps

def get_request_body_datas(timestamps, sys_file, dim_files):
    """Build one request body per requested timestamp from the ground-truth CSVs.

    Args:
        timestamps: raw `times` list from the config; expanded by
            get_request_timestamp.
        sys_file: path (or file-like) of the system-level health CSV; only
            the first two columns are kept and one of them must be '时间'.
        dim_files: mapping of dimension key ('host', 'app', 'crm', 'db',
            'kafka') to that dimension's health CSV; only the first seven
            columns are kept and one of them must be '时间'.

    Returns:
        Tuple of (request_timestamps, valid_request_timestamps,
        request_body_datas, gt_sys_df, gt_dim_df).

    Raises:
        ValueError: if `dim_files` contains a key outside the known set.
    """
    request_timestamps = get_request_timestamp(timestamps)

    # Ground truth of the system-level health, restricted to the requested
    # timestamps.
    gt_sys_df = pd.read_csv(sys_file)
    gt_sys_df = gt_sys_df[gt_sys_df.columns[0:2]]
    gt_sys_df = gt_sys_df[gt_sys_df['时间'].isin(request_timestamps)]
    gt_sys_df = gt_sys_df.reset_index(drop=True)

    # Ground truth of each dimension's health, one DataFrame per dimension key.
    gt_dim_df = {}
    for key, dim_file_path in dim_files.items():
        # TODO: some resource names carry trailing spaces -- should they be
        # stripped here?
        dim_df = pd.read_csv(dim_file_path)
        dim_df = dim_df[dim_df.columns[0:7]]
        dim_df = dim_df[dim_df['时间'].isin(request_timestamps)]
        dim_df = dim_df.sort_values('时间')
        gt_dim_df[key] = dim_df.reset_index(drop=True)

    # Build the request bodies; timestamps absent from the system-level GT are
    # skipped (only `valid_request_timestamps` get a body).
    request_body_datas = []
    valid_request_timestamps = []
    for timestamp in request_timestamps:
        if not timestamp in gt_sys_df['时间'].tolist():
            continue
        valid_request_timestamps.append(timestamp)
        post_data = {}
        post_data['time'] = timestamp
        for key, dim_df in gt_dim_df.items():
            dim_df = dim_df[dim_df['时间'] == timestamp]
            dim_values = dim_df.values
            dim_values_1_2 = dim_values[:, 0:2]     # resource id and resource name
            # Select the three metric columns for this dimension.
            if key in ['host', 'app', 'crm', 'db']:
                dim_values_3 = dim_values[:, 3]
                dim_values_4 = dim_values[:, 4]
                dim_values_5 = dim_values[:, 5]
            elif key in ['kafka']:  # middleware: metric columns come in reversed order
                dim_values_3 = dim_values[:, 5]
                dim_values_4 = dim_values[:, 4]
                dim_values_5 = dim_values[:, 3]
            else:
                # Previously an unknown key silently reused the metric columns
                # of the preceding dimension (or raised NameError on the first
                # iteration); fail loudly instead.
                raise ValueError(f"unknown dimension key: {key}")
            dim_values_3 = dim_values_3[:, np.newaxis]
            dim_values_4 = dim_values_4[:, np.newaxis]
            dim_values_5 = dim_values_5[:, np.newaxis]

            # Per-resource rows of [id, name, metric1, metric2, metric3].
            dim_values = np.concatenate([dim_values_1_2, dim_values_3, dim_values_4, dim_values_5], axis=1)
            post_data[key] = dim_values.tolist()

        request_body_datas.append(post_data)

    return request_timestamps, valid_request_timestamps, request_body_datas, gt_sys_df, gt_dim_df


def create_header(appID, appKey, Path, uuid_str, TDEV):
    """Build the authentication headers required by the inference gateway.

    Args:
        appID: application id issued by the platform.
        appKey: secret key; mixed into the MD5 checksum, never sent directly.
        Path: request path; its first segment becomes the capability name,
            right-padded with '0' to 24 characters.
        uuid_str: 32-char hex request id. Previously this parameter was
            ignored in favour of a hard-coded uuid; it is now used so each
            request carries the caller-generated id.
        TDEV: clock-skew offset in seconds added to the local time.

    Returns:
        dict with 'X-Server-Param', 'X-CurTime', 'X-CheckSum' and
        'Content-Type' entries.
    """
    appName = Path.split('/')[1]
    # Capability name must be exactly 24 characters, zero-padded on the right.
    capabilityname = appName.ljust(24, "0")
    csid = appID + capabilityname + uuid_str
    tmp_xServerParam = {
        "appid": appID,
        "csid": csid
    }
    # Current unix time (whole seconds), adjusted by the configured clock skew.
    xCurTime = str(int(math.floor(time.time())) + TDEV)
    # Base64-encoded JSON of the server parameters.
    xServerParam = str(base64.b64encode(json.dumps(
        tmp_xServerParam).encode('utf-8')), encoding="utf8")
    # Checksum over key + timestamp + encoded params, per the gateway protocol.
    xCheckSum = hashlib.md5(
        bytes(appKey + xCurTime + xServerParam, encoding="utf8")).hexdigest()

    header = {
        "X-Server-Param": xServerParam,
        "X-CurTime": xCurTime,
        "X-CheckSum": xCheckSum,
        'Content-Type': 'application/json'
    }

    return header

def sent_request(url, path, TDEV, appid, appkey, body, try_num=5, timeout=None):
    """POST one request body to the unified AI platform, retrying on
    connection errors.

    Args:
        url: base URL of the platform.
        path: request path, appended to `url`.
        TDEV: clock-skew offset forwarded to create_header.
        appid: platform application id.
        appkey: platform secret key.
        body: JSON-serializable request body; must contain a 'time' key
            (used in retry log messages).
        try_num: maximum number of attempts on connection failure.
        timeout: optional `requests` timeout in seconds; the default None
            keeps the previous behaviour (wait indefinitely).

    Returns:
        (True, response_data, elapsed_seconds) on success, or
        (False, error_message, None) on HTTP error / connection failure.
        A 3-tuple is ALWAYS returned: the previous implementation fell off
        its retry loop and returned None once retries were exhausted, which
        crashed the caller's tuple unpacking.
    """
    # Fresh 32-char hex request id for this call.
    uid = str(uuid.uuid4())
    suid = ''.join(uid.split('-'))

    request_header = create_header(appid, appkey, path, suid, TDEV)

    # Rotate a random desktop User-Agent per request.
    user_agent_list = ["Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
                    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
                    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
                    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
                    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
                    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
                    "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",]
    request_header['User-Agent'] = random.choice(user_agent_list)

    last_error = None
    for attempt in range(try_num):
        try:
            start_time = time.time()
            result = requests.post(url + path, data=json.dumps(body), headers=request_header, timeout=timeout)
            if result.status_code != requests.codes.ok:  # reached the server but it reported an error
                return False, "错误信息:" + str(result.status_code) + "-" + result.reason, None

            response_data = json.loads(result.text.encode('utf-8').decode('unicode_escape'))
            return True, response_data, time.time() - start_time
        except requests.exceptions.ConnectionError as ex:
            last_error = ex
            remaining = try_num - attempt - 1
            if remaining > 0:
                print("{} :连接失败，剩余尝试次数:{}".format(body['time'], remaining))
                print(" ")
                time.sleep(0.5)

    # All attempts failed with connection errors.
    print("Failed to retrieve: " + url + path + "\n" + str(last_error))
    print(" ")
    return False, "错误信息: 无法连接服务器", None

def process_response_data(response_data, timestamp, final_sys_df, final_dim_df):
    """Write one response's inferred health scores into the result DataFrames.

    Args:
        response_data (dict): platform response; `response_data['data']` maps
            'system' to the system-level score and each dimension key to a
            list of {'id', 'score'} entries.
        timestamp (str): timestamp of the request this response belongs to.
        final_sys_df (pandas.DataFrame): system-level result table with a
            '时间' column; gains/updates a '推理结果' column in place.
        final_dim_df (dict[str, pandas.DataFrame]): per-dimension result
            tables (resource id in column 0, '时间' column present);
            gain/update a '推理结果' column in place.
    """
    # System-level inferred health for this timestamp.
    infer_sys = response_data['data']['system']
    final_sys_df.loc[final_sys_df['时间'] == timestamp, '推理结果'] = infer_sys
    # Per-dimension, per-instance inferred health.
    for key, key_values in response_data['data'].items():
        if key in ['system']:
            continue
        df = final_dim_df[key]
        for value in key_values:
            # Fix: also match on the timestamp. The original mask filtered
            # only by resource id, so each call overwrote the score for that
            # id at EVERY timestamp, leaving only the last response's values.
            mask = (df.iloc[:, 0] == value['id']) & (df['时间'] == timestamp)
            df.loc[mask, '推理结果'] = value['score']
def process_response_data_v2(response_data, timestamp, pred_sys, pred_dim):
    """Collect one response's inferred health scores into flat prediction lists.

    Args:
        response_data (dict): platform response; `response_data['data']` maps
            'system' to the system-level score and each dimension key to a
            list of {'id', 'score'} entries.
        timestamp (str): timestamp of the request this response belongs to.
        pred_sys (list): accumulator; gains one [timestamp, score] row.
        pred_dim (dict[str, list]): per-dimension accumulators; each gains
            [resource_id, timestamp, score] rows.
    """
    data = response_data['data']
    # System-level inferred health for this timestamp.
    pred_sys.append([timestamp, data['system']])
    # Per-dimension, per-instance inferred health.
    for dim_key, instances in data.items():
        if dim_key == 'system':
            continue
        rows = pred_dim[dim_key]
        for inst in instances:
            rows.append([inst['id'], timestamp, inst['score']])

def regression_accuracy(y_true, y_pred, tolerate=5.0):
    """Accuracy of a regressor, treated as a classification problem.

    A prediction counts as correct when its absolute error is strictly
    below `tolerate`.

    Args:
        y_true: sequence of ground-truth values.
        y_pred: sequence of predicted values, aligned with `y_true`.
        tolerate: absolute-error threshold for a "correct" prediction.

    Returns:
        Fraction of predictions within tolerance; 0.0 for empty input
        (previously this raised ZeroDivisionError).
    """
    if len(y_true) == 0:
        return 0.0
    close_count = sum(1 for y, y_hat in zip(y_true, y_pred) if abs(y - y_hat) < tolerate)
    return close_count / len(y_true)

def get_modified_times_array(modified_times_dict):
    """Flatten {name: [start, end]} time ranges into one list of timestamps.

    Each value is a [start, end] pair, expanded inclusively at a 5-minute
    frequency and formatted as 'YYYY/MM/DD HH:MM:SS'.

    Args:
        modified_times_dict: mapping whose values are 2-element
            [start, end] sequences accepted by pandas.date_range.

    Returns:
        list[str]: all expanded timestamps, in dict insertion order.
    """
    modified_times = []
    for item in modified_times_dict.values():
        # The original used a list comprehension purely for its append side
        # effect, building and discarding a throwaway list; extend() states
        # the intent directly.
        modified_times.extend(
            ts.strftime("%Y/%m/%d %H:%M:%S")
            for ts in pd.date_range(start=item[0], end=item[1], freq='5T'))
    return modified_times

def apply_on_each_row(row, modified_times):
    """Mirror a row's inferred score around its ground truth for selected times.

    For rows whose '时间' falls in `modified_times`:
      - ground truth >= 85 and prediction >= truth: reflect the prediction
        downward below the truth;
      - ground truth < 85 and prediction < truth: reflect the prediction
        upward above the truth.
    Other rows (and predictions already on the "right" side) are untouched.

    Args:
        row: mapping/Series with '时间', '健康度' and '推理结果' entries;
            '推理结果' is updated in place when a reflection applies.
        modified_times: collection of timestamp strings to adjust.

    Returns:
        The (possibly adjusted) '推理结果' value.
    """
    if row['时间'] not in modified_times:
        return row['推理结果']
    truth = row['健康度']
    pred = row['推理结果']
    if truth >= 85:
        if pred >= truth:
            row['推理结果'] = truth - (pred - truth)
    else:
        if pred < truth:
            row['推理结果'] = truth + (truth - pred)
    return row['推理结果']


def main():
    """Evaluation driver.

    Loads the YAML config, builds one request body per timestamp from the
    ground-truth CSVs, posts them to the unified AI platform, joins the
    returned health scores with the ground truth, writes per-dimension CSV
    reports and prints overall RMSE / MAE / R2 / ACC metrics.
    """
    # Read the configuration file.
    args = args_parser()
    config_file = args.config
    assert config_file != None, "config参数为必填参数"
    with open(config_file, 'r') as f:
        configs = yaml.load(f, Loader=yaml.SafeLoader)
        f.close()
    # print(configs)

    # Optional "modified times" file: predictions that fall inside these time
    # ranges are mirrored around the ground truth by apply_on_each_row below.
    modified_times = None
    if os.path.exists(args.times):
        with open(args.times, 'r') as f:
            modified_times = yaml.load(f, Loader=yaml.SafeLoader)
            f.close()
        modified_times = get_modified_times_array(modified_times)
    else:
        modified_times = None
    # return None

    print("-"*100)
    print("开始测试...")
    # (Re)create the directory that receives the evaluation results.
    # NOTE(review): configs['output'] is spliced into a shell command; a path
    # containing spaces or shell metacharacters could delete unintended files.
    # shutil.rmtree would be safer -- confirm before changing.
    if not os.path.exists(configs['output']):
        # os.mkdir(configs['output'])
        os.makedirs(configs['output'])
    else:
        os.system("rm -rf " + configs['output'] +"/*")
        os.system("rm -rf " + configs['output'])
        # os.mkdir(configs['output'])
        os.makedirs(configs['output'])
    
    # Prepare the list of request bodies.
    print("开始筛选数据并生成请求体...")
    request_timestamps, valid_request_timestamps, request_body_datas, gt_sys_df, gt_dim_df \
        = get_request_body_datas(configs['times'], configs['GT']['sys'], configs['GT']['dim_files'])
    print(f"{len(request_timestamps)}个时间戳用于测试，成功生成 {len(request_body_datas)} 个请求体，失败 {len(request_timestamps) - len(request_body_datas)} 个")
    if len(request_body_datas) <= 0:
        print("未成功生成请求体，测试提前结束，请检查配置文件中的times字段设置是否正确")
        sys.exit(0)
    # with open("./test.json", 'w') as f:
    #     json.dump(request_body_datas[0], f, ensure_ascii=False, indent=4)
    #     f.close()

    # Send the requests to the unified AI platform, one per timestamp.
    print(" ")
    print("开始向统一AI平台发起请求...")
    request_cost_times = []
    URL = configs['URL']
    PATH = configs['PATH']
    TDEV = configs['TDEV']
    APPID = configs['APPID']
    APPKey = configs['APPKey']
    success_timestamps = []  # timestamps whose request succeeded
    success_response = []   # response payloads of the successful requests
    failed_timestamps = []  # timestamps whose request failed
    failed_reasons = []     # failure reasons, aligned with failed_timestamps
    for idx in range(len(request_body_datas)):
        post_body = request_body_datas[idx]
        tips = f"正在测试第{idx+1}/{len(request_body_datas)}个请求"
        if len(request_cost_times) >=1:
            tips += ", 预计还需{:.2f}s完成所有请求".format(np.mean(request_cost_times)*(len(request_body_datas) - idx))
        print("\t"+tips, end="\r", flush=True)
        # NOTE(review): `time` here shadows the `time` module for the rest of
        # this function (harmless today -- the module is not used again in
        # main -- but a rename would be safer).
        res, response_data, time= sent_request(URL, PATH, TDEV, APPID, APPKey, post_body)
        if res: # request succeeded
            success_timestamps.append(post_body['time'])
            success_response.append(response_data)
            request_cost_times.append(time)
        else: # request failed
            print(f"第{idx+1}个请求失败，错误原因:{response_data}")
            failed_timestamps.append(post_body['time'])
            failed_reasons.append(response_data)
    print(" ")
    print("成功{}个请求，失败{}个请求".format(len(success_timestamps), len(failed_timestamps)))
    if len(failed_timestamps) > 0:
        failed_out_txt = configs['output']+"/failed.txt"
        print("失败请求信息可以查看{}".format(failed_out_txt))
        with open(failed_out_txt, 'w') as f:
            for _time, reason in zip(failed_timestamps, failed_reasons):
                f.write(f"{_time}\t{reason}\n")
            f.close()
    print("统一AI平台请求测试完成，共耗时{:.2f}s".format(np.sum(request_cost_times)))

    print(" ")
    print("开始对测试结果进行统计分析...", end="\r")
    print()
    # final_sys_df = gt_sys_df.copy()
    # final_sys_df['推理结果'] = ''
    # final_dim_df = {}
    # for key, dim_df in gt_dim_df.items():
    #     final_dim_df[key] = dim_df.copy()
    #     final_dim_df[key]['推理结果'] = ''
    

    # count = 1
    # for _time , _data in zip(success_timestamps, success_response):
    #     print("\t分析进度:{:.2f}%".format(count*100/len(success_timestamps)), end="\r", flush=True)
    #     process_response_data(_data, _time, final_sys_df, final_dim_df)
    #     count += 1
    # print(" ")

    # Accumulate predictions as flat lists (v2 path), then join onto the GT.
    pred_sys = []
    pred_dim = {}
    for key, dim_df in gt_dim_df.items():
        pred_dim[key] = []
    
    count = 1
    for _time , _data in zip(success_timestamps, success_response):
        print("\t分析进度:{:.2f}%".format(count*100/len(success_timestamps)), end="\r", flush=True)
        process_response_data_v2(_data, _time, pred_sys, pred_dim)
        count += 1
    print(" ")
    # print(pred_sys)
    # print(pred_dim)
    # Inner-join predictions with the ground truth: on the timestamp column for
    # the system level, on (resource id, timestamp) for each dimension.
    final_sys_df = pd.merge(gt_sys_df, pd.DataFrame(pred_sys, columns=[gt_sys_df.columns[0], "推理结果"]), how="inner", on=[gt_sys_df.columns[0]])
    final_dim_df = {}
    for key, dim_df in gt_dim_df.items():
        final_dim_df[key] = \
        pd.merge(dim_df, 
                pd.DataFrame(pred_dim[key], columns=[dim_df.columns[0], dim_df.columns[2], "推理结果"]),
                how="inner",
                on=[dim_df.columns[0], dim_df.columns[2]])
    # print(final_sys_df)
    # print(final_dim_df)



    # Save the analysis results as CSVs in the output directory; optionally
    # mirror predictions inside the configured "modified times" ranges first.
    print(f"保存分析结果到{configs['output']}")
    final_sys_df.to_csv(f"{configs['output']}/系统健康度分析结果.csv", index=False)
    key_names ={"app":"应用健康度", "crm": "业务健康度", "db":"数据库健康度", "host":"主机健康度", "kafka":"中间件健康度"}
    for key, dim_df in final_dim_df.items():
        # final_dim_df[key].to_csv(f"{configs['output']}/{key_names[key]}分析结果.csv", index=False)
        if modified_times != None:
            # print(key)
            dim_df['推理结果'] = dim_df.apply(apply_on_each_row, axis=1, args=[modified_times])
        dim_df.to_csv(f"{configs['output']}/{key_names[key]}分析结果.csv", index=False)
    
    # Overall metrics: with >1 successful responses, score the system level;
    # otherwise aggregate all dimension rows into one pool and score that.
    print("\033[1;35m总体推理结果评价为: \033[0m")
    metric_values = None
    if len(success_response) > 1:
        # print("\033[1;35m系统层级的推理结果评价为: \033[0m")
        rmse = np.sqrt(mean_squared_error(final_sys_df.iloc[:, -2], final_sys_df.iloc[:, -1]))
        mae = mean_absolute_error(final_sys_df.iloc[:, -2], final_sys_df.iloc[:, -1])
        r2 = r2_score(final_sys_df.iloc[:, -2], final_sys_df.iloc[:, -1])
        acc = regression_accuracy(final_sys_df.iloc[:, -2], final_sys_df.iloc[:, -1], tolerate=configs['class_tolerate'])
        print(" RMSE:{:.2f},  MAE:{:.2f}, R2:{:.2f}, ACC:{:.2f}".format(rmse, mae, r2, acc))
        # NOTE(review): metric_values is always None at this point, so the
        # else-branch below never runs -- likely leftover from the per-dimension
        # loop commented out further down; confirm before removing.
        if metric_values is None:
            metric_values = final_sys_df.iloc[:, -2:].values
        else:
            metric_values = np.concatenate((metric_values, final_sys_df.iloc[:, -2:].values))
    else:
        for key, dim_df in final_dim_df.items():
            if metric_values is None:
                metric_values = dim_df.iloc[:, -2:].values
            else:
                metric_values = np.concatenate((metric_values, dim_df.iloc[:, -2:].values))
        rmse = np.sqrt(mean_squared_error(metric_values[:, -2], metric_values[:, -1]))
        mae = mean_absolute_error(metric_values[:, -2], metric_values[:, -1])
        r2 = r2_score(metric_values[:, -2], metric_values[:, -1])
        acc = regression_accuracy(metric_values[:, -2], metric_values[:, -1], tolerate=configs['class_tolerate'])
        print(" RMSE:{:.2f},  MAE:{:.2f}, R2:{:.2f}, ACC:{:.2f}".format(rmse, mae, r2, acc))

    # for key, dim_df in final_dim_df.items():
    #     print("\033[1;35m{}推理结果评价为: \033[0m".format(key_names[key]))
    #     rmse = np.sqrt(mean_squared_error(dim_df.iloc[:, -2], dim_df.iloc[:, -1]))
    #     mae = mean_absolute_error(dim_df.iloc[:, -2], dim_df.iloc[:, -1])
    #     r2 = r2_score(dim_df.iloc[:, -2], dim_df.iloc[:, -1])
    #     acc = regression_accuracy(dim_df.iloc[:, -2], dim_df.iloc[:, -1], tolerate=5.0)
    #     print(" RMSE:{:.2f},  MAE:{:.2f}, R2:{:.2f}, ACC:{:.2f}".format(rmse, mae, r2, acc))
    #     if metric_values is None:
    #         metric_values = dim_df.iloc[:, -2:].values
    #     else:
    #         metric_values = np.concatenate((metric_values, dim_df.iloc[:, -2:].values))
    
    print(" ")
    print("共完成{}条记录的测试，每条记录平均耗时{:.2f}s".format(len(success_response), np.mean(request_cost_times)))
    print("测试结果分析完成")
    print("-"*100)

# Script entry point.
if __name__ == "__main__":
    main()