# -*- encoding=utf-8 -*-
"""
# **********************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# [oecp] is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
#     http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
# Author:
# Create: 2021-10-15
# Description: similarity
# **********************************************************************************
"""

import logging
import os

from oecp.result.constants import *
from oecp.proxy.rpm_proxy import RPMProxy
from oecp.result.test_result import *
from oecp.result.json_result import *

# Module-level logger shared by all similarity helpers in this file.
logger = logging.getLogger("oecp")

# Maps a package "compare result" code to the tally bucket it feeds in
# rpm_count: '1', '1.1' and '2' count as "same", '3' counts as "diff".
# '4' ("less") and '5' ("more") never reach the lookup because rpm_count
# skips rows whose compare result is greater than 3.
PKG_NAME = {
        '1': "same",
        '1.1': "same",
        '2': "same",
        '3': "diff",
        '4': "less",
        '5': "more"
    }
# Key packages whose name mismatches (compare result > 2) are collected by
# rpm_count and surfaced as the similarity "remark".
MARK_PKG = {'gcc', 'glibc', 'kernel'}

def get_similarity(rows, side_a, side_b):
    """Aggregate per-dimension similarity scores for a comparison run.

    :param rows: comparison results; maps a node name either to a list of
        top-level rows or to a dict of {compare type: [row, ...]}.
    :param side_a: column-name prefix of the baseline side.
    :param side_b: column-name prefix of the side under comparison.
    :return: dict with "remark", "level1 pkg", "level2 pkg", rpm-test,
        performance and one rate per counted compare type.
    """
    similarity = {}
    count = {}
    count[CMP_TYPE_RPM_LEVEL], similarity['remark'] = rpm_count(rows, side_a, side_b)
    count[CMP_TYPE_RPMS_TEST] = rpm_test_count(rows.get(CMP_TYPE_RPM))

    for results in rows.values():
        # list values are top-level rows, already handled by the helpers above
        if isinstance(results, list):
            continue
        for rpm_type, rpm_results in results.items():
            if rpm_type not in SIMILARITY_TYPES:
                continue
            for result in rpm_results:
                if rpm_type == CMP_TYPE_RPM_JABI:
                    # jabi rows carry a percentage string such as "97.5%"
                    count.setdefault(CMP_TYPE_RPM_JABI, [])
                    count[CMP_TYPE_RPM_JABI].append(float(result["compare result"][:-1]))
                else:
                    count_single_result(count, result, rpm_type)

    # level-1 rate uses category level 1 only; level-2 rate pools levels 1+2
    level_count = count.pop(CMP_TYPE_RPM_LEVEL)
    similarity["level1 pkg"] = count_rate(
        level_count[1]["same"], level_count[1]["same"] + level_count[1]["diff"])
    level2_same = level_count[1]["same"] + level_count[2]["same"]
    level2_diff = level_count[1]["diff"] + level_count[2]["diff"]
    similarity["level2 pkg"] = count_rate(level2_same, level2_same + level2_diff)

    test_count = count.pop(CMP_TYPE_RPMS_TEST)
    similarity[CMP_TYPE_RPMS_TEST] = count_rate(
        test_count["same"], test_count["same"] + test_count["diff"])

    similarity[CMP_TYPE_PERFORMANCE] = performance_rate(rows.get(CMP_TYPE_PERFORMANCE), side_b)

    for count_type, tallies in count.items():
        if count_type == CMP_TYPE_RPM_JABI:
            # tallies is a list of percentages; average, then scale to [0, 1]
            similarity[CMP_TYPE_RPM_JABI] = count_rate(sum(tallies), len(tallies)) / 100
        else:
            similarity[count_type] = count_rate(tallies["same"], tallies["same"] + tallies["diff"])

    return similarity

def count_single_result(count, result, cmp_type):
    """Tally one comparison row into count[cmp_type] as "same" or "diff".

    Creates the {"same": 0, "diff": 0} bucket on first use of *cmp_type*.
    """
    bucket = count.setdefault(cmp_type, {"same": 0, "diff": 0})
    verdict = "same" if result["compare result"] in RESULT_SAME else "diff"
    bucket[verdict] += 1

def rpm_count(rows, side_a, side_b):
    """Count package-name matches per category level and collect marked packages.

    :param rows: full comparison result mapping; per-package detail dicts are
        looked up by binary rpm package name.
    :param side_a: column-name prefix of the baseline side.
    :param side_b: column-name prefix of the side under comparison.
    :return: (count, mark_pkgs) where count maps category level 1/2 to
        {"same": n, "diff": n} and mark_pkgs lists flagged side-b packages.
    """
    count = {
        1: {"same": 0, "diff": 0},
        2: {"same": 0, "diff": 0},
    }
    mark_pkgs = []
    rpm_results = rows.get(CMP_TYPE_RPM)
    if not rpm_results:
        return count, mark_pkgs

    for result in rpm_results:
        if result["compare type"] != CMP_TYPE_RPM_LEVEL:
            continue

        pkg = result[side_a + " binary rpm package"]
        name = RPMProxy.rpm_name(pkg) if pkg else ''
        # flag key packages (gcc/glibc/kernel) whose compare result exceeds 2
        if name in MARK_PKG and float(result["compare result"]) > 2:
            mark_pkgs.append(result[side_b + " binary rpm package"])

        level = result["category level"]
        verdict = result["compare result"]
        # only category levels 1/2 are tallied; results beyond 3 are skipped
        if float(level) > 2 or float(verdict) > 3:
            continue
        if is_same_rpm(rows.get(pkg)):
            count[level][PKG_NAME[verdict]] += 1
        else:
            # package contents differ, so the pair counts as "diff" regardless
            count[level]["diff"] += 1
    return count, mark_pkgs

def is_same_rpm(rpm_cmp_result):
    """Return True when every detail row of a package comparison is "same".

    Rows of compare type "rpm files" whose result is "more" (extra files on
    one side) are ignored.  A missing/empty result dict counts as same.

    :param rpm_cmp_result: {compare type: [row, ...]} detail dict, or None.
    """
    if not rpm_cmp_result:
        return True
    # the compare-type key is irrelevant here; only the rows matter
    for detail_rows in rpm_cmp_result.values():
        for row in detail_rows:
            if row.get("compare type") == CMP_TYPE_RPM_FILES and row.get("compare result") == CMP_RESULT_MORE:
                continue
            if row.get("compare result") not in RESULT_SAME:
                return False
    return True

def rpm_test_count(results):
    """Tally rpm-test comparison rows into "same"/"diff" counters.

    :param results: iterable of comparison rows, or None when the rpm
        section is absent from the result set.
    :return: {"same": n, "diff": n}
    """
    count = {
        "same": 0,
        "diff": 0
    }
    # rows.get(CMP_TYPE_RPM) may be None (rpm_count guards the same value);
    # without this guard the loop below raises TypeError.
    if not results:
        return count
    for result in results:
        if result["compare type"] != CMP_TYPE_RPMS_TEST:
            continue
        verdict = result["compare result"]
        if verdict == "same":
            count["same"] += 1
        elif verdict == "diff":
            count["diff"] += 1
    return count


def performance_rate(results, side_b):
    """Weighted mean of the per-suite performance scores.

    Suite weights: lmbench3 15%, unixbench 50%, mysql 35%.  Suites with no
    scored metrics contribute nothing.
    """
    weights = {
        'lmbench3': 0.15,
        'unixbench': 0.5,
        'mysql': 0.35
    }
    suite_scores = perfomance_count(results, side_b)
    return sum(
        sum(scores) / len(scores) * weights[suite]
        for suite, scores in suite_scores.items()
        if scores
    )

def perfomance_count(results, side_b):
    """Score each filtered performance metric of *side_b* against baseline.

    :param results: list of performance rows with 'metric', 'baseline',
        'compare result' and a *side_b* column, or None when no performance
        comparison ran.
    :param side_b: column name of the side under comparison.
    :return: {'lmbench3': [score...], 'unixbench': [...], 'mysql': [...]};
        a score of 1.0 means "equal to baseline".
    """
    count = {
        'lmbench3': [],
        'unixbench': [],
        'mysql': []
    }
    if not results:
        return count
    small_better_reg = get_perf_reg()['small_better']
    perf_reg_path = os.path.join(os.path.dirname(__file__), "../conf/performance/perf-reg.json")
    perf_reg = load_json_result(perf_reg_path)
    # only metrics listed under "filter" in the reg file are scored
    perf_filter = tuple(perf_reg['filter'])

    for result in results:
        metric = result['metric']
        if metric not in perf_filter:
            continue
        if not result[side_b]:
            # logger.warn is a deprecated alias of warning; use lazy %-args
            logger.warning("%s missed the performance test: %s", side_b, result['metric'])
            continue

        side_b_result = result[side_b]
        baseline_result = result['baseline']
        cmp_result = result['compare result']
        if small_better(metric, small_better_reg):
            # smaller-is-better metric: score > 1 when below baseline
            if cmp_result == 'pass':
                score = (baseline_result - side_b_result) / baseline_result + 1
            else:
                score = 1 - (side_b_result - baseline_result) / baseline_result
        else:
            # bigger-is-better metric: plain ratio against baseline
            score = side_b_result / baseline_result
        if 'lmbench3' in metric:
            count['lmbench3'].append(score)
        elif 'unixbench' in metric:
            count['unixbench'].append(score)
        else:
            count['mysql'].append(score)
    return count

def count_rate(a, b):
    """Return a/b rounded to 4 decimal places, or 0 when b is zero/falsy."""
    if not b:
        return 0
    return round(a / b, 4)
