import logging
import math
from hashlib import md5
import pandas as pd
import jieba
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from commens import get_back_data, reParam
import copy

# from Levenshtein import distance as d
jieba.setLogLevel(logging.INFO)  # silence jieba's DEBUG chatter during tokenization
# Dedicated logger for this tool: everything (DEBUG+) goes to ALM_ALIGN.log
# (truncated on every run, UTF-8), while INFO+ is echoed to the console.
logALIGN = logging.getLogger("ALM_ALIGN")
logALIGN.setLevel(logging.DEBUG)
hdALIGN = logging.FileHandler("ALM_ALIGN.log", mode='w', encoding='utf-8')
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
hdALIGN.setLevel(logging.DEBUG)
logALIGN.addHandler(hdALIGN)
logALIGN.addHandler(ch)

# data: accumulated alignment rows (dicts built by specific_map);
# uni_set: md5 fingerprints of rows already stored, used by insert_data
# to deduplicate. dump_to_excel writes `data` out at the end of the run.
data = []
uni_set = set()


def insert_data(d):
    """Append row *d* to the global result list, skipping duplicates.

    A row is considered a duplicate when the md5 of its concatenated
    head/tail instance and attribute values was already recorded.
    """
    fingerprint = ''.join((d['头实例'], d['尾实例'], d['头实体属性值'], d['尾实例属性值']))
    digest = md5(fingerprint.encode('utf-8')).hexdigest()
    if digest not in uni_set:
        uni_set.add(digest)
        data.append(d)


def dump_to_excel(data, filename):
    """Write the collected rows to ``<filename>.xlsx`` and report the count."""
    out_path = f'{filename}.xlsx'
    pd.DataFrame(data).to_excel(out_path, index=False)
    print(f"生成{out_path} : {len(data)}")


def specific_map(alm, tmml, param_alm, param_tmml, relation, alm_params, tmml_params, way=-1):
    """Build one alignment row linking an alarm instance to an MML command.

    Anything after the first ``_`` in *tmml* is dropped (variant suffixes of
    the command identifier). ``way`` records which alignment pass produced
    the row (1 = exact name, 2 = similar name + meaning, -1 = no alignment).
    """
    underscore = tmml.find('_')
    if underscore != -1:
        tmml = tmml[:underscore]
    return {
        '头实例': alm,
        '头实例概念': '告警',
        '头实体属性值': param_alm,
        '头实体参数': alm_params,
        '关系': relation,
        '尾实例': tmml,
        '尾实例概念': 'MML命令',
        '尾实例属性值': param_tmml,
        '尾实体参数': tmml_params,
        '类型': way,
    }


def cosine_similarity(v1, v2):
    """Return the cosine similarity of *v1* and *v2*.

    Computed as (v1 . v2) / (||v1|| * ||v2||). Returns 0.0 when either
    vector has zero norm — the original index loop raised
    ZeroDivisionError in that case (possible for all-zero tf-idf rows).
    """
    sumxx = sum(x * x for x in v1)
    sumyy = sum(y * y for y in v2)
    sumxy = sum(x * y for x, y in zip(v1, v2))
    denom = math.sqrt(sumxx * sumyy)
    if denom == 0:
        return 0.0  # guard: no meaningful direction for a zero vector
    return sumxy / denom


def cal_tfidf(hparam, tparam_list):
    """Pick the tail parameter whose '参数含义' text best matches *hparam*'s.

    Texts are tokenized with jieba, vectorized to tf-idf, and compared by
    cosine similarity against the head parameter's meaning.

    :param hparam: head parameter dict; '参数含义' may be None
    :param tparam_list: candidate tail parameter dicts
    :return: (best-matching tail param, its cosine score), or (None, None)
             when either side lacks any usable '参数含义' text
    """
    head_has_meaning = hparam['参数含义'] is not None
    tail_has_meaning = any(t['参数含义'] is not None for t in tparam_list)
    if not (head_has_meaning and tail_has_meaning):
        return None, None

    def _tokenize(text):
        # Space-joined jieba tokens; edge-stripping kept as in the original.
        return ' '.join(jieba.cut(text.strip(' ').strip('\n').strip('\t')))

    # Row 0 of the corpus is the head text; rows 1.. are the tail texts
    # (only those with a non-None meaning, tracked in tparam_with_meaning).
    corpus = [_tokenize(hparam['参数含义'])]
    tparam_with_meaning = []
    for tparam in tparam_list:
        if tparam['参数含义'] is not None:
            corpus.append(_tokenize(tparam['参数含义']))
            tparam_with_meaning.append(tparam)

    vectorizer = CountVectorizer()  # term-frequency matrix over the corpus
    tfidf = TfidfTransformer().fit_transform(vectorizer.fit_transform(corpus))
    weight = tfidf.toarray()  # weight[i][j]: tf-idf of term j in text i

    # Cosine of the head row against every tail row; a single max replaces
    # the original full sort + assert.
    cos_scores = [cosine_similarity(weight[0], row) for row in weight[1:]]
    best = max(range(len(cos_scores)), key=cos_scores.__getitem__)
    return tparam_with_meaning[best], cos_scores[best]


def min_and_index(list):
    """Return ``(smallest value, index of its first occurrence)``.

    Fixes the original sentinel bug: values >= 1111 could never win because
    the running minimum started at 1111, so such lists silently returned
    (1111, 0). An empty input still returns (1111, 0) for backward
    compatibility with the original behaviour.
    """
    if not list:
        return 1111, 0
    best = min(range(len(list)), key=list.__getitem__)
    return list[best], best


def max_and_index(list):
    """Return ``(largest value, index of its first occurrence)``.

    Fixes the original sentinel bug: values <= -1111 could never win because
    the running maximum started at -1111. An empty input still returns
    (-1111, 0) for backward compatibility with the original behaviour.
    """
    if not list:
        return -1111, 0
    best = max(range(len(list)), key=list.__getitem__)
    return list[best], best


def getMaxCommonSubstr(s1, s2):
    """Return the length of the longest common contiguous substring.

    Classic DP where cell (i+1, j+1) counts the run of equal characters
    ending at s1[i]/s2[j]. Only two rows are kept (the full table and the
    end-position bookkeeping of the original were unused once the substring
    return was commented out). Inputs are str()-converted, so non-string
    values (e.g. ints) are compared by their textual form.
    """
    s1, s2 = str(s1), str(s2)
    longest = 0
    prev = [0] * (len(s2) + 1)  # previous DP row (leading 0 sentinel column)
    for ch1 in s1:
        curr = [0] * (len(s2) + 1)
        for j, ch2 in enumerate(s2):
            if ch1 == ch2:
                curr[j + 1] = prev[j] + 1  # extend the diagonal run
                if curr[j + 1] > longest:
                    longest = curr[j + 1]
        prev = curr
    return longest


def containAllOrderedChars(str1, str2):
    """Return True if *str2* is a subsequence of *str1*.

    All characters of str2 must appear in str1 in the same order, not
    necessarily contiguously. E.g. str1='a1s2d3a1s2d3', str2='asdasd'
    -> True.

    Bug fixed: the original indexed ``str2[0]`` before checking whether
    str2 was empty, so an empty pattern against a non-empty str1 raised
    IndexError. An empty str2 is trivially a subsequence -> True.
    """
    if len(str2) > len(str1):
        return False
    chars = iter(str1)
    # `c in chars` advances the iterator past the match, enforcing order.
    return all(c in chars for c in str2)


def jointContentMatch(jointContent, mml, param_list):
    """Extract the parameter name referenced by a cross-command sentence.

    :param jointContent: sentence, e.g. 该参数与ADD 5GCSUBQOS中的“SUBQOSINDEX”参数相等。
    :param mml: MML command identifier, e.g. ADD 5GCSUBQOS
    :param param_list: parameter names of that MML command
    :return: the parameter from param_list named in the sentence, or None

    The sentence is split on Chinese commas (ASCII commas and full stops are
    normalized first). Only segments that mention both the command and one
    of the equivalence keywords are considered; the last such segment wins.
    Segments containing a negation (不) are rejected. Longer parameter names
    are tried first so a substring name cannot shadow a longer match.
    """
    keywords = ('相等', '相同', '一致', '对应')
    normalized = jointContent.replace(',', '，').replace('。', '，')
    # Keep every segment that names the command and uses an equivalence word.
    matching = [
        seg for seg in normalized.split('，')
        if mml in seg and any(kw in seg for kw in keywords)
    ]
    if not matching:
        return None

    candidate = matching[-1].replace(mml, '')
    if '不' in candidate:
        return None  # negated statement — not an equivalence
    for param in sorted(param_list, key=len, reverse=True):
        if param in candidate:
            return param
    return None


def main():
    """Align alarm parameters with the parameters of related MML commands.

    For every (alarm, tail MML command) pair two passes are tried:
    exact parameter-name match (type 1), then similar name (long common
    substring) plus tf-idf-related meaning (type 2). Pairs with no aligned
    parameter are recorded once with relation '相关' (type -1). Rows are
    accumulated via insert_data() into the module-level `data` list.
    """
    global data
    global uni_set
    jsonData = get_back_data('ALM_EXTRACT_1.json')
    total, total_nouse = 0, 0
    total_1, total_2, total_3 = 0, 0, 0
    for alm, relation in jsonData.items():
        alm_params = relation["参数"]
        alm_params_ori = copy.copy(alm_params)
        if alm_params is None or len(alm_params) == 0:
            continue
        alm_params_name = [alm_param['参数名称'] for alm_param in alm_params]

        for tmml, params in relation['尾命令'].items():
            total += 1
            useFlag = False
            jointContent = params['关联内容']

            tpl = params['参数']
            tpl_ori = copy.copy(tpl)
            if tpl is None or len(tpl) == 0:
                continue

            # Prefer tail parameters mentioned in the joining text; fall
            # back to all of them. Use a copy so the removals below do not
            # mutate the loaded source data (the original aliased tpl).
            candidate_tpl = [t for t in tpl if jointContent.find(t['参数名称']) != -1]
            if len(candidate_tpl) == 0:
                candidate_tpl = list(tpl)

            # Pass 1: exact parameter-name alignment (type 1). Iterate a
            # snapshot — removing from candidate_tpl while iterating it
            # directly (as the original did) skips elements.
            for tparam in list(candidate_tpl):
                if tparam['参数名称'] in alm_params_name:
                    insert_data(specific_map(alm, tmml, tparam['参数名称'], tparam['参数名称'], 'use', alm_params_ori, tpl_ori, 1))
                    total_1 += 1
                    useFlag = True
                    candidate_tpl.remove(tparam)
                    alm_params.pop(alm_params_name.index(tparam['参数名称']))
                    alm_params_name = [alm_param['参数名称'] for alm_param in alm_params]

            # Pass 2: similar name + related meaning (type 2). Snapshots
            # again, and break once an alarm parameter is consumed — the
            # original kept comparing a removed alm_param and a second
            # match raised ValueError on the repeated alm_params.remove().
            if len(candidate_tpl) != 0 and len(alm_params) != 0:
                for alm_param in list(alm_params):
                    alm_param_name = alm_param['参数名称']
                    for tparam in list(candidate_tpl):
                        tparam_name = tparam['参数名称']
                        # Names share a long common substring (>80% of the
                        # shorter name) before meanings are compared.
                        if getMaxCommonSubstr(alm_param_name, tparam_name) / min(len(alm_param_name), len(tparam_name)) > 0.8:
                            matchp, score = cal_tfidf(alm_param, [tparam])
                            if matchp is not None and score > 0.50:
                                insert_data(specific_map(alm, tmml, alm_param_name, tparam_name, 'use', alm_params_ori, tpl_ori, 2))
                                total_2 += 1
                                useFlag = True
                                candidate_tpl.remove(tparam)
                                alm_params.remove(alm_param)
                                break  # this alm_param is consumed

            if useFlag is False:
                total_nouse += 1
                insert_data(specific_map(alm, tmml, " ", " ", '相关', alm_params_ori, tpl_ori, -1))

    print(total)
    print(total_nouse)
    print(total_1, total_2, total_3)


if __name__ == '__main__':
    # Run the alignment, then persist the accumulated rows to ALM_ALIGN.xlsx.
    main()
    dump_to_excel(data, 'ALM_ALIGN')