from collections import Counter
import CONST


def get_mid_key_and_sa(data_include_seed, mid_num=None):
    """
    Get the mediator keywords and their sa values.

    :param data_include_seed: flat list of words taken from entries that
        contain the seed keyword
    :param mid_num: number of mediator keywords to return; defaults to
        CONST.NUM_MID_KEY, resolved at call time rather than import time
    :return: {'mediator keyword': sa value}
    """
    # Resolve the default lazily so the configured constant is read when the
    # function runs, instead of being frozen when the module is imported.
    if mid_num is None:
        mid_num = CONST.NUM_MID_KEY

    # Count every word's frequency, then sort by frequency (descending).
    count_result_sort = sorted(Counter(data_include_seed).items(), key=lambda x: x[1], reverse=True)

    # Return mid_num mediator keywords; index 0 is the seed keyword itself
    # (the most frequent word), so skip it.
    return dict(count_result_sort[1: mid_num + 1])


def get_ka_and_a(data_exclude_seed, sa, competitive_num=None):
    """
    Get the competitive keywords (ka) and mediator keyword counts (a).

    :param data_exclude_seed: list of entries (each a list of words) that do
        NOT contain the seed keyword
    :param sa: {'mediator keyword': sa value}
    :param competitive_num: total number of competitive keywords to collect
        across all mediator keywords; defaults to CONST.NUM_COMPETITIVE_KEY,
        resolved at call time rather than import time
    :return: ka: {'mediator keyword': [(competitive keyword, ka value), ...]}
             a:  {'mediator keyword': a value}
    """
    # Resolve the default lazily so the configured constant is read when the
    # function runs, instead of being frozen when the module is imported.
    if competitive_num is None:
        competitive_num = CONST.NUM_COMPETITIVE_KEY

    mid_key = list(sa.keys())  # mediator keyword names

    # a starts from the sa counts and keeps accumulating occurrences found in
    # the exclude-seed entries; the caller later subtracts sa back out to get
    # the exclude-seed-only count.
    a = dict(sa)
    # One word list per mediator keyword, used for frequency counting below.
    wordlist = {k: [] for k in sa}
    for sentence in data_exclude_seed:
        # Mediator keywords that occur (as exact words) in this entry.
        target_word = [x for x in mid_key if x in sentence]
        # A single entry may contain more than one mediator keyword, so
        # account for each of them.
        for word in target_word:
            # Accumulate the a count.
            a[word] += 1
            # Collect this entry's words for the frequency count below.
            wordlist[word].extend(sentence)

    # Each mediator keyword gets an equal share of the requested total; this
    # is loop-invariant, so compute it once (0 when sa is empty — the loop
    # below never runs in that case, matching the original behavior).
    per_key = int(competitive_num / len(sa)) if sa else 0
    count_result = {}
    for key, words in wordlist.items():
        ranked = sorted(Counter(words).items(), key=lambda x: x[1], reverse=True)
        # Index 0 is the mediator keyword itself, so skip it.
        count_result[key] = ranked[1: per_key + 1]
    return count_result, a


def get_competitive(seed, num=None):
    """
    Get the competitive keywords for a seed keyword.

    :param seed: seed keyword
    :param num: total number of competitive keywords; defaults to
        CONST.NUM_COMPETITIVE_KEY, resolved at call time rather than import
        time
    :return: dict for chart display:
             {'mediator keyword': [('competitive keyword', Comp value), ...]},
             each list sorted by Comp value descending
    """
    if num is None:
        num = CONST.NUM_COMPETITIVE_KEY

    # Number of entries containing the seed keyword (s).
    s = 0
    # Split the preprocessed data set into entries that contain the seed
    # keyword and entries that do not.
    data_include_seed = []
    data_exclude_seed = []
    # Read the preprocessed data set.
    with open(CONST.AFTER_CUT_DATA, 'r', encoding='utf-8') as processing_data:
        datas = processing_data.readlines()
        for line in datas:
            # Strip only the trailing newline; the previous line[:-1] chopped
            # the last character of the final line when the file has no
            # trailing '\n'.
            line = line.rstrip('\n')
            # Skip empty lines.
            if not line:
                continue
            words = line.split('\t')
            # Filter out empty / whitespace-only tokens.
            words = list(filter(lambda x: x.strip(), words))
            if seed in line:  # does this entry contain the seed keyword?
                data_include_seed.extend(words)
                s += 1
            else:
                data_exclude_seed.append(words)

    # Mediator keywords and their sa values: {'mediator keyword': sa}
    sa = get_mid_key_and_sa(data_include_seed)

    # Compute ka and a.
    ka, a = get_ka_and_a(data_exclude_seed, sa, num)

    # Weight of each mediator keyword:
    # Wa(k) = |{sa}| / |{s}|
    # (If the seed never occurs, sa is empty and this loop never divides.)
    Wak = {}
    for mid_key, numT in sa.items():
        Wak[mid_key] = numT / s

    # Accumulate the Comp score of every competitive keyword across all
    # mediator keywords.
    Comp = {}
    for mid_key_name, a_num in a.items():
        for competitive_key_name, ka_num in ka[mid_key_name]:
            # NOTE(review): a_num - sa[...] is the number of exclude-seed
            # entries containing the mediator keyword; it is > 0 whenever
            # ka[mid_key_name] is non-empty, so no division by zero here.
            if competitive_key_name in Comp:
                Comp[competitive_key_name] += Wak[mid_key_name] * ka_num / (a_num - sa[mid_key_name])
            else:
                Comp[competitive_key_name] = Wak[mid_key_name] * ka_num / (a_num - sa[mid_key_name])

    # Build the return value for chart display:
    # {'mediator keyword': [('competitive keyword', Comp value), ...]}
    mid_with_comp = {}
    for mid_word, key_words in ka.items():
        mid_with_comp[mid_word] = [(key_word[0], Comp[key_word[0]]) for key_word in key_words]

    # Sort each mediator keyword's competitive keywords by Comp, descending.
    for k, v in mid_with_comp.items():
        mid_with_comp[k] = sorted(v, key=lambda x: x[1], reverse=True)

    return mid_with_comp

def get_mid_key(seed, num=None):
    """
    Get the mediator keywords for a seed keyword.

    :param seed: seed keyword
    :param num: number of mediator keywords to fetch; defaults to
        CONST.NUM_MID_KEY, resolved at call time rather than import time
    :return: [('mediator keyword', sa / (sa + a)), ...] sorted by score
             descending
    """
    if num is None:
        num = CONST.NUM_MID_KEY

    # Split the preprocessed data set into words from entries that contain
    # the seed keyword and words from entries that do not.
    data_include_seed = []
    data_exclude_seed = []
    # Read the preprocessed data set.
    with open(CONST.AFTER_CUT_DATA, 'r', encoding='utf-8') as processing_data:
        for line in processing_data.readlines():
            # Strip only the trailing newline; the previous line[:-1] chopped
            # the last character of the final line when the file has no
            # trailing '\n'.
            line = line.rstrip('\n')
            # Skip empty lines.
            if not line:
                continue
            words = line.split('\t')
            # Filter out empty / whitespace-only tokens.
            words = list(filter(lambda x: x.strip(), words))
            if seed in line:  # does this entry contain the seed keyword?
                data_include_seed.extend(words)
            else:
                data_exclude_seed.extend(words)

    # Mediator keywords and their sa values: {'mediator keyword': sa}
    sa = get_mid_key_and_sa(data_include_seed, num)

    # a: frequency of every word among entries without the seed keyword.
    # Counter returns 0 for missing words, and sa[key] >= 1, so the division
    # below never hits a zero denominator.
    a = Counter(data_exclude_seed)

    # Score each mediator keyword with sa / (sa + a).
    for key in sa:
        sa[key] = sa[key] / (sa[key] + a[key])
    return sorted(sa.items(), key=lambda x: x[1], reverse=True)

# Ad-hoc manual test entry point.
if __name__ == '__main__':
    # Example: competitive-keyword query for the seed '三星' (Samsung).
    # mid_with_comp = get_competitive('三星')
    # print(mid_with_comp)
    t = get_mid_key('三星',200)
    print(t)
    print('12331')  # stray debug marker printed literally
