# -*- coding: utf-8 -*-
import hashlib
import redis
import re
import config.setting as setting
from utils import my_utils

# Redis key namespace prefix for sentence-similarity data. Full keys are
# setting.NAME_SPACE + this prefix + (<sentence-md5-hex> or <article-id>).
similarity_sentence_namespace = 'sim_sentence:'


def get_sentences(doc):
    """
    Split *doc* into sentences and return at most the 3 longest ones.

    Sentences are split on common Chinese/Latin delimiters. A sentence
    whose stripped length is 5 characters or fewer is considered too
    short to be meaningful and is discarded.

    :param doc: input document text (str)
    :return: list of up to 3 sentences, sorted by length descending
    """
    delimiter = re.compile(r'[。？！：.!:?…*@]+')
    # Keep only "long" sentences; note the original (unstripped) text is kept.
    sentences = [sent for sent in delimiter.split(doc) if len(sent.strip()) > 5]
    sentences.sort(key=len, reverse=True)
    return sentences[:3]


def calc_duplicate(origin_id, doc, r):
    """
    Detect near-duplicate articles by comparing their longest sentences.

    The 3 longest sentences of *doc* are MD5-hashed. In Redis, each hash
    maps to the list of article ids that contained that sentence, and each
    article id maps to "<id>,<hash1>/<hash2>/...". If a previously stored
    article with a smaller id shares at least 3 sentence hashes with this
    one, it is reported as a duplicate. The current article's hashes and
    id are then written to Redis (with expiry) for future comparisons.

    :param origin_id: id of the current article (int, or str of digits)
    :param doc: full article text
    :param r: Redis client
    :return: None if no duplicate found, otherwise a tuple
             (duplicate_article_id, shared_sentence_count,
              '/'-joined sentences of the current article)
    """
    sentence_list = get_sentences(doc)
    if not sentence_list:
        return None

    key_prefix = setting.NAME_SPACE + similarity_sentence_namespace
    return_value = None

    # MD5 hex digest of each long sentence; these identify sentences in Redis.
    hash_list = [hashlib.md5(sent.encode()).hexdigest() for sent in sentence_list]
    # Serialized form stored under this article's own id key.
    hash_list_str = '/'.join(hash_list)

    # Collect every article id that previously contained any of these sentences.
    saved_list = []
    for hs in hash_list:
        if r.exists(key_prefix + hs):
            saved_list.extend(r.lrange(key_prefix + hs, 0, -1))

    # Deduplicate candidate ids; lrange returns bytes, hence .decode() below.
    for candidate_id in set(saved_list):
        # Stored value has the form "<id>,<hash1>/<hash2>/...".
        content_value = r.get(key_prefix + candidate_id.decode())
        if content_value is None:
            continue
        content_values_seg = content_value.decode().split(',')
        saved_hash_list = content_values_seg[1].split('/')

        # How many of our longest sentences the candidate article also has.
        shared = [val for val in hash_list if val in saved_hash_list]
        distance = len(shared)
        # 3 or more identical long sentences => the articles are duplicates.
        if distance >= 3:
            dic_value_id = content_values_seg[0]
            # Only report duplicates against strictly older (smaller-id) articles.
            if int(dic_value_id) < int(origin_id):
                return_value = (int(dic_value_id), distance, '/'.join(sentence_list))
                break

    # Record this article: 1) append its id to each sentence-hash list,
    # 2) store its id + hash list under its own key. All keys get an expiry.
    origin_id_str = str(origin_id)
    origin_id_bytes = origin_id_str.encode()
    for hs in hash_list:
        # lrange yields bytes, so compare against the encoded id.
        if origin_id_bytes not in r.lrange(key_prefix + hs, 0, -1):
            r.rpush(key_prefix + hs, origin_id_str)
            r.expire(key_prefix + hs, setting.REDIS_EXPIRE_TIME)

    id_values = '{},{}'.format(origin_id_str, hash_list_str)
    r.set(key_prefix + origin_id_str, id_values)
    r.expire(key_prefix + origin_id_str, setting.REDIS_EXPIRE_TIME)
    return return_value


if __name__ == '__main__':
    # Ad-hoc manual test against a development Redis instance.
    redis_client = redis.Redis(host="172.16.202.40",
                               port=6379,
                               db=7)
    test_article_id = 12345678
    # redis_client.flushdb()  # uncomment to wipe the test database first
    raw_html = '<p class="article-pic"><img src="http://snsimg.ztjystore.cn/article/2018/06/20/1529472099815541.jpg" data-type="jpg"></p><p class="article-pic"><img src="http://snsimg.ztjystore.cn/article/2018/06/20/1529472099230638.jpg" data-type="jpg"></p><p class="article-pic"><img src="http://snsimg.ztjystore.cn/article/2018/06/20/1529472100286204.jpg" data-type="jpg"></p><p class="article-pic"><img src="http://snsimg.ztjystore.cn/article/2018/06/20/1529472100418752.jpg" data-type="jpg"></p>'
    cleaned_content = my_utils.clean_data_except_mark(raw_html)
    print(cleaned_content)

    print(calc_duplicate(test_article_id, cleaned_content, redis_client))