import base64
import struct
import jieba
from difflib import SequenceMatcher
import jieba.analyse as jana


class SimHash:
    """SimHash fingerprint of a token sequence (Charikar's scheme).

    Each token is hashed to a ``hashbits``-wide integer; per-bit vote counts
    across all tokens are collapsed into a single fingerprint whose bit *i*
    is 1 iff the vote for bit *i* is non-negative.
    """

    def __init__(self, tokens=None, hashbits=128):
        """Build the fingerprint.

        :param tokens: iterable of strings; ``None`` is treated as empty
                       (the original crashed with TypeError on the default).
        :param hashbits: width of the fingerprint in bits.
        """
        self.hashbits = hashbits
        # Fix: guard the None default instead of iterating it.
        self.hash = self.simhash(tokens if tokens is not None else [])

    def __str__(self):
        """Decimal string form of the fingerprint."""
        return str(self.hash)

    def simhash(self, tokens):
        """Return the SimHash fingerprint for *tokens* as an int."""
        v = [0] * self.hashbits
        for t in (self._string_hash(x) for x in tokens):  # t: per-token hash
            for i in range(self.hashbits):
                if t & (1 << i):
                    v[i] += 1  # bit i set in this token's hash -> vote up
                else:
                    v[i] -= 1  # bit i clear -> vote down
        fingerprint = 0
        for i in range(self.hashbits):
            if v[i] >= 0:
                fingerprint |= 1 << i  # keep bits with non-negative votes
        return fingerprint

    def hamming_distance(self, other):
        """Number of differing bits between this fingerprint and *other*'s."""
        x = (self.hash ^ other.hash) & ((1 << self.hashbits) - 1)
        tot = 0
        while x:
            tot += 1
            x &= x - 1  # Kernighan's trick: clear lowest set bit
        return tot

    def similarity(self, other):
        """Ratio min(hash)/max(hash) of the two fingerprints, in [0, 1].

        NOTE(review): this is a crude numeric ratio, not a Hamming-based
        similarity; kept for backward compatibility.
        """
        a = float(self.hash)
        b = float(other.hash)
        # Fix: both fingerprints can legitimately be 0 (e.g. tokens == [""]);
        # the original raised ZeroDivisionError on that identical pair.
        if a == 0.0 and b == 0.0:
            return 1.0
        if a > b:
            return b / a
        return a / b

    def _string_hash(self, source):
        """Variable-width variant of the classic string hash (FNV-like).

        Empty strings hash to 0; the result is masked to ``hashbits`` bits.
        """
        if source == "":
            return 0
        x = ord(source[0]) << 7
        m = 1000003
        mask = 2 ** self.hashbits - 1
        for c in source:
            x = ((x * m) ^ ord(c)) & mask
        x ^= len(source)
        # Kept from the original (CPython reserves -1); unreachable after
        # masking, since x stays non-negative.
        if x == -1:
            x = -2
        return x


def hamming_dist(hash1, hash2, hash_bits):
    """Return the Hamming distance between two fingerprints.

    Only the lowest ``hash_bits`` bits of the XOR are counted.
    """
    diff = (hash1 ^ hash2) & ((1 << hash_bits) - 1)
    return bin(diff).count("1")


def similarity(hash1, hash2):
    """Return min(hash)/max(hash) of two fingerprints, in [0, 1].

    NOTE(review): a crude numeric ratio, not Hamming-based similarity;
    kept for backward compatibility with existing callers.
    """
    a = float(hash1)
    b = float(hash2)
    # Fix: a fingerprint can legitimately be 0 (all per-bit votes negative);
    # the original raised ZeroDivisionError on similarity(0, 0).
    if a == 0.0 and b == 0.0:
        return 1.0
    if a > b:
        return b / a
    return a / b


def string_hash(source, hash_bits):
    """Hash *source* to a ``hash_bits``-wide non-negative integer.

    A variable-width variant of the classic string hash: seeded from the
    first character, multiplied and XOR-folded per character, masked to
    ``hash_bits`` bits, then XORed with the length. Empty strings map to 0.
    """
    if source == "":
        return 0
    mask = (1 << hash_bits) - 1
    multiplier = 1000003
    acc = ord(source[0]) << 7
    for ch in source:
        acc = ((acc * multiplier) ^ ord(ch)) & mask
    acc ^= len(source)
    # Kept from the original (CPython reserves -1 as an error sentinel);
    # unreachable here since acc is always non-negative after masking.
    if acc == -1:
        acc = -2
    return acc


def get_hash(tokens, hash_bits=128):
    """Return the SimHash fingerprint of *tokens* as an int.

    Per-bit votes are accumulated over every token's hash: +1 when the bit
    is set, -1 when clear. Fingerprint bit *i* is 1 iff vote *i* >= 0.
    """
    votes = [0] * hash_bits
    for token in tokens:
        token_hash = string_hash(token, hash_bits)
        for bit in range(hash_bits):
            votes[bit] += 1 if token_hash & (1 << bit) else -1
    fingerprint = 0
    for bit, vote in enumerate(votes):
        if vote >= 0:
            fingerprint |= 1 << bit
    return fingerprint


def encode_simhash(fingerprint):
    """Serialize a fingerprint to a base64 string.

    NOTE(review): '=Q' packs exactly 8 bytes, so this raises struct.error
    for fingerprints >= 2**64 (e.g. from the 128-bit defaults elsewhere in
    this module); callers are expected to use 64-bit fingerprints.
    """
    packed = struct.pack('=Q', fingerprint)
    return base64.b64encode(packed).decode()


def decode_simhash(b64):
    """Deserialize a base64 string back to the fingerprint integer.

    Inverse of ``encode_simhash``. Fix: ``struct.unpack`` returns a 1-tuple,
    so the original returned ``(fingerprint,)`` instead of the int, breaking
    the round-trip with ``encode_simhash``.
    """
    fingerprint = struct.unpack('=Q', base64.b64decode(b64))[0]
    return fingerprint


def get_b64_hash(tokens, hash_bits=64):
    """Fingerprint *tokens* and return the result base64-encoded.

    Defaults to 64 bits so the fingerprint fits the '=Q' pack format
    used by ``encode_simhash``.
    """
    fingerprint = get_hash(tokens, hash_bits)
    return encode_simhash(fingerprint)


def tokenize(text):
    """Segment *text* into a list of tokens with jieba."""
    return list(jieba.cut(text))


def string_similar(a, b):
    """Similarity ratio of two sequences in [0, 1] via difflib."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()


#############################################################################
#                      extract                                              #
#############################################################################
def extract(s, top_k=10, allow_pos=('n', 'nz', 'vn'), type='text_rank'):
    """Extract up to *top_k* keywords from *s* (lowercased).

    :param type: 'tfidf' selects TF-IDF extraction; anything else uses
                 TextRank. (Parameter name kept for interface compatibility
                 even though it shadows the builtin.)
    """
    text = s.lower()
    if type == 'tfidf':
        return jana.extract_tags(text, topK=top_k, allowPOS=allow_pos, withWeight=False)
    return jana.textrank(text, topK=top_k, allowPOS=allow_pos, withWeight=False)


#############################################################################
#                     extract_keywords                                      #
#############################################################################
def extract_keywords(s, allow_pos=('ns', 'n', 'vn', 'eng'), data_type='text'):
    """Extract keywords from *s*, scaling count and algorithm with length.

    Titles always get 3 TF-IDF keywords; very short texts (< 15 chars)
    get none; longer texts get progressively more keywords, switching
    from TF-IDF to TextRank at 200 characters.
    """
    if data_type == 'title':
        return extract(s, top_k=3, allow_pos=allow_pos, type='tfidf')

    length = len(s)
    if length < 15:
        return []

    # (upper bound, keyword count, algorithm) — first matching row wins.
    tiers = (
        (50, 5, 'tfidf'),
        (100, 10, 'tfidf'),
        (200, 15, 'tfidf'),
        (300, 20, 'text_rank'),
    )
    for upper, top_k, algo in tiers:
        if length < upper:
            return extract(s, top_k=top_k, allow_pos=allow_pos, type=algo)
    return extract(s, top_k=30, allow_pos=allow_pos, type='text_rank')


#
# if __name__ == '__main__':
#     s = '中国人 中国人 在 中国'
#     hash1 = SimHash(s.split(), 128)
#     s1 = s
#     s = '中国人 中国'
#     hash2 = SimHash(s.split(), 128)
#     s2 = s
#     s = 'nai nai ge xiong cao'
#     hash3 = SimHash(s.split(), 128)
#     s3 = s
#
#     print(similarity(get_hash(s1.split()), get_hash(s2.split())))
#     print(hamming_dist(get_hash(s1.split()), get_hash(s2.split()), 128))
#
#     print(hash1.hamming_distance(hash2), "   ", hash1.similarity(hash2))
#     print(hash1.hamming_distance(hash3), "   ", hash1.similarity(hash3))
