from django.shortcuts import render

# Create your views here.
from fuzzywuzzy import fuzz
from difflib import SequenceMatcher
import Levenshtein as lst
import jieba
from gensim import corpora, models, similarities
import numpy as np
from django.http import HttpResponse

stopwords = (r"cn_stopwords.txt")
stopwordss = (r"cn_stopwords.txt")


class Nlpname():
    """String-similarity helpers comparing two names with several metrics."""

    def __init__(self, name, tname):
        self.name = name
        self.tname = tname

    def fuzz(self, name, tname):
        """Return three similarity figures for *name* vs *tname*.

        :param name: first string
        :param tname: second string
        :return: tuple ``(a, b, c)`` where ``a`` is the difflib ratio
            scaled to 0-100, ``b`` the fuzzywuzzy ratio (0-100), and
            ``c`` the Levenshtein edit distance (lower = more similar).
        """
        matcher = SequenceMatcher()
        matcher.set_seqs(name, tname)
        a = matcher.ratio() * 100
        b = fuzz.ratio(name, tname)
        c = lst.distance(name, tname)
        return a, b, c

    def cosine_similarity(self, sentence1: str, sentence2: str) -> float:
        """Bag-of-words cosine similarity of two sentences, rescaled to [0, 1].

        Sentences are segmented with jieba and words found in the
        module-level ``stopwordss`` are dropped.

        :param sentence1: first sentence
        :param sentence2: second sentence
        :return: ``0.5 + 0.5 * cos(v1, v2)``; 0.0 when either sentence has
            no usable words after filtering (the original produced
            nan / a division-by-zero in that case).
        """
        seg1 = [w for w in jieba.cut(sentence1) if w not in stopwordss]
        seg2 = [w for w in jieba.cut(sentence2) if w not in stopwordss]
        vocabulary = sorted(set(seg1) | set(seg2))  # shared word list
        vec_1 = np.array([seg1.count(w) for w in vocabulary])
        vec_2 = np.array([seg2.count(w) for w in vocabulary])
        denom = np.linalg.norm(vec_1) * np.linalg.norm(vec_2)
        if denom == 0:  # BUG FIX: empty vector -> undefined cosine
            return 0.0
        cos = vec_1.dot(vec_2) / denom
        return 0.5 + 0.5 * cos


class simhash:
    """Simhash fingerprint (default 128 bits) over an iterable of tokens."""

    def __init__(self, tokens='', hashbits=128):
        """:param tokens: iterable of strings to fingerprint
        :param hashbits: width of the fingerprint in bits
        """
        self.hashbits = hashbits
        self.hash = self.simhash(tokens)

    def __str__(self):
        return str(self.hash)

    def simhash(self, tokens):
        """Combine per-token hashes into one fingerprint.

        For every bit position, tally +1/-1 across all token hashes; the
        fingerprint has a 1 wherever the tally ends up >= 0.
        """
        v = [0] * self.hashbits
        for t in (self._string_hash(x) for x in tokens):
            for i in range(self.hashbits):
                bitmask = 1 << i
                if t & bitmask:
                    v[i] += 1
                else:
                    v[i] -= 1
        fingerprint = 0
        for i in range(self.hashbits):
            if v[i] >= 0:
                fingerprint += 1 << i
        return fingerprint

    def hamming_distance(self, other):
        """Number of bit positions where the two fingerprints differ."""
        x = (self.hash ^ other.hash) & ((1 << self.hashbits) - 1)
        tot = 0
        while x:  # Kernighan's trick: clear lowest set bit per step
            tot += 1
            x &= x - 1
        return tot

    def similarity(self, other):
        """Ratio of the smaller fingerprint to the larger (1.0 when equal).

        NOTE(review): comparing raw fingerprint magnitudes is a weak
        similarity measure; hamming_distance is the usual metric.
        """
        a = float(self.hash)
        b = float(other.hash)
        if a == b:
            return 1.0  # BUG FIX: also avoids 0/0 when both hashes are 0
        return min(a, b) / max(a, b)

    def _string_hash(self, source):
        """Hash *source* to ``self.hashbits`` bits (CPython-2-style string hash).

        BUG FIX: the original returned from inside the per-character loop,
        so the hash depended only on the first character and the string
        length; the loop now consumes the whole string and the final
        mixing steps run once afterwards.
        """
        if source == "":
            return 0
        m = 1000003
        mask = 2 ** self.hashbits - 1
        x = ord(source[0]) << 7
        for c in source:
            x = ((x * m) ^ ord(c)) & mask
        x ^= len(source)
        if x == -1:
            x = -2
        return x


def nplorcd(request):
    """Django view: cosine similarity of the ``name`` and ``namet`` GET params.

    :param request: Django HttpRequest carrying ``name`` and ``namet``
    :return: HttpResponse whose body is the similarity score
    """
    name = request.GET.get('name')
    namet = request.GET.get('namet')
    data = Nlpname(name, namet)
    # BUG FIX: the original called cosine_similarity(name, name), comparing
    # the first parameter with itself so the view always reported identical
    # texts; compare the two distinct query parameters instead.
    z = data.cosine_similarity(name, namet)
    return HttpResponse(z)


from simhash import Simhash


def simhash_demo(text_a, text_b):
    """Similarity in [0, 1] between two texts via the ``simhash`` package.

    :param text_a: first text
    :param text_b: second text
    :return: ``1 - hamming_distance / max_fingerprint_bits``
    """
    a_simhash = Simhash(text_a)
    b_simhash = Simhash(text_b)
    # BUG FIX: len(bin(v)) counts the '0b' prefix, inflating the divisor by
    # two bits; int.bit_length() gives the true fingerprint width.
    max_hashbit = max(a_simhash.value.bit_length(), b_simhash.value.bit_length())
    if max_hashbit == 0:
        return 1.0  # both fingerprints are zero -> identical
    distance = a_simhash.distance(b_simhash)  # Hamming distance
    # Dropped the debug print() the original left in this request path.
    return 1 - distance / max_hashbit


def hashnlp(request):
    """Django view: simhash similarity of the ``name`` and ``tname`` GET params.

    :param request: Django HttpRequest carrying ``name`` and ``tname``
    :return: HttpResponse whose body is the similarity score
    """
    params = request.GET
    score = simhash_demo(params.get('name'), params.get('tname'))
    return HttpResponse(score)
