#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
作用：手动写simHash代码，运用jieba分词及对应的权重
输入：100条数据
输出：两两比较的相似度，以及相似度大于某阈值的数据样例
结果：当前由于阈值的设定，存在较大的误判。
'''
import numpy as np
import thulac
import hashlib
import jieba
import jieba.analyse
import csv
from collections import defaultdict
# Module-level setup: tokenizer, stopword list, and logger.
thu = thulac.thulac(seg_only=True)  # seg_only=True: segmentation only, no POS tagging
from sklearn.feature_extraction.text import TfidfVectorizer

# Load the Chinese stopword list, one word per line.
# Fix: use a context manager and explicit encoding; the original left the
# file handle open and relied on the platform default encoding.
stopwordsPath = '../ChineseStopwords.txt'
with open(stopwordsPath, 'r', encoding='utf-8') as f:
    stopwords = f.read().split('\n')

import sys
sys.path.append("..")
from log_tool import logTool  # project-local logging helper
LOG_PATH = "./log_note"  # directory where log output is stored
log = logTool(LOG_PATH)  # instantiate the logger with the storage path
#使用jieba分词，获取权重
#使用jieba分词，获取权重
def cut_words_jieba(intelligence):
    """Compute a 64-bit simhash (as a '0'/'1' string) for each text line.

    Each line is segmented with jieba, the top TF-IDF keywords (with
    weights) are hashed to 64 bits, each bit contributes +/- its keyword
    weight, the per-bit sums are taken over all keywords, and the sign of
    each column yields one simhash bit.

    :param intelligence: iterable of text lines
    :return: list of 64-character '0'/'1' strings, one per input line
    """
    simHash_value = []
    for line in intelligence:
        seg = jieba.cut(line)
        # topK=100: keep at most the 100 highest-weighted keywords per line.
        keywords = jieba.analyse.extract_tags("|".join(seg), topK=100, withWeight=True)
        keyList = []
        for keyword, weight in keywords:
            # md5 is used purely as a mixing hash (not for security);
            # it could be swapped for sha3_256 etc.
            feature = hashlib.md5(keyword.encode('utf-8')).hexdigest()
            # Hex digest -> binary string, keep the low 64 bits.
            # (int(feature, 16) is never negative, so the original
            # .replace('-', '') was dead code.)
            feature = bin(int(feature, 16))[2:].zfill(64)[-64:]
            # A 1 bit contributes +weight, a 0 bit contributes -weight.
            keyList.append([weight if bit == '1' else -weight for bit in feature])
        if not keyList:
            # No keywords extracted (e.g. an empty/whitespace line): the
            # original crashed here because np.sum over an empty array
            # returns a 0-d scalar that cannot be iterated. Emit an
            # all-zero simhash instead.
            simHash_value.append('0' * 64)
            continue
        sum_hash = np.sum(np.array(keyList), axis=0)  # column-wise sum, length 64
        simhash = ''.join('1' if v > 0 else '0' for v in sum_hash)
        simHash_value.append(simhash)
    return simHash_value

#计算汉明距离
def compute_simhash_similarity(simhash_1, simhash_2):
    """Return the similarity of two equal-length binary-string simhashes.

    Similarity = 1 - hamming_distance / bit_length, so 1.0 means the
    hashes are identical and 0.0 means every bit differs.

    :param simhash_1: simhash as a '0'/'1' string
    :param simhash_2: simhash as a '0'/'1' string, same length
    :return: float in [0, 1]
    """
    # XOR leaves a 1 bit exactly where the two hashes differ, so the
    # popcount of the XOR is the Hamming distance. The original loop
    # compared digits with `is 1`, an identity check that only works by
    # accident of CPython small-int caching (SyntaxWarning on 3.8+).
    distance = bin(int(simhash_1, 2) ^ int(simhash_2, 2)).count('1')
    return 1 - distance / len(simhash_1)
# Script entry point: simhash every line of the input file, then compare
# all pairs; write every pairwise similarity to result_path and the pairs
# above the 0.75 threshold to repeat_path (CSV).
if __name__ == '__main__':
    file_path = '../data/demo.txt'
    result_path = 'result/simHash_jieba_0702.txt'
    repeat_path = 'result/repeat_intelligence_jieba.csv'
    log.info('start hash compute')
    # Fix: open all three files with context managers so every handle is
    # closed even on error — the original never closed the csv handle and
    # opened the result file without an explicit encoding.
    with open(file_path, 'r', encoding='utf-8') as f1, \
         open(result_path, 'w', encoding='utf-8') as f2, \
         open(repeat_path, 'w', encoding='utf-8-sig', newline='') as f3:
        csv_writer = csv.writer(f3)
        intelligence = f1.readlines()
        simhash_value = cut_words_jieba(intelligence)
        # Pairwise comparison over all (i, j) pairs, i < j.
        k = 0  # running count of pairs with similarity >= 0.75
        for i in range(len(simhash_value)):
            for j in range(i + 1, len(simhash_value)):
                similarity = compute_simhash_similarity(simhash_value[i], simhash_value[j])
                if similarity >= 0.75:
                    # writerow requires a sequence, not bare values
                    csv_writer.writerow([str(k), intelligence[i], intelligence[j], str(similarity)])
                    k += 1
                f2.write(str(i) + '-' + str(j) + ',' + str(similarity) + '\n')
    log.info('end hash compute')
