#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Purpose: adjust the word order of intelligence texts and observe
whether the computed similarity changes.
'''
import thulac
import jieba
from simhash import Simhash
import csv

# Load the Chinese stopword list (one word per line) into a set so that
# membership tests in cut_words() are O(1) instead of O(n).
stopwordsPath = '../ChineseStopwords.txt'
with open(stopwordsPath, 'r', encoding='utf-8') as f:
    stopwords = set(f.read().split('\n'))
# Configure logging.
import sys
sys.path.append("..")  # make the parent directory importable (for log_tool)
from log_tool import logTool  # project-local logging helper
LOG_PATH = "./log_note"  # path where log output is stored
log = logTool(LOG_PATH)  # pass the storage path


def cut_words(line):
    '''
    Tokenize *line* with jieba and remove stopwords.

    :param line: raw text, str
    :return: space-joined tokens with stopwords removed, str
    '''
    # jieba.lcut returns a list of tokens; `stopwords` is the module-level
    # stopword collection loaded at import time.
    words = jieba.lcut(line)
    # NOTE: removed a leftover debug print(line) that spammed stdout per call.
    return ' '.join(word for word in words if word not in stopwords)
# Compute hash-based similarity
def compute_simhash_hamming_similarity(s1, s2, hashbits=64):
    '''
    Similarity of two pre-tokenized texts via simhash Hamming distance.

    :param s1: text 1, tokens joined by single spaces, str
    :param s2: text 2, tokens joined by single spaces, str
    :param hashbits: fingerprint width in bits; the Simhash default is
                     f=64, so the Hamming distance lies in [0, 64]
    :return: similarity in [0, 1], float
    '''
    # Simhash expects an iterable of features (the tokenized words).
    simhash_1, simhash_2 = Simhash(s1.split(" ")), Simhash(s2.split(" "))
    # Hamming distance between the two 64-bit fingerprints.
    distance = simhash_1.distance(simhash_2)
    # BUGFIX: the previous code normalized by len(bin(value)), which counts
    # the '0b' prefix and drops leading zero bits, giving a wrong,
    # input-dependent denominator (~66). Normalize by the fixed fingerprint
    # width instead so similarities are comparable across pairs.
    return 1 - distance / hashbits

if __name__ == '__main__':
    thre = 0.8  # similarity threshold above which a pair counts as a duplicate
    file_path = '../data/demo.txt'
    simi_result_path = './result/simhash_python_0702.txt'
    repeat_result_path = './result/repeat_intelligence_simhash.csv'  # duplicate-pair output
    method = 'simhash_hamming'
    # Dispatch table instead of eval(): eval on a constructed string is
    # opaque and unsafe; a dict maps method names to callables directly.
    similarity_funcs = {'simhash_hamming': compute_simhash_hamming_similarity}
    compute_similarity = similarity_funcs[method]
    log.info('start simhash python compute 100')
    # All three files are managed by `with` so they are closed (and the CSV
    # buffer flushed) even on error; previously the CSV handle was never closed.
    with open(file_path, 'r', encoding='utf-8') as f1, \
         open(simi_result_path, 'w') as f2, \
         open(repeat_result_path, 'w', newline='', encoding='utf-8-sig') as csv_file:
        csv_writer = csv.writer(csv_file)
        intelligence = f1.readlines()
        new_data = [cut_words(data.strip()) for data in intelligence]
        k = 0  # number of pairs whose similarity >= thre
        for i in range(len(new_data)):
            for j in range(i + 1, len(new_data)):
                similarity = compute_similarity(new_data[i], new_data[j])
                if similarity >= thre:
                    # writerow requires a list/sequence of fields
                    csv_writer.writerow([str(k), intelligence[i], intelligence[j], str(similarity)])
                    k += 1
                f2.write(str(i) + '-' + str(j) + ',' + str(similarity) + '\n')
    log.info('end simhash python compute 100 ')