#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Purpose: compute the similarity between two texts.
1. Word segmentation and stop-word removal.
2. Compute similarity with several methods and save the results.
'''

import thulac
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from scipy.linalg import norm
'''
Logging configuration
'''
from log_tool import logTool  # project-local logging helper
LOG_PATH = "./log_note"  # directory where log output is stored
log = logTool(LOG_PATH)  # hand the storage path to the logger

# Load the Chinese stop-word list (one word per line).
# Fix: the file was previously opened without `with` (handle leaked, never
# closed) and without an explicit encoding.
# NOTE(review): encoding assumed UTF-8 — confirm against the data file.
stopwordsPath = './ChineseStopwords.txt'
with open(stopwordsPath, 'r', encoding='utf-8') as f:
    # A set gives O(1) membership tests in cut_words (was a list, O(n)).
    stopwords = set(f.read().split('\n'))
thu = thulac.thulac(seg_only=True)  # seg_only: segment only, skip POS tagging

def get_data(file_path, result_path):
    '''
    Compute pairwise similarities between all intelligence samples and save them.

    :param file_path: path of the intelligence sample file (one sample per line)
    :param result_path: path of the output file; each line has the format
        method,index_i,index_j,similarity
    :return: None
    '''
    # Fix: dispatch table instead of eval('compute_'+method+...) — eval on a
    # built-up string is fragile and a known anti-pattern.
    methods = {
        'tfidf': compute_tfidf_similarity,
        'jaccard': compute_jaccard_similarity,
    }
    with open(file_path, 'r', encoding='utf-8') as f1:
        ti_data = f1.readlines()
    # Segment and strip stop words once, up front.
    new_data = [cut_words(line.strip()) for line in ti_data]
    # Fix: output file is now managed by `with`, so it is closed even if a
    # similarity computation raises.
    with open(result_path, 'w') as f2:
        for method, compute in methods.items():
            log.info('start ' + method + ' compute ')
            for i in range(0, len(new_data)):
                for j in range(i + 1, len(new_data)):
                    similar = compute(new_data[i], new_data[j])
                    # Saved keyed by the article indices.
                    f2.write(method + ',' + str(i) + ',' + str(j) + ',' + str(similar) + '\n')
                print('the ' + str(i) + ' intelligence demos compare finish ')
            log.info('end ' + method + ' compute ')
def cut_words(line):
    '''
    Segment a raw text line and drop stop words.

    :param line: raw text (str)
    :return: space-joined tokens with stop words removed (str)
    '''
    # text=True makes thulac return the segmented result as a single str.
    segmented = thu.cut(line, text=True)
    kept = [token for token in segmented.split(' ') if token not in stopwords]
    return ' '.join(kept)

#词向量，计算余弦相似度
def compute_tfidf_similarity(s1, s2):
    '''
    Cosine similarity of the TF-IDF vectors of two texts.

    :param s1: text 1, space-separated tokens (str)
    :param s2: text 2, space-separated tokens (str)
    :return: similarity score (float)
    '''
    # Fit a TF-IDF model on just these two documents and densify the vectors.
    tfidf = TfidfVectorizer().fit_transform([s1, s2]).toarray()
    v1, v2 = tfidf[0], tfidf[1]
    cosine = np.dot(v1, v2) / (norm(v1) * norm(v2))
    # Map through 0.5*cos + 0.5 as in the original design.
    # NOTE(review): TF-IDF vectors are non-negative, so `cosine` is already
    # in [0, 1]; this mapping compresses scores into [0.5, 1] — confirm that
    # this is intended before comparing against the Jaccard scores.
    return 0.5 * cosine + 0.5

from simhash import Simhash
#字符，计算hash相似度
def compute_simhash_hamming_similarity(s1, s2):
    '''
    Similarity of two texts derived from the Hamming distance of their simhashes.

    :param s1: text 1, space-separated tokens (str)
    :param s2: text 2, space-separated tokens (str)
    :return: similarity in [0, 1] (float)
    '''
    # Simhash expects the corpus as a list of tokens.
    simhash_1 = Simhash(s1.split(" "))
    simhash_2 = Simhash(s2.split(" "))
    # Fix: normalise by the fingerprint width (Simhash.f, 64 bits by default)
    # instead of len(bin(value)), which counted the '0b' prefix and varied
    # with the number of leading zero bits of each hash value.
    hash_bits = max(simhash_1.f, simhash_2.f)
    # Hamming distance between the two fingerprints.
    distance = simhash_1.distance(simhash_2)
    # Convert the distance into a similarity score (also drops the old
    # debug print() statements).
    return 1 - distance / hash_bits

#基于概率统计，杰德卡相似系数
def compute_jaccard_similarity(s1: str, s2: str) -> float:
    '''
    Jaccard similarity coefficient of two space-separated token strings.

    :param s1: text 1 (str)
    :param s2: text 2 (str)
    :return: |intersection| / |union| of the two token sets (float)
    '''
    tokens_a = set(s1.strip(" ").split(" "))
    tokens_b = set(s2.strip(" ").split(" "))
    shared = tokens_a.intersection(tokens_b)
    combined = tokens_a.union(tokens_b)
    return len(shared) / len(combined)


if __name__ == '__main__':
    # Script entry point: compare the demo samples and persist the scores.
    get_data(file_path='./data/demo.txt', result_path='similarity_result.txt')