import csv
import math
import time
from collections import Counter

import numpy as np
import pandas as pd
from scipy.linalg import norm
from sklearn.feature_extraction.text import CountVectorizer

def tf_similarity(s1, s2):
    """Return the cosine similarity of the per-character term-frequency
    vectors of *s1* and *s2*.

    1.0 means identical character counts, 0.0 means no characters in
    common (or at least one empty string).

    Implementation note: the previous version joined each string with
    spaces and ran sklearn's CountVectorizer with a whitespace tokenizer —
    that is exactly a per-character count, so a ``collections.Counter``
    does the same job without the heavyweight dependency.  This version
    also fixes a crash on empty input (CountVectorizer raised
    "empty vocabulary"; a zero norm product would otherwise divide by
    zero).
    """
    c1, c2 = Counter(s1), Counter(s2)
    # Dot product only needs the characters present in both strings.
    dot = sum(cnt * c2[ch] for ch, cnt in c1.items() if ch in c2)
    denom = (math.sqrt(sum(v * v for v in c1.values()))
             * math.sqrt(sum(v * v for v in c2.values())))
    # Empty string -> zero vector: define similarity as 0.0 rather than
    # dividing by zero.
    return dot / denom if denom else 0.0

start = time.time()
file = pd.read_csv('data.csv', sep='::',header=None,engine='python',encoding='utf-8')
df = pd.DataFrame(file)
data_ndarray = df.values
data = data_ndarray.tolist()
print('共有 %s 条数据' % len(data))
for i in range(len(data)):
    str1 = data[i][0]
    if str1 != '000':
        for j in range(len(data)):
            if i < j:
                print('开始第 %s 轮数据处理，%s' % (i, j))
                str2 = data[j][0]
                if str2 != '000':
                    s1 = tf_similarity(str1, str2)
                    if s1 > 0.7:
                        data[j][0] = '000'
data = list(set(data))
data.remove('000')
new_data_file = open('newdata.csv', 'w', newline='')
filewriter = csv.writer(new_data_file)
for i in data:
    filewriter.writerow(i)
end = time.time()
print("处理完成，程序运行时间:%.2f秒" % (end-start))
