import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from gensim import corpora, models, similarities
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import nltk
import jieba
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
import re

# Load the raw JD (京东) review export; the '评论' column holds the comment text.
df = pd.read_csv('date/京东-麻辣王子辣条评论.csv')
# print(df["评论"])
# print(df.info())

# Strip any embedded HTML markup, keeping only the visible text.
df['评论'] = df['评论'].apply(lambda x: BeautifulSoup(x, 'html.parser').get_text())
# Remove advertisement/URL links FIRST, while ':' '/' '.' are still present —
# the original order stripped punctuation before this step, which mangled the
# URLs so the pattern below could no longer match them reliably.
df['评论'] = df['评论'].apply(lambda x: re.sub(r'http\S+|www\S+|https\S+', '', x, flags=re.MULTILINE))
# Remove special characters and emoji (everything that is neither a word
# character nor whitespace).
df['评论'] = df['评论'].apply(lambda x: re.sub(r'[^\w\s]', '', x))
# Remove Windows-style line breaks left over in the text.
df['评论'] = df['评论'].replace("\r\n", "", regex=True)
print('清洗后的评论：')
print(df['评论'])

# **************************Remove near-duplicate comments******************************
# One-time NLTK resource downloads (only needed if nltk stopwords/tokenizer
# were used; kept for reference):
# nltk.download('stopwords')
# nltk.download('punkt')

# The comment column we will deduplicate.
comments = df['评论']
# print(len(comments))

# Load the Chinese stopword list into a SET: membership is tested once per
# token below, and a set gives O(1) lookups instead of the O(len(stopwords))
# scan that the original list cost for every single word.
with open('date/stopword/cn_stopwords.txt', 'r', encoding='utf-8') as f:
    chinese_stopwords = {line.strip() for line in f}

# Tokenize each comment with jieba and drop stopwords.  The result is a list
# of space-joined token strings, the input format TfidfVectorizer expects.
processed_comments = [
    ' '.join(word for word in jieba.cut(comment) if word not in chinese_stopwords)
    for comment in comments
]
# processed_comments now holds the stopword-filtered comments.

# print(processed_comments)
# print(comments)

# Feature extraction (TF-IDF): one row per comment, one column per token.
vectorizer = TfidfVectorizer()  # instantiate the feature extractor
tfidf_matrix = vectorizer.fit_transform(processed_comments)
# tfidf_matrix is a sparse matrix of per-comment, per-token TF-IDF weights.
# print(tfidf_matrix)

# Pairwise cosine similarity between every pair of comments.
similarity_matrix = cosine_similarity(tfidf_matrix)

# Keep ONE representative of each group of near-duplicate comments.
# BUG FIX: the previous logic kept comment i only when it resembled NO other
# comment at all, so for any duplicate pair BOTH copies were discarded —
# the opposite of deduplication.  Here comment i is kept unless it is similar
# to a comment we have already kept, so the first occurrence survives.
threshold = 0.3  # cosine similarity above which two comments count as duplicates
unique_comments = []
kept_indices = []  # positions of the representatives kept so far
for i in range(len(comments)):
    if all(similarity_matrix[i, j] < threshold for j in kept_indices):
        kept_indices.append(i)
        unique_comments.append(comments[i])

# print("去除类似评论后的评论为:",unique_comments)
# Keep only the rows whose comment survived deduplication.  Membership is
# tested against a set (O(1) per row) via a single vectorized isin() filter,
# replacing the original O(n^2) row-by-row pd.concat loop with an O(n) list
# lookup inside it.
unique_set = set(unique_comments)
df = df[df['评论'].isin(unique_set)].copy()
# df now contains only the deduplicated rows.

# Drop rows where a location value ('辽宁') leaked into the 评论时间 column
# (apparently mis-aligned rows in the source data — TODO confirm against the
# raw CSV).
condition = (df['评论时间'] == '辽宁')
df = df[~condition].copy()

# Backward-fill missing values.  .bfill() with plain assignment replaces the
# deprecated fillna(method='bfill', inplace=True) form and avoids the
# chained-assignment SettingWithCopyWarning that inplace=True on a filtered
# frame can trigger.
df['地点'] = df['地点'].bfill()
df['评论时间'] = df['评论时间'].bfill()
df['商品信息'] = df['商品信息'].bfill()

df["评论时间"] = pd.to_datetime(df["评论时间"], format='%Y-%m-%d')
print(df.head(10))
print(df.info())
df.to_csv('date/京东-麻辣王子辣条评论cled.csv', index=False)