import requests
from bs4 import BeautifulSoup
import re
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def fetch_comments(url):
    """Fetch and parse product comments from an e-commerce page.

    Args:
        url: Page URL whose HTML contains ``div.comment`` elements.

    Returns:
        A list of dicts with keys 'commenter', 'content', 'time', 'product'.
        A missing sub-element yields '' for that field instead of crashing.

    Raises:
        requests.HTTPError: if the server responds with a 4xx/5xx status.
        requests.Timeout: if the request exceeds the timeout.
    """
    # Fail fast on network problems rather than parsing an error page;
    # without a timeout a dead server would hang this call forever.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')

    def _text(elem):
        # bs4 ``find`` returns None when the element is absent; the original
        # ``elem.text`` would raise AttributeError on such comments.
        return elem.text if elem is not None else ''

    comments = []
    for comment_elem in soup.find_all('div', class_='comment'):
        comments.append({
            'commenter': _text(comment_elem.find('span', class_='commenter')),
            'content': _text(comment_elem.find('p')),
            'time': _text(comment_elem.find('span', class_='time')),
            'product': _text(comment_elem.find('span', class_='product')),
        })
    return comments

def preprocess_comments(comments):
    """Clean each comment's 'content' field in place.

    Removes HTML tags, ad/spam URLs, special characters and emoji, and
    collapses runs of whitespace.

    Args:
        comments: list of dicts, each with a 'content' string.

    Returns:
        The list of the same comment dicts with cleaned 'content'.
    """
    processed_comments = []
    for comment in comments:
        text = comment['content']
        # Strip residual HTML tags.
        text = re.sub(r'<.*?>', '', text)
        # Remove ad links BEFORE stripping punctuation: the original code
        # deleted ':' and '/' first, so its (also malformed) URL regex could
        # never match and URL text survived as plain words.
        text = re.sub(r'https?://\S+', '', text)
        # Drop special characters and emoji (anything not a word char/space).
        text = re.sub(r'[^\w\s]', '', text)
        # Collapse consecutive whitespace into single spaces.
        comment['content'] = ' '.join(text.split())
        processed_comments.append(comment)
    return processed_comments

def remove_duplicates(comments, threshold=0.9):
    """Drop near-duplicate comments using TF-IDF cosine similarity.

    For every pair whose similarity exceeds ``threshold``, the later
    comment is discarded and the first occurrence kept.

    Args:
        comments: list of dicts, each with a 'content' string.
        threshold: similarity cutoff in [0, 1]; defaults to 0.9 as before.

    Returns:
        A new list containing only the unique comments, original order kept.
    """
    # TfidfVectorizer raises ValueError on an empty corpus; an empty input
    # trivially has no duplicates, so return early.
    if not comments:
        return []
    # Vectorize the comment texts as TF-IDF and compare all pairs.
    vectorizer = TfidfVectorizer()
    tfidf_matrix = vectorizer.fit_transform([comment['content'] for comment in comments])
    similarity_matrix = cosine_similarity(tfidf_matrix)
    # Collect indices of comments that duplicate an earlier one.
    n = len(comments)
    duplicates = set()
    for i in range(n):
        for j in range(i + 1, n):
            if similarity_matrix[i][j] > threshold:
                duplicates.add(j)
    # Keep only the first occurrence of each near-duplicate group.
    return [comment for i, comment in enumerate(comments) if i not in duplicates]

def store_comments(comments, db_path, csv_path):
    """Persist comments to an SQLite database table and a CSV file.

    Args:
        comments: list of comment dicts (uniform keys become columns).
        db_path: SQLite location; either a plain file path or a
            ``sqlite:///path`` URI (as passed by ``main``).
        csv_path: destination CSV file path.
    """
    import sqlite3

    df = pd.DataFrame(comments)
    # pandas.to_sql requires a SQLAlchemy engine or a DBAPI2 connection;
    # the original passed the URI *string* as ``con``, which raises at
    # runtime. Open an sqlite3 connection ourselves instead.
    path = db_path[len('sqlite:///'):] if db_path.startswith('sqlite:///') else db_path
    conn = sqlite3.connect(path)
    try:
        df.to_sql('comments', con=conn, if_exists='replace', index=False)
    finally:
        conn.close()
    # Mirror the data to CSV for easy ad-hoc inspection.
    df.to_csv(csv_path, index=False)

def main():
    """Entry point: scrape, clean, dedupe, and persist product comments."""
    source_url = 'http://example.com/product/comments'
    raw_comments = fetch_comments(source_url)
    cleaned = preprocess_comments(raw_comments)
    deduped = remove_duplicates(cleaned)
    store_comments(deduped, 'sqlite:///comments.db', 'comments.csv')


if __name__ == '__main__':
    main()