import nltk,string,jieba,re,threading
from nltk.corpus import stopwords
from wordcloud import WordCloud
import pandas as pd
from bs4 import BeautifulSoup
from collections import Counter
import matplotlib.pyplot as plt
from textblob import TextBlob
from translate import Translator
# 数据读取、清洗
def data_clearing():
    """Read the scraped CSV, clean the comment column, and return it as a string.

    Cleaning steps: drop rows with a missing comment, coerce the column to
    str, drop duplicate comments, reset the index, and strip embedded HTML.

    Returns:
        str: the cleaned '评论内容' column rendered via ``Series.to_string()``.
    """
    df = pd.read_csv('spider_db.csv')

    # 1. Drop rows whose comment is missing.
    #    Fix: the original discarded the result of dropna(), so NaN rows
    #    were silently kept — assign it back.
    df = df.dropna(subset=['评论内容'])

    # 2. Ensure the comment column is string-typed (was object).
    df['评论内容'] = df['评论内容'].astype('str')

    # 3. Drop duplicate comments, keeping the first occurrence.
    df.drop_duplicates(subset=['评论内容'], keep='first', inplace=True)
    # Fix: reset_index() returns a new frame; the original discarded it,
    # leaving gaps in the index after the row drops above.
    df = df.reset_index(drop=True)

    # 4. Strip any HTML tags embedded in the comment text.
    #    BeautifulSoup tolerates plain text as well as markup.
    def _clean_html(text):
        return BeautifulSoup(text, 'html.parser').get_text()

    df['评论内容'] = df['评论内容'].apply(_clean_html)
    # Optionally persist the cleaned data:
    # df.to_csv('cleaned_data.csv', index=False)
    return df['评论内容'].to_string()
# 词频分析
def word_frequency_analysis(data):
    """Tokenise the cleaned comment text and plot the most frequent words.

    Args:
        data (str): cleaned comment text as one large string.
    """
    # Fix: the valid rcParams key is 'axes.unicode_minus'; the original
    # used the non-existent key 'axes.unicode', which raises KeyError.
    plt.rcParams['axes.unicode_minus'] = False  # render '-' correctly in saved figures

    # Segment the Chinese text with jieba (accurate mode; cut_all=True
    # would over-segment).
    tokens = jieba.lcut(''.join(data))

    # Characters to discard: ASCII punctuation plus digits.
    remove_chars = set(string.punctuation)
    remove_chars.update(string.digits)

    # Fix: `stop_words` was referenced but never defined (NameError at
    # runtime). Load the NLTK Chinese stop-word list; fall back to an empty
    # set if the corpus has not been downloaded yet.
    try:
        stop_words = set(stopwords.words('chinese'))
    except LookupError:
        stop_words = set()

    # Keep exactly two-character words that are not stop words, punctuation
    # characters, or whitespace-only.
    tokens = [word for word in tokens
              if word not in stop_words
              and word.strip()
              and word not in remove_chars
              and 1 < len(word) < 3]

    # Count occurrences and take the 100 most common words.
    word_freq = Counter(tokens)
    top_words = word_freq.most_common(100)
    print(top_words)

    # Fix: guard against an empty result — zip(*[]) would raise ValueError
    # on the unpack below.
    if not top_words:
        return

    # Bar chart of the top words.
    words, frequencies = zip(*top_words)
    plt.figure(figsize=(10, 5))
    plt.bar(words, frequencies)
    plt.xlabel('Words')
    plt.ylabel('Frequency')
    plt.xticks(rotation=45)
    plt.show()
# 词云分析
def draw_wordcloud(data):
    """Render a word cloud from the comment text.

    Args:
        data: iterable of strings (or a single string) to visualise.
    """
    # Join into a single whitespace-separated string for WordCloud.
    text = ' '.join(data)
    # font_path must point at a font with CJK glyphs (msyh.ttc = Microsoft YaHei),
    # otherwise Chinese words render as boxes.
    wordcloud = WordCloud(font_path='msyh.ttc', background_color='white',
                          width=800, height=600, max_words=200).generate(text)
    plt.figure(figsize=(10, 8))
    plt.imshow(wordcloud, interpolation='bilinear')
    # Fix: plt.axis() with no argument only *queries* the limits; pass 'off'
    # to actually hide the axes as the original comment intended.
    plt.axis('off')
    plt.show()

# 翻译函数
def translate_to_english(text):
    """Translate *text* from Chinese to English.

    Args:
        text (str): Chinese source text.

    Returns:
        str: the English translation produced by the `translate` package.
    """
    # Build the translator and translate in one expression; a fresh
    # Translator per call keeps the function stateless.
    return Translator(from_lang="ZH", to_lang="EN").translate(text)
# 情感分析
def analyze_sentiment(data):
    """Translate each comment to English and score its sentiment polarity.

    Args:
        data (str): newline-separated comment text.

    Returns:
        list[float]: TextBlob polarity scores in [-1.0, 1.0], one per
        successfully analysed comment.
    """
    # Split the blob into individual comments, one per line.
    comments = data.split('\n')

    sentiments = []
    for text in comments:
        # Skip blank lines so we don't waste translation calls on them.
        if not text.strip():
            continue
        try:
            translated = translate_to_english(text)
            # Fix: the original assigned the TextBlob *class* instead of
            # instantiating it with the translated text, so the polarity
            # lookup failed on every single comment.
            blob = TextBlob(translated)
            sentiments.append(blob.sentiment.polarity)
        except Exception as e:
            # Best-effort: report and skip comments that fail
            # translation or analysis rather than aborting the run.
            print("发生错误:", e)

    # Tally the polarity distribution.
    positive_count = sum(1 for sentiment in sentiments if sentiment > 0)
    negative_count = sum(1 for sentiment in sentiments if sentiment < 0)
    neutral_count = sum(1 for sentiment in sentiments if sentiment == 0)

    total_count = len(sentiments)

    # Percentage share of each polarity class (guard against division by zero).
    if total_count == 0:
        positive_percentage = 0
        negative_percentage = 0
        neutral_percentage = 0
    else:
        positive_percentage = (positive_count / total_count) * 100
        negative_percentage = (negative_count / total_count) * 100
        neutral_percentage = (neutral_count / total_count) * 100

    # Print the summary report.
    print("情感分析结果：")
    print(f"积极情绪条数：{positive_count}，占比：{positive_percentage:.2f}%")
    print(f"消极情绪条数：{negative_count}，占比：{negative_percentage:.2f}%")
    print(f"中立情绪条数：{neutral_count}，占比：{neutral_percentage:.2f}%")

    return sentiments


if __name__ == '__main__':
    # Download the NLTK stop-word corpus (needed by word_frequency_analysis).
    nltk.download('stopwords')
    # Read and clean the scraped data.
    clear_data = data_clearing()
    print(clear_data)
    # Run the word-frequency analysis.
    word_frequency_analysis(clear_data)
    # Word-cloud analysis (currently disabled).
    # draw_wordcloud(clear_data)
    # Sentiment analysis (currently disabled).
    # sentiments = analyze_sentiment(clear_data)
