import matplotlib.pyplot as plt
import pandas as pd
import jieba
from wordcloud import WordCloud
from snownlp import SnowNLP
from collections import Counter
import numpy as np
import csv
import os

# Configure matplotlib for Chinese labels: use the SimHei font, and keep the
# minus sign renderable (SimHei lacks the Unicode minus glyph by default).
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


def load_data():
    """Load the scraped movie and comment CSV files from ``douban_data/``.

    Returns:
        tuple: (movies, comments) where ``movies`` is a list of dicts with
        ``rating`` coerced to float and ``year`` coerced to int (or ``None``
        when the field is empty), and ``comments`` is a list of raw row dicts.
        Returns (None, None) when either input file is missing.
    """
    if not os.path.exists('douban_data/movies.csv') or not os.path.exists('douban_data/all_comments.csv'):
        print("数据文件不存在，请先运行爬虫程序！")
        return None, None

    movies = []
    with open('douban_data/movies.csv', 'r', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            row['rating'] = float(row['rating'])
            # Normalize an empty year to None instead of leaving the empty
            # string: '' passes an `is not None` check downstream and would
            # mix str/int values in the year statistics (TypeError in min/max).
            row['year'] = int(row['year']) if row['year'] else None
            movies.append(row)

    comments = []
    with open('douban_data/all_comments.csv', 'r', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            comments.append(row)

    print(f"已加载 {len(movies)} 部电影和 {len(comments)} 条评论")
    return movies, comments


def analyze_ratings(movies):
    """Plot a histogram of the movies' ratings, save it, and print summary stats."""
    scores = [m['rating'] for m in movies]
    mean_rating = np.mean(scores)

    plt.figure(figsize=(10, 6))
    plt.hist(scores, bins=20, alpha=0.7, color='skyblue', edgecolor='black')
    plt.title('豆瓣Top100电影评分分布直方图', fontsize=16)
    plt.xlabel('评分', fontsize=12)
    plt.ylabel('电影数量', fontsize=12)
    plt.grid(True, alpha=0.3)

    # Mark the mean with a dashed vertical reference line.
    plt.axvline(mean_rating, color='red', linestyle='--', label=f'平均评分: {mean_rating:.2f}')
    plt.legend()

    plt.tight_layout()
    plt.savefig('douban_data/评分分布.png', dpi=300, bbox_inches='tight')
    plt.show()

    print(f"评分统计：平均分 {mean_rating:.2f}，最高分 {max(scores)}，最低分 {min(scores)}")


def analyze_years(movies):
    """Bar-chart how many movies were released in each year and print the range."""
    years = [m['year'] for m in movies if m['year'] is not None]

    plt.figure(figsize=(12, 6))
    # Count occurrences per year, then plot in chronological order.
    pairs = sorted(Counter(years).items())
    xs = [year for year, _ in pairs]
    ys = [count for _, count in pairs]

    plt.bar(xs, ys, alpha=0.7, color='lightcoral')
    plt.title('豆瓣Top100电影年份分布', fontsize=16)
    plt.xlabel('年份', fontsize=12)
    plt.ylabel('电影数量', fontsize=12)
    plt.xticks(rotation=45)
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig('douban_data/年代分布1.png', dpi=300, bbox_inches='tight')
    plt.show()

    print(f"年份统计：最早 {min(years)}年，最晚 {max(years)}年")


def create_wordcloud(comments):
    """Segment all comment text with jieba and render a word cloud image."""
    corpus = ' '.join(c['content'] for c in comments)

    # Chinese stop words plus review-domain boilerplate to drop before rendering.
    stop_words = {'的', '了', '是', '在', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '还', '这', '电影', '影片', '故事', '剧情', '演员', '导演', '作品', '片子', '这部', "还是",
                  '那个', '这个', '可以', '但是', '如果', '因为', '所以', '虽然', '然后', '或者', '而且', '不过', '只是', '真的', '觉得', '感觉', '什么', '怎么', '为什么', '那么', '这么', '比较', '应该', '可能', '确实', '非常', '特别', '还有', '已经', '一直', '现在', '时候', '开始', '最后', '结果', '如此', '这样', '那样', "不是", "就是", "其实", "有点", "为了"}

    # Keep multi-character tokens that are not stop words.
    text = ' '.join(
        token for token in jieba.cut(corpus)
        if len(token) > 1 and token not in stop_words
    )

    # NOTE(review): hard-coded Windows font path — non-portable; confirm target OS.
    wordcloud = WordCloud(
        font_path='C:/Windows/Fonts/simhei.ttf',
        width=800,
        height=400,
        background_color='white',
        max_words=100,
        colormap='viridis'
    ).generate(text)

    plt.figure(figsize=(12, 6))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.title('短评词云图', fontsize=16)
    plt.axis('off')

    plt.tight_layout()
    plt.savefig('douban_data/词云.png', dpi=300, bbox_inches='tight')
    plt.show()


def sentiment_analysis(comments):
    """Score each comment's sentiment with SnowNLP and plot/summarize the results.

    Scores are in [0, 1] (0 = negative, 1 = positive); >0.6 counts as positive,
    <0.4 as negative, the rest neutral. Saves a pie + histogram figure and
    prints a summary of all three categories.
    """
    if not comments:
        # Guard: avoid ZeroDivisionError in the percentage summary below.
        print("没有评论数据，跳过情感分析。")
        return

    sentiments = []

    print("正在进行情感分析...")
    for i, comment in enumerate(comments):
        if i % 1000 == 0:
            print(f"已分析 {i}/{len(comments)} 条评论")

        try:
            s = SnowNLP(comment['content'])
            sentiments.append(s.sentiments)
        except Exception:
            # Was a bare `except:` — that also swallowed KeyboardInterrupt/SystemExit.
            # Fall back to a neutral score when SnowNLP fails on odd input.
            sentiments.append(0.5)

    positive = sum(1 for s in sentiments if s > 0.6)
    negative = sum(1 for s in sentiments if s < 0.4)
    neutral = len(sentiments) - positive - negative

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))

    labels = ['积极', '中性', '消极']
    sizes = [positive, neutral, negative]
    colors = ['#90EE90', '#FFD700', '#FF6B6B']
    ax1.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90)
    ax1.set_title('短评情感分布', fontsize=14)

    ax2.hist(sentiments, bins=30, alpha=0.7, color='skyblue', edgecolor='black')
    ax2.set_title('情感分数分布', fontsize=14)
    ax2.set_xlabel('情感分数 (0=消极, 1=积极)')
    ax2.set_ylabel('评论数量')
    ax2.axvline(np.mean(sentiments), color='red', linestyle='--', label=f'平均分: {np.mean(sentiments):.3f}')
    ax2.legend()

    plt.tight_layout()
    plt.savefig('douban_data/情感分析.png', dpi=300, bbox_inches='tight')
    plt.show()

    # Original summary line was truncated (ended mid-sentence after the positive
    # count) — report all three categories.
    total = len(sentiments)
    print(f"情感分析结果：积极 {positive} 条({positive/total*100:.1f}%)，"
          f"中性 {neutral} 条({neutral/total*100:.1f}%)，"
          f"消极 {negative} 条({negative/total*100:.1f}%)")


if __name__ == "__main__":
    # Bug fix: the original called main(), which is not defined anywhere in
    # this file (guaranteed NameError). Run the analysis pipeline explicitly.
    movies, comments = load_data()
    if movies is not None and comments is not None:
        analyze_ratings(movies)
        analyze_years(movies)
        create_wordcloud(comments)
        sentiment_analysis(comments)