# -*- coding: utf-8 -*-
import os
import requests
from bs4 import BeautifulSoup
import jieba
import jieba.analyse
from snownlp import SnowNLP
import pandas as pd

# 设置Matplotlib后端为Agg（只保存图片不显示）
import matplotlib

matplotlib.use('Agg')  # 添加这行代码在导入plt之前
import matplotlib.pyplot as plt

from wordcloud import WordCloud
import re
from collections import Counter
import seaborn as sns
import numpy as np

# Global Matplotlib configuration
plt.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font so Chinese labels render
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign displayable alongside CJK fonts


# ====== 确保词典文件存在 ======
def ensure_file_exists(filename, default_content=None):
    """Create *filename* with one *default_content* item per line if missing.

    Does nothing when the file already exists.

    Args:
        filename: path of the UTF-8 text file to check/create.
        default_content: iterable of lines to seed a newly created file with;
            ``None`` (the default) means create an empty file.
    """
    # Use None instead of a mutable [] default to avoid the shared-default pitfall.
    if default_content is None:
        default_content = []
    if not os.path.exists(filename):
        with open(filename, 'w', encoding='utf-8') as f:
            for item in default_content:
                f.write(item + '\n')
        # Bug fix: the message previously printed the literal text "(unknown)"
        # because the f-string had no placeholder for the file name.
        print(f"已创建默认文件: {filename}")


# Ensure the custom jieba dictionary exists (artist names, genres and song
# titles that should be kept as single tokens during segmentation).
ensure_file_exists('user_dict.txt', [
    '周杰伦', '林俊杰', '陈奕迅', '张学友', '王菲',
    '中国风', 'R&B', '流行音乐', '华语乐坛', '歌词',
    '告白气球', '稻香', '夜曲', '旋律', '节奏'
])

# Ensure the stopword list exists (common Chinese function words and
# pronouns that should be excluded from word-frequency statistics).
ensure_file_exists('stopwords.txt', [
    '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要',
    '去', '你',
    '会', '着', '没有', '看', '好', '自己', '这', '里', '呢', '我们', '你们', '他们', '她', '他', '它', '啊', '哦',
    '嗯', '吧', '呀'
])

# Register the custom dictionary so jieba keeps these domain terms intact.
jieba.load_userdict('user_dict.txt')

# Keyword lexicons for the spatial/temporal analysis: Chinese city names and
# time-related concepts whose occurrences are counted in the lyrics.
CITY_KEYWORDS = ['北京', '上海', '广州', '深圳', '台北', '香港', '成都', '南京', '杭州', '重庆', '天津', '武汉', '苏州',
                 '西安', '长沙']
TIME_KEYWORDS = ['时光', '岁月', '昨天', '今天', '明天', '青春', '童年', '未来', '过去', '季节', '春天', '夏天', '秋天',
                 '冬天', '分钟', '小时']


class LyricAnalyzer:
    """Crawl a singer's lyrics from the NetEase Cloud Music web API and run
    sentiment, word-frequency and spatial/temporal keyword analyses.

    Results are stored on the instance (``sentiment_df``, ``top_words``,
    ``city_df``, ``time_df``) and exported as PNG and CSV files.
    """

    # Seconds before an HTTP request is abandoned instead of hanging forever.
    REQUEST_TIMEOUT = 10

    def __init__(self, artist_name):
        """
        Args:
            artist_name: display name of the singer to analyse (e.g. "周杰伦").
        """
        self.artist_name = artist_name
        self.all_lyrics = ""  # all cleaned lyrics concatenated, newline-separated
        self.songs = []       # list of {'title': ..., 'lyrics': ...} dicts

    def crawl_lyrics(self, max_songs=50):
        """Fetch up to *max_songs* hot songs' lyrics via the NetEase API.

        Populates ``self.songs`` and ``self.all_lyrics``.

        Args:
            max_songs: maximum number of songs to download.

        Returns:
            Number of songs whose lyrics were successfully retrieved; 0 on
            failure (errors are printed, not raised).
        """
        # Step 1: search for the artist id (type=100 = artist search).
        search_url = f"https://music.163.com/api/search/get/web?csrf_token=hlpretag=&hlposttag=&s={self.artist_name}&type=100"
        try:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Referer': 'https://music.163.com/'
            }

            # Fix: timeout added so a stalled connection cannot hang the crawl.
            response = requests.get(search_url, headers=headers,
                                    timeout=self.REQUEST_TIMEOUT)
            result = response.json()

            # Take the first returned artist whose name contains the query.
            artist_id = None
            for artist in result['result']['artists']:
                if self.artist_name in artist['name']:
                    artist_id = artist['id']
                    break

            if not artist_id:
                print(f"未找到歌手: {self.artist_name}")
                return 0

            # Step 2: fetch the artist page, which carries the hot-song list.
            songs_url = f"https://music.163.com/api/artist/{artist_id}"
            response = requests.get(songs_url, headers=headers,
                                    timeout=self.REQUEST_TIMEOUT)
            artist_data = response.json()

            # Step 3: download and clean each song's lyric text.
            song_count = 0
            for song in artist_data['hotSongs']:
                if song_count >= max_songs:
                    break

                lyric_url = f"https://music.163.com/api/song/lyric?id={song['id']}&lv=1"
                lyric_response = requests.get(lyric_url, headers=headers,
                                              timeout=self.REQUEST_TIMEOUT)
                lyric_data = lyric_response.json()

                if 'lrc' in lyric_data and lyric_data['lrc'].get('lyric'):
                    lyric_text = lyric_data['lrc']['lyric']
                    lyric_text = re.sub(r'\[.*?\]', '', lyric_text)  # strip [mm:ss] time tags
                    lyric_text = re.sub(r'\s{2,}', ' ', lyric_text)  # collapse runs of whitespace

                    self.songs.append({
                        'title': song['name'],
                        'lyrics': lyric_text
                    })
                    self.all_lyrics += lyric_text + "\n"
                    song_count += 1
                    print(f"已获取: {song['name']}")

            return song_count

        except Exception as e:
            # Network errors, unexpected JSON shapes, etc.: report and return 0
            # so the caller can abort the analysis gracefully.
            print(f"爬取失败: {str(e)}")
            return 0

    def analyze_sentiment(self):
        """Score each crawled song's lyrics with SnowNLP and label it.

        Returns:
            ``pandas.DataFrame`` with columns 歌曲/情感分值/情感分类 (also
            stored as ``self.sentiment_df``), or ``None`` when no songs
            have been crawled yet.
        """
        if not self.songs:
            print("请先爬取歌词数据")
            return

        results = []
        for song in self.songs:
            try:
                s = SnowNLP(song['lyrics'])
                sentiment = s.sentiments  # positive-probability score in [0, 1]

                # Three-way classification with a neutral band (0.45–0.65).
                if sentiment > 0.65:
                    sentiment_label = '积极'
                elif sentiment < 0.45:
                    sentiment_label = '消极'
                else:
                    sentiment_label = '中性'

                results.append({
                    '歌曲': song['title'],
                    '情感分值': sentiment,
                    '情感分类': sentiment_label
                })
            except Exception:
                # Fix: was a bare ``except`` that also swallowed
                # KeyboardInterrupt/SystemExit. Skip unscoreable songs only.
                continue

        self.sentiment_df = pd.DataFrame(results)
        return self.sentiment_df

    def generate_word_frequency(self, top_n=30):
        """Tokenise all collected lyrics and count the most frequent words.

        Args:
            top_n: how many of the highest-frequency words to keep.

        Returns:
            List of ``(word, count)`` tuples (also stored as ``self.top_words``).
        """
        # ``ensure_file_exists`` already checks for existence, so the former
        # redundant ``os.path.exists`` wrapper around this call was dropped.
        ensure_file_exists('stopwords.txt', [
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到',
            '说', '要', '去', '你',
            '会', '着', '没有', '看', '好', '自己', '这', '里', '呢', '我们', '你们', '他们', '她', '他', '它',
            '啊', '哦', '嗯', '吧', '呀'
        ])

        words = jieba.lcut(self.all_lyrics)
        # Fix: the stopword file was previously opened without ever being
        # closed; a context manager guarantees the handle is released.
        with open('stopwords.txt', encoding='utf-8') as f:
            stopwords = set(f.read().splitlines())
        # Drop single characters (mostly particles) and stopwords.
        filtered_words = [word for word in words
                          if len(word) > 1 and word not in stopwords]

        word_counts = Counter(filtered_words)
        self.top_words = word_counts.most_common(top_n)

        return self.top_words

    def analyze_spatial_temporal(self):
        """Count occurrences of city names and time concepts in the lyrics.

        Returns:
            ``(city_df, time_df)`` DataFrames with columns 城市/次数 and
            时间词/次数 (also stored on the instance).
        """
        # Build the counts directly; the old zero-initialised dicts were
        # immediately overwritten anyway.
        city_counts = {city: self.all_lyrics.count(city) for city in CITY_KEYWORDS}
        time_counts = {word: self.all_lyrics.count(word) for word in TIME_KEYWORDS}

        self.city_df = pd.DataFrame(list(city_counts.items()), columns=['城市', '次数'])
        self.time_df = pd.DataFrame(list(time_counts.items()), columns=['时间词', '次数'])

        return self.city_df, self.time_df

    def visualize_results(self):
        """Render the analysis as PNG files (2x2 dashboard + sentiment trend).

        Requires ``analyze_sentiment``, ``generate_word_frequency`` and
        ``analyze_spatial_temporal`` to have been run first.
        """
        if not hasattr(self, 'sentiment_df') or not hasattr(self, 'top_words'):
            print("请先进行分析")
            return

        plt.figure(figsize=(18, 12))

        # 1. Sentiment distribution pie chart.
        plt.subplot(2, 2, 1)
        sentiment_counts = self.sentiment_df['情感分类'].value_counts()
        plt.pie(sentiment_counts, labels=sentiment_counts.index, autopct='%1.1f%%',
                colors=['#66b3ff', '#ff9999', '#99ff99'], startangle=90)
        plt.title(f'{self.artist_name}歌词情感分布')

        # 2. High-frequency word cloud (SimHei font file needed for CJK glyphs).
        plt.subplot(2, 2, 2)
        word_freq = dict(self.top_words)
        wc = WordCloud(
            font_path='simhei.ttf',
            background_color='white',
            max_words=100,
            width=600,
            height=400
        ).generate_from_frequencies(word_freq)
        plt.imshow(wc, interpolation='bilinear')
        plt.axis('off')
        plt.title(f'{self.artist_name}歌词高频词云')

        # 3. Top-10 mentioned cities. ``hue`` + ``legend=False`` avoids the
        # seaborn deprecation warning for palette-without-hue.
        plt.subplot(2, 2, 3)
        city_df_sorted = self.city_df.sort_values('次数', ascending=False).head(10)
        sns.barplot(x='城市', y='次数', data=city_df_sorted, hue='城市', palette='viridis', legend=False)
        plt.title(f'{self.artist_name}歌词中的城市提及次数')
        plt.xticks(rotation=45)

        # 4. Top-10 time concepts.
        plt.subplot(2, 2, 4)
        time_df_sorted = self.time_df.sort_values('次数', ascending=False).head(10)
        sns.barplot(x='时间词', y='次数', data=time_df_sorted, hue='时间词', palette='magma', legend=False)
        plt.title(f'{self.artist_name}歌词中的时间概念提及')
        plt.xticks(rotation=45)

        plt.tight_layout()
        plt.savefig(f'{self.artist_name}_歌词分析.png', dpi=300)
        # Fix: close each figure after saving — under the Agg backend they
        # otherwise accumulate in memory across calls.
        plt.close()

        # Per-song sentiment trend line.
        plt.figure(figsize=(12, 6))
        self.sentiment_df['情感分值'].plot(kind='line', marker='o', alpha=0.7)
        plt.axhline(y=0.5, color='r', linestyle='--')  # neutral reference line
        plt.title(f'{self.artist_name}歌曲情感趋势')
        plt.ylabel('情感分值')
        plt.xlabel('歌曲序列')
        plt.grid(True)
        plt.savefig(f'{self.artist_name}_情感趋势.png', dpi=300)
        plt.close()

        print(f"可视化结果已保存为图片文件")

    def full_analysis(self, max_songs=30):
        """Run the complete pipeline: crawl → analyse → visualise → export CSVs.

        Args:
            max_songs: upper bound on the number of songs to crawl.
        """
        print(f"开始分析歌手: {self.artist_name}")
        song_count = self.crawl_lyrics(max_songs)
        print(f"成功获取 {song_count} 首歌曲歌词")

        if song_count > 0:
            self.analyze_sentiment()
            self.generate_word_frequency()
            self.analyze_spatial_temporal()
            self.visualize_results()

            # utf-8-sig (BOM) so Excel opens the CSVs with correct Chinese text.
            self.sentiment_df.to_csv(f'{self.artist_name}_情感分析.csv', index=False, encoding='utf-8-sig')
            pd.DataFrame(self.top_words, columns=['词语', '频次']).to_csv(f'{self.artist_name}_词频统计.csv',
                                                                          index=False, encoding='utf-8-sig')
            self.city_df.to_csv(f'{self.artist_name}_城市偏好.csv', index=False, encoding='utf-8-sig')
            self.time_df.to_csv(f'{self.artist_name}_时间偏好.csv', index=False, encoding='utf-8-sig')

            print(f"分析完成! 结果已保存到文件")
        else:
            print("未获取到歌词数据，分析终止")


# ===================== Usage example =====================
if __name__ == "__main__":
    # Run the full pipeline on Jay Chou's lyrics.
    analyzer = LyricAnalyzer("周杰伦")
    analyzer.full_analysis(max_songs=20)

    # Print a short preview of whichever result sets were produced.
    sentiment_df = getattr(analyzer, 'sentiment_df', None)
    if sentiment_df is not None:
        print("\n情感分析样例:")
        print(sentiment_df.head())

    top_words = getattr(analyzer, 'top_words', None)
    if top_words is not None:
        print("\n高频词汇:")
        print(top_words[:10])

    city_df = getattr(analyzer, 'city_df', None)
    if city_df is not None:
        print("\n城市偏好:")
        print(city_df.sort_values('次数', ascending=False).head())

    time_df = getattr(analyzer, 'time_df', None)
    if time_df is not None:
        print("\n时间偏好:")
        print(time_df.sort_values('次数', ascending=False).head())