import re  # 正则表达式提取文本
import requests  # 爬虫发送请求
from bs4 import BeautifulSoup as BS  # 爬虫解析页面
import time
from snownlp import SnowNLP
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from wordcloud import WordCloud
from PIL import Image
import os
from collections import Counter
import jieba.analyse
# Request header so Bilibili's API treats the request like a normal browser visit.
headers = {
        'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36 Core/1.94.225.400 QQBrowser/12.2.5544.400" }
bv = '1YY41157dk'  # BV id of the target video (without the "BV" prefix)
# Human-facing video page URL; stored per-row in the output CSV later on.
v_url = "https://www.bilibili.com/video/BV1YY41157dk/?vd_source=dab90677ef9cae3f045de5586075c9dc"
# Step 1: resolve the video's cid via the pagelist API.
r1 = requests.get(
    url='https://api.bilibili.com/x/player/pagelist?bvid='+bv,
    headers=headers,
    timeout=10)  # fail fast instead of hanging forever on a dead connection
html1 = r1.json()
# The API reports success with code == 0; anything else means no cid.
if html1['code'] != 0:
    raise Exception("Failed to get CID")
cid = html1['data'][0]['cid']  # cid of the first page of the video
print('该视频的cid是:', cid)
# Step 2: the danmaku (bullet comments) are served as XML keyed by cid.
danmu_url = 'http://comment.bilibili.com/{}.xml'.format(cid)
print('弹幕地址是：', danmu_url)
r2 = requests.get(danmu_url, headers=headers, timeout=10)
print(r2.status_code)
print(r2.apparent_encoding)  # inspect the detected source-page encoding
html2 = r2.content.decode('utf-8')
soup = BS(html2, 'xml')  # each danmaku is a <d> element in the XML
danmu_list = soup.find_all('d')
print('共爬取到{}条弹幕'.format(len(danmu_list)))
video_url_list = []  # video page URL (identical for every row)
danmu_url_list = []  # danmaku XML URL (identical for every row)
time_list = []  # wall-clock time each danmaku was sent
text_list = []  # danmaku text content
for item in danmu_list:
    # Attribute 'p' is a comma-separated metadata string; field index 4
    # holds the unix timestamp of when the danmaku was posted.
    meta = item['p'].split(',')
    sent_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(meta[4])))
    video_url_list.append(v_url)
    danmu_url_list.append(danmu_url)
    time_list.append(sent_at)
    text_list.append(item.text)
    print('{}:{}'.format(sent_at, item.text))
 # 保存数据到DataFrame
df = pd.DataFrame({
        '视频地址': video_url_list,
        '弹幕地址': danmu_url_list,
        '弹幕时间': time_list,
        '弹幕内容': text_list
    })

v_result_file = 'bilibili_dfanmu.csv'
if os.path.exists(v_result_file):  # 如果文件存在，不需写入字段标题
    header = None
else:  # 如果文件不存在，说明是第一次新建文件，需写入字段标题
    header = ['视频地址', '弹幕地址', '弹幕时间', '弹幕内容']
df.to_csv(v_result_file, encoding='utf_8_sig', mode='a+', index=False, header=header)  # 数据保存到csv文件
print("Data has been saved to", v_result_file)
# Sentiment scoring and labelling
def sentiment_analyse(v_cmt_list):
    """Score each comment with SnowNLP and tag it positive/negative.

    Side effects: adds the '情感得分' and '分析结果' columns to the global
    ``df`` and exports the result to 谷爱凌_情感评分结果.xlsx.

    :param v_cmt_list: list of comment strings to score
    :return: None
    """
    score_list = []  # sentiment score per comment (SnowNLP: 0.0-1.0)
    tag_list = []  # textual label per comment
    pos_count = 0  # counter - positive
    neg_count = 0  # counter - negative
    for comment in v_cmt_list:
        sentiments_score = SnowNLP(comment).sentiments
        # Scores below 0.3 are treated as negative, everything else positive.
        if sentiments_score < 0.3:
            tag = '消极'
            neg_count += 1
        else:
            tag = '积极'
            pos_count += 1
        score_list.append(sentiments_score)  # score value
        tag_list.append(tag)  # label
    total = pos_count + neg_count
    if total == 0:
        # Empty input: nothing to report or export; the original code would
        # raise ZeroDivisionError here.
        return
    print('积极评价占比：', round(pos_count / total, 4))
    print('消极评价占比：', round(neg_count / total, 4))
    df['情感得分'] = score_list
    df['分析结果'] = tag_list
    # Save the sentiment results to an Excel file.
    df.to_excel('谷爱凌_情感评分结果.xlsx', index=None)
    print('情感分析结果已生成：谷爱凌_情感评分结果.xlsx')
sentiment_analyse(text_list)
# Pie chart of the positive/negative split.
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei so Chinese labels render
grp = df['分析结果'].value_counts()
print('正负面评论统计：')
print(grp)
grp.plot.pie(y='分析结果', autopct='%.2f%%')  # draw the pie chart with percentages
plt.title('谷爱凌弹幕_情感分布占比图')
plt.savefig('谷爱凌弹幕_情感分布占比图.png')  # save the figure to disk

# Top-10 high-frequency words in the danmaku, via jieba's TF-IDF extractor.
file_path = '谷爱凌_情感评分结果.xlsx'
data = pd.read_excel(file_path, sheet_name='Sheet1')
# Merge every danmaku into one long string for keyword extraction.
texts = data['弹幕内容'].astype(str)
danmu_text = ' '.join(texts)
# Extract the 10 strongest keywords together with their TF-IDF weights.
keywords_top10 = jieba.analyse.extract_tags(danmu_text, withWeight=True, topK=10)
print('top10关键词及权重：')
for word, w in keywords_top10:
    print(f'{word}: {w}')
def make_wordcloud(v_str, v_outfile):
    """Render a word cloud image from a text string.

    Stopwords are read from tingyongciku.txt and the cloud is shaped by
    the guailing1.jpg mask image. Any failure is caught and printed.

    :param v_str: input text to tokenize (jieba) and plot
    :param v_outfile: output image file path
    :return: None
    """
    print('开始生成词云图：{}'.format(v_outfile))
    try:
        # `with` closes the stopword file promptly instead of leaking the handle.
        with open("tingyongciku.txt", encoding="utf8") as f:
            v_stopwords = f.read().split("\n")
        backgroud_Image = np.array(Image.open('guailing1.jpg'))  # mask image
        wc = WordCloud(
            background_color="white",  # background colour
            width=1500,  # image width
            height=1200,  # image height
            max_words=1000,  # word cap
            # Raw string: "\W" / "\F" / "\s" are invalid escape sequences and
            # emit warnings on modern Python.
            font_path=r"C:\Windows\Fonts\simhei.ttf",
            stopwords=v_stopwords,  # stopword list
            mask=backgroud_Image,  # shape mask
        )
        jieba_text = " ".join(jieba.lcut(v_str))  # whitespace-join jieba tokens
        wc.generate_from_text(jieba_text)  # build the cloud
        wc.to_file(v_outfile)  # save the image file
        print('词云文件保存成功：{}'.format(v_outfile))
    except Exception as e:
        print('make_wordcloud except: {}'.format(str(e)))
make_wordcloud(v_str=danmu_text, v_outfile='谷爱凌弹幕词云图1.png')
# Danmaku volume per time period.
# Parse the timestamp strings and index the frame by them for resampling.
data['弹幕时间'] = pd.to_datetime(data['弹幕时间'])
data.set_index('弹幕时间', inplace=True)
# Bucket the danmaku counts by calendar quarter.
# NOTE(review): the 'Q' alias is deprecated in pandas >= 2.2 in favour of
# 'QE' — confirm the installed pandas version before changing it.
danmu_count_by_quarter = data.resample('Q').size()
# Bar chart of the per-quarter counts.
plt.figure(figsize=(10, 6))
danmu_count_by_quarter.plot(kind='bar', color='skyblue')
plt.title('不同时间段弹幕数量统计')
plt.xlabel('时间')
plt.ylabel('弹幕数量')
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()
