import pandas as pd
from collections import defaultdict
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
df = pd.read_csv('new_data.csv', encoding='gbk')

# Keep columns 1..8 of every row (the CSV's leading column is dropped).
data1 = [list(row[1:9]) for row in df.values]

# Rebuild the working DataFrame with named analysis columns.
data = data1
columns = ['index', '用户id', '性别', '点赞数', '评论', '回复量', '评论日期', '哪吒关键词出现次数']
df = pd.DataFrame(data, columns=columns)

# Collaborative-filtering recommender
def collaborative_filtering(user_id, n_rec=5):
    """Recommend up to n_rec comments from the users most similar to user_id.

    Similarity is the cosine similarity between per-user TF-IDF vectors,
    where each user's "document" is the concatenation of all their comments.

    Fixes vs. the original:
    - the original vectorised every comment separately and then reshaped the
      flattened matrix to len(user_comment_matrix) rows — that row count is
      the *maximum comment count per user*, not the number of users, and the
      reshape scrambles TF-IDF feature columns across users;
    - the original dropped sim_scores[0] assuming it was the user themself,
      but after sorting, position 0 is merely the highest score.
    """
    user_comments = defaultdict(list)
    for _, row in df.iterrows():
        # str() guards against NaN comments read back from the CSV.
        user_comments[row['用户id']].append(str(row['评论']))

    user_ids = list(user_comments)
    # One document per user: all of that user's comments joined together.
    documents = [' '.join(comments) for comments in user_comments.values()]

    vectorizer = TfidfVectorizer()
    user_vectors = vectorizer.fit_transform(documents)
    cosine_sim = cosine_similarity(user_vectors)

    user_index = user_ids.index(user_id)
    ranked = sorted(enumerate(cosine_sim[user_index]),
                    key=lambda x: x[1], reverse=True)
    # Exclude the target user by index, not by rank position.
    ranked = [(i, score) for i, score in ranked if i != user_index]

    similar_users = [user_ids[i] for i, _ in ranked[:n_rec]]

    rec_videos = []
    for similar_user in similar_users:
        rec_videos.extend(df[df['用户id'] == similar_user]['评论'].tolist())

    return rec_videos[:n_rec]

# Content-based recommender
def content_based_recommendation(user_id, n_rec=5):
    """Recommend up to n_rec comments textually closest to user_id's comment.

    Uses TF-IDF + cosine similarity over all comments.

    Fixes vs. the original:
    - the user's own comments are excluded by row position; the original
      dropped sim_scores[1:][0], i.e. whichever comment happened to rank
      highest, which is not guaranteed to be the user's own comment;
    - returns [] for an unknown user instead of raising IndexError.
    """
    own_positions = [i for i, uid in enumerate(df['用户id']) if uid == user_id]
    if not own_positions:
        return []
    user_comment = str(df['评论'].iloc[own_positions[0]])

    all_comments = [str(c) for c in df['评论']]

    vectorizer = TfidfVectorizer()
    # The query document is appended last so it shares the vocabulary.
    comment_vectors = vectorizer.fit_transform(all_comments + [user_comment])

    user_vector = comment_vectors[-1]
    other_vectors = comment_vectors[:-1]

    cosine_sim = cosine_similarity(user_vector, other_vectors).flatten()
    ranked = sorted(enumerate(cosine_sim), key=lambda x: x[1], reverse=True)

    own = set(own_positions)
    rec_videos = [all_comments[i] for i, _ in ranked if i not in own]
    return rec_videos[:n_rec]

# Hybrid recommender
def hybrid_recommendation(user_id, n_rec=5):
    """Merge collaborative and content-based recommendations, up to n_rec.

    Fix vs. the original: `list(set(...))` made the result order depend on
    hash ordering, so recommendations were not reproducible between runs.
    Duplicates are now removed while preserving first-seen order
    (collaborative results take precedence).
    """
    cf_rec = collaborative_filtering(user_id, n_rec)
    cb_rec = content_based_recommendation(user_id, n_rec)

    seen = set()
    rec_videos = []
    for video in cf_rec + cb_rec:
        if video not in seen:
            seen.add(video)
            rec_videos.append(video)
    return rec_videos[:n_rec]

# Smoke-test the three recommenders for a single user.
user_id = 256449071441
print("协同过滤推荐结果: " + str(collaborative_filtering(user_id)))
print("基于内容的推荐结果: " + str(content_based_recommendation(user_id)))
print("混合推荐结果: " + str(hybrid_recommendation(user_id)))

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
plt.rcParams['font.family'] = ['SimHei']  # SimHei so Chinese labels render
df = pd.read_csv('new_data.csv', encoding='gbk')
# Re-assemble the working DataFrame from the analysis columns.
data = {
    'index': df['index'],
    '用户id': df['用户id'],
    '性别': df['性别'],
    '点赞数': df['点赞数'],
    '评论': df['评论'],
    '回复量': df['回复量'],
    '评论日期': df['评论日期'],
    '哪吒关键词出现次数': df['哪吒关键词出现次数']
}

df = pd.DataFrame(data)

# User behaviour: like-count distribution
plt.figure(figsize=(40, 10))
sns.countplot(x='点赞数', data=df)
plt.title('用户点赞数分布')
plt.show()

# User behaviour: reply-count distribution
plt.figure(figsize=(30, 10))
sns.countplot(x='回复量', data=df)
plt.title('用户回复量分布')
plt.show()

# Keyword extraction (simple whitespace tokenisation).
# BUG FIX: a missing comment comes back from the CSV as NaN (float), and the
# original `comment.split()` raised AttributeError — skip non-string values.
# NOTE(review): whitespace rarely separates Chinese words; a segmenter such
# as jieba (used later in this file) would give more meaningful tokens.
all_words = []
for comment in df['评论']:
    if isinstance(comment, str):
        all_words.extend(comment.split())

word_counts = Counter(all_words)
top_words = word_counts.most_common(10)

# Keyword visualisation (skipped when no words were found — the original
# zip(*top_words) raised ValueError on an empty list).
if top_words:
    words, counts = zip(*top_words)
    plt.figure(figsize=(10, 6))
    sns.barplot(x=counts, y=words)
    plt.title('最常见的关键词')
    plt.show()

# Gender distribution
plt.figure(figsize=(8, 6))
sns.countplot(x='性别', data=df)
plt.title('用户性别分布')
plt.show()


from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType, LongType
from pyspark.sql.functions import col, from_unixtime

# Build the SparkSession for the comment-analysis job.
spark = SparkSession.builder \
    .appName("NezhaCommentAnalysis") \
    .getOrCreate()

# Explicit schema so column names and types are not inferred from the file.
schema = StructType([
    StructField("index", IntegerType(), True),
    StructField("用户id", StringType(), True),
    StructField("性别", StringType(), True),
    StructField("评论时间", LongType(), True),
    StructField("点赞数", IntegerType(), True),
    StructField("评论", StringType(), True),
    StructField("回复量", IntegerType(), True)
])

# Read the CSV (adjust the encoding to match the actual file if needed).
df = spark.read.csv(
    "哪吒去重.csv",
    header=True,
    schema=schema,
    encoding="GBK",
    escape='"'
)

# Deduplicate on all columns, then drop rows containing nulls.
# (To dedupe on specific keys instead, use
#  df.dropDuplicates(["用户id", "评论时间"]); to keep null rows, use
#  df_cleaned.na.fill({"性别": "未知"}) instead of na.drop().)
df_cleaned = df.dropDuplicates().na.drop()

# Convert the unix 评论时间 column into a 评论日期 timestamp column.
df_transformed = (
    df_cleaned
    .withColumn("评论日期", from_unixtime(col("评论时间")).cast("timestamp"))
    .drop("评论时间")
)

# Inspect the converted timestamps.
df_transformed.select("评论日期").show(5, truncate=False)

from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType

# Keyword counter used by the Spark UDF below.
def count_keyword(comment, keyword):
    """Return how many times keyword occurs in comment (0 for empty/None)."""
    if not comment:
        return 0
    return comment.count(keyword)

# Wrap the counter as a Spark UDF fixed on the keyword "哪吒".
count_keyword_udf = udf(lambda x: count_keyword(x, "哪吒"), IntegerType())

df_transformed = df_transformed.withColumn(
    "哪吒关键词出现次数",
    count_keyword_udf(col("评论"))
)

# Inspect the keyword counts.
df_transformed.select("评论", "哪吒关键词出现次数").show(5, truncate=False)

# Comment volume per gender, busiest first.
gender_stats = (
    df_transformed.groupBy("性别")
    .count()
    .withColumnRenamed("count", "评论数量")
    .orderBy("评论数量", ascending=False)
)

gender_stats.show()

from pyspark.sql.functions import avg

# Average engagement metrics across all comments.
avg_stats = df_transformed.agg(
    avg("点赞数").alias("平均点赞数"),
    avg("回复量").alias("平均回复量")
)

avg_stats.show()

# The ten most-replied comments.
top_comments = (
    df_transformed.orderBy(col("回复量").desc())
    .select("评论", "回复量")
    .limit(10)
)

top_comments.show(truncate=False)

# Persist the cleaned data set.
df_transformed.write.mode("overwrite").csv("cleaned_nezhacomments", header=True)

print("=== 性别统计 ===")
gender_stats.show()

print("=== 平均指标 ===")
avg_stats.show()

print("=== 热门评论 ===")
top_comments.show(truncate=False)

# from pyspark.sql import SparkSession
# from pyspark.sql.types import StructType, StructField, IntegerType, StringType, LongType
# from pyspark.sql.functions import col, from_unixtime
#
# # 创建 SparkSession
# spark = SparkSession.builder \
#     .appName("NezhaCommentAnalysis") \
#     .getOrCreate()
#
# # 手动定义 Schema（确保列名正确）
# schema = StructType([
#     StructField("index", IntegerType(), True),
#     StructField("用户id", StringType(), True),
#     StructField("性别", StringType(), True),
#     StructField("评论时间", LongType(), True),
#     StructField("点赞数", IntegerType(), True),
#     StructField("评论", StringType(), True),
#     StructField("回复量", IntegerType(), True)
# ])
#
# # 读取 CSV 文件（指定编码和 Schema）
# df = spark.read.csv(
#     "哪吒去重.csv",
#     header=True,
#     schema=schema,
#     encoding="GBK",  # 根据实际文件编码调整
#     escape='"'
# )
#
# # 清洗数据（去重、删除空值）
# df_cleaned = df.dropDuplicates().na.drop()
#
# # 转换时间戳
# df_transformed = df_cleaned.withColumn(
#     "评论日期",
#     from_unixtime(col("评论时间")).cast("timestamp")
# ).drop("评论时间")
#
# # 查看结果
# df_transformed.printSchema()
# df_transformed.show(5, truncate=False)

import pandas as pd
from snownlp import SnowNLP
import jieba
from collections import Counter
df = pd.read_csv('new_data.csv', encoding='gbk')
# Re-assemble the working DataFrame from the analysis columns.
columns = ['index', '用户id', '性别', '点赞数', '评论', '回复量',
           '评论日期', '哪吒关键词出现次数']
df = pd.DataFrame({name: df[name] for name in columns})

# Sentiment scoring helper.
def sentiment_analysis(text):
    """Return SnowNLP's sentiment score for text (treated downstream as
    positive when > 0.5 and negative when < 0.5)."""
    return SnowNLP(text).sentiments

# Keyword extraction helper.
def keyword_extraction(text, topK=5):
    """Segment text with jieba and return its topK most frequent tokens."""
    return [word for word, _ in Counter(jieba.lcut(text)).most_common(topK)]

# Run sentiment analysis and keyword extraction on every comment.
# BUG FIX: comments loaded from CSV can be NaN (float); SnowNLP and jieba
# expect strings, so cast to str first instead of crashing.
df['情感得分'] = df['评论'].astype(str).apply(sentiment_analysis)
df['关键词'] = df['评论'].astype(str).apply(keyword_extraction)

# Pool every comment's keywords and count frequencies.
all_keywords = [keyword for keywords_list in df['关键词'] for keyword in keywords_list]
keyword_counts = Counter(all_keywords)

# Hot topics = the five most frequent keywords overall.
hot_topics = keyword_counts.most_common(5)

# Sentiment buckets: > 0.5 positive, < 0.5 negative, exactly 0.5 neutral
# (the original spelled "neutral" as `>= 0.5 & <= 0.5`, i.e. == 0.5).
positive_count = len(df[df['情感得分'] > 0.5])
negative_count = len(df[df['情感得分'] < 0.5])
neutral_count = len(df[df['情感得分'] == 0.5])

# Report the results.
print("每条评论的情感得分和关键词：")
print(df[['评论', '情感得分', '关键词']])
print("\n热点话题：")
print(hot_topics)
print("\n用户情感倾向：")
print(f"积极评论数量: {positive_count}")
print(f"消极评论数量: {negative_count}")
print(f"中立评论数量: {neutral_count}")

import pandas as pd
from textblob import TextBlob
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
import nltk

# The stopword corpus must be downloaded before nltk.corpus.stopwords works.
nltk.download('stopwords')
from nltk.corpus import stopwords

# 1. Load the comment dataset (GBK-encoded CSV).
data = pd.read_csv("new_data.csv", encoding='gbk')

# 2. Sentiment analysis: classify each comment by TextBlob polarity.
def sentiment_analysis(comment):
    """Map TextBlob polarity to 积极 / 消极 / 中立.

    NOTE(review): TextBlob's polarity model targets English text; for the
    Chinese comments here it will usually return 0 (中立) — confirm whether
    that is acceptable for this analysis.
    """
    polarity = TextBlob(comment).sentiment.polarity
    if polarity > 0:
        return '积极'
    if polarity < 0:
        return '消极'
    return '中立'

data['情感倾向'] = data['评论'].apply(sentiment_analysis)

# 3. Keyword extraction: TF-IDF over the comments, keeping the 10 strongest
# features. NOTE(review): the default tokenizer splits on whitespace/word
# boundaries, which does not segment Chinese — verify the extracted features.
stop_words = stopwords.words('chinese')
vectorizer = TfidfVectorizer(stop_words=stop_words, max_features=10)
X = vectorizer.fit_transform(data['评论'])
keywords = vectorizer.get_feature_names_out()

# 4. Topic modelling with LDA (3 topics, fixed seed for reproducibility).
lda = LatentDirichletAllocation(n_components=3, random_state=42)
lda.fit(X)

# Print the top-10 keywords of each topic.
for topic_idx, topic in enumerate(lda.components_):
    print(f"主题 {topic_idx}:")
    print(" ".join(keywords[i] for i in topic.argsort()[:-11:-1]))

# 5. User profiling: summarise behaviour (likes, replies, sentiment) per row.
def user_profile(row):
    """Build a profile dict (互动指数/情感倾向/关注点) from one comment row."""
    return {
        # Engagement = likes + replies.
        '互动指数': row['点赞数'] + row['回复量'],
        '情感倾向': row['情感倾向'],
        # Flag comments mentioning "全明星" as hot-topic interest.
        '关注点': '热门话题' if '全明星' in row['评论'] else '其他',
    }

data['用户画像'] = data.apply(user_profile, axis=1)

# 6. Show the per-comment sentiment results.
print("\n用户评论情感分析结果：")
print(data[['用户id', '评论', '情感倾向', '用户画像']])

# 7. Re-print the per-topic keyword lists.
print("\n每个主题的前10个关键词：")
for topic_idx, topic in enumerate(lda.components_):
    print(f"主题 {topic_idx}:")
    print(" ".join(keywords[i] for i in topic.argsort()[:-11:-1]))


import json
# 导入数据请求模块
import requests
# 导入csv模块
import csv
# 导入哈希模块
import hashlib
# 导入时间模块
import time
# 导入编码的方法
from urllib.parse import quote

def GetW(wts, NextPage):
    # 进行编码处理
    pagination_str = quote(NextPage)
    """w_rid加密参数"""
    l = [
        "mode=2",
        "oid=710125501",
        f"pagination_str={pagination_str}",
        "plat=1",
        "type=1",
        "web_location=1315875",
        f"wts={wts}"
    ]
    y = '&'.join(l)
    string = y + "ea1db124af3c7062474693fa704f4ff8"
    MD5 = hashlib.md5()
    MD5.update(string.encode('utf-8'))
    w_rid = MD5.hexdigest()
    print(w_rid)
    return w_rid
def GetContent(offset, num):
    """Fetch one page of comments and append each row via the global csv_writer.

    offset: JSON-encoded pagination cursor ('""' for the first page).
    num: page number supplied by the caller's loop (not used in the request).
    Returns the JSON-encoded cursor for the next page.
    """
    # Minimal browser identity so the API accepts the request.
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
    }
    url = 'https://api.bilibili.com/x/v2/reply/wbi/main'
    pagination_str = '{"offset": %s}' % offset

    # Sign the query with the current timestamp.
    wts = int(time.time())
    w_rid = GetW(wts=wts, NextPage=pagination_str)

    query_params = {
        'oid': '710125501',
        'type': '1',
        'mode': '2',
        'pagination_str': pagination_str,
        'plat': '1',
        'web_location': '1315875',
        'w_rid': w_rid,
        'wts': wts,
    }

    # Request the page and decode the JSON body.
    response = requests.get(url=url, params=query_params, headers=headers)
    json_data = response.json()

    # Each element of `replies` is one comment.
    for reply in json_data['data']['replies']:
        record = {
            '性别': reply['member']['sex'],
            '评论': reply['content']['message'],
            '点赞数': reply['like'],
            '评论时间': reply['ctime'],
            '用户id': reply['rpid'],
            '回复量': reply['rcount']
        }
        print(record)
        csv_writer.writerow(record)

    # JSON-encode the next cursor so it can be embedded into the next
    # pagination_str verbatim.
    next_cursor = json_data['data']['cursor']['pagination_reply']['next_offset']
    return json.dumps(next_cursor)

if __name__ == '__main__':
    # utf-8-sig so Excel detects the encoding; the context manager guarantees
    # the file is flushed and closed (the original leaked the handle).
    with open(file='哪吒.csv', mode='a', encoding='utf-8-sig', newline='') as f:
        # Dict-based writer keyed on the output columns.
        csv_writer = csv.DictWriter(
            f, fieldnames=['用户id', '性别', '评论时间', '点赞数', '评论', '回复量'])
        csv_writer.writeheader()
        offset = '""'
        for page in range(1, 1000):
            try:
                offset = GetContent(offset=offset, num=page)
            except Exception as e:
                # The original `pass` silently retried the same dead cursor
                # for the remaining pages; report the failure and stop.
                print(f"第{page}页抓取失败: {e}")
                break
