import pandas as pd
import re
import pymysql
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import pairwise_distances
# from kafka import KafkaConsumer, KafkaProducer
# import json
import numpy as np
# from course.mongo_models import UserCourseView
from mongoengine import connect

# Module-level MySQL connection, shared by all query helpers below.
# NOTE(review): credentials are hard-coded in source — move host/user/password
# to environment variables or a config file before this ships.
con = pymysql.connect(host='49.234.190.148', user='root', password='ExcZCP8deDx5XjLb', database='course_planning', charset='utf8mb4')


# 预处理试卷
# Preprocess exams: TF-IDF over segmented question text, then pairwise cosine similarity.
def processing_exam():
    """Load exam question text from MySQL and compute its cosine-similarity matrix.

    Returns:
        tuple: ``(cosine_sim_exam, exam)`` where ``cosine_sim_exam`` is an
        (n x n) ndarray of cosine similarities between the TF-IDF vectors of
        each row's ``Bquestion_content``, and ``exam`` is the joined DataFrame
        (``exam_id``, ``Bquestion_content``) in the same row order.
    """
    sql = "select exam.exam_id, exam_topic.Bquestion_content from exam left join exam_topic on exam.exam_id = exam_topic.exam_id"
    exam = pd.read_sql(sql, con)
    print(exam.head())
    # BUG FIX: the LEFT JOIN can produce NULL question text; the original
    # passed None to re.sub and crashed with a TypeError. Filling with ''
    # keeps every row, so the similarity matrix stays index-aligned with
    # the `exam` DataFrame (recommend_exam relies on that alignment).
    texts = exam['Bquestion_content'].fillna('').astype(str)
    # Strip punctuation, segment with jieba, and space-join the tokens so
    # TfidfVectorizer's default whitespace-based tokenizer can split them.
    text_cut = [" ".join(jieba.cut(re.sub(r'[^\w\s]', '', text))) for text in texts]
    tfidf = TfidfVectorizer()
    # Sparse TF-IDF weighted document-term matrix.
    tfidf_matrix = tfidf.fit_transform(text_cut)
    print(tfidf_matrix.shape)
    # Cosine similarity = 1 - cosine distance.
    cosine_sim_exam = 1 - pairwise_distances(tfidf_matrix, metric='cosine')
    return cosine_sim_exam, exam


# 预处理作业
# Preprocess homework: TF-IDF over segmented question text, then pairwise cosine similarity.
def processing_work():
    """Load homework question text from MySQL and compute its cosine-similarity matrix.

    Returns:
        tuple: ``(cosine_sim_work, work)`` where ``cosine_sim_work`` is an
        (n x n) ndarray of cosine similarities between the TF-IDF vectors of
        each row's ``Bquestion_content``, and ``work`` is the joined DataFrame
        (``work_id``, ``Bquestion_content``) in the same row order.
    """
    sql = "select work.work_id, work_topic.Bquestion_content from work left join work_topic on work.work_id = work_topic.work_id"
    work = pd.read_sql(sql, con)
    print(work.head())
    # BUG FIX: the LEFT JOIN can produce NULL question text; the original
    # passed None to re.sub and crashed with a TypeError. Filling with ''
    # keeps every row, so the similarity matrix stays index-aligned with
    # the `work` DataFrame (recommend_work relies on that alignment).
    texts = work['Bquestion_content'].fillna('').astype(str)
    # Strip punctuation, segment with jieba, and space-join the tokens so
    # TfidfVectorizer's default whitespace-based tokenizer can split them.
    text_cut = [" ".join(jieba.cut(re.sub(r'[^\w\s]', '', text))) for text in texts]
    tfidf = TfidfVectorizer()
    # Sparse TF-IDF weighted document-term matrix.
    tfidf_matrix = tfidf.fit_transform(text_cut)
    print(tfidf_matrix.shape)
    # Cosine similarity = 1 - cosine distance.
    cosine_sim_work = 1 - pairwise_distances(tfidf_matrix, metric='cosine')
    return cosine_sim_work, work


connect('course_planning', host='49.234.190.148', port=27019)  # Connect to MongoDB (repeated here because the earlier connection was overwritten)


# def content_based_recommendation(exam_name: str, cosine_sim=cosine_sim):
#     try:
#         # 找到输入课程的索引
#         idx = exam[exam['exam_name'] == exam_name].index[0]
#         # 获取该课程的相似性分数
#         sim_scores = list(enumerate(cosine_sim[idx]))
#         # 根据相似性分数对课程进行排序
#         sim_scores = sorted(sim_scores, key=lambda x: x[1])
#         # 获取推荐前4个推荐课程的索引
#         sim_scores = sim_scores[1:5]
#         food_indices = [i[0] for i in sim_scores]
#         # 返回推荐课程信息
#         return exam['exam_name'].iloc[food_indices]
#     except Exception as e:
#         print(e)


# producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
#                          value_serializer=lambda m: json.dumps(m).encode('utf-8'),
#                          api_version=(0, 10, 2))

# producer = KafkaProducer(bootstrap_servers=['localhost:9092'],  # kafka集群地址（现在这个为在本地运行的地址）
#                          value_serializer=lambda m: json.dumps(m).encode('utf-8'),  # 指定值的反序列化方法
#                          api_version=(0, 10, 2)  # 必要参数，没有会报错
#                          )
#
#
# # kafka生产者接收用户行为数据发至主题
# def send_to_kafka(data):
#     try:
#         producer.send('user_behavior', value=data)
#     except Exception as e:
#         print(f"An error occurred while sending message to Kafka: {e}")
#
#
# # Kafka消费者处理实时数据
#     consumer = KafkaConsumer(
#         'user_behavior',
#         bootstrap_servers=['localhost:9092'],  # Kafka集群的地址，如果Kafka运行在本地，默认地址是localhost:9092
#         auto_offset_reset='latest',  # 从最新的消息开始消费
#         enable_auto_commit=True,  # 自动提交偏移量
#         value_deserializer=lambda x: json.loads(x.decode('utf-8')),  # 指定值的反序列化方法
#         api_version=(0, 10, 2),
#     )
#     # 循环消费消息
#     for message in consumer:
#         # 获取消息内容
#         behavior_data = message.value
#         print(f'Received message: {behavior_data}')
#         # 处理实时用户行为数据（将其存储到数据库、进行分析、实时推荐）
#         # 存储到MongoDB数据库
#         save_to_database(behavior_data)
#         # # 推荐
#         # user_item_matrix = time_decay_collaborative_filtering(behavior_data)
#         # user_id = behavior_data.get('user_id', None)
#         # recommendations = recommend(user_id, user_item_matrix)
#         # # 新建一个“recommendResult”主题，把推荐结果发至该主题，后在API接口中监听该主题给前端调用
#         # producer.send('recommendResult', value=json.dumps(recommendations).encode('utf-8'))


# 基于时间衰减的协同过滤模型
# Collaborative-filtering model with exponential time decay.
def time_decay_collaborative_filtering(behavior_data, item_id, decay_factor=0.5):
    """Build a user-item weight matrix where recent views count more.

    Each view contributes ``exp(decay_factor * (view_time - user_max_view_time))``,
    so a user's most recent view weighs 1.0 and older views decay toward 0.
    Assumes ``view_time`` values support subtraction into something ``np.exp``
    accepts (numeric timestamps) — TODO confirm against the behavior producer.

    Args:
        behavior_data: records convertible by ``pd.DataFrame``, each carrying
            'user_id', 'view_time' and the column named by ``item_id``.
        item_id: name of the item column (e.g. 'exam_id' or 'work_id').
        decay_factor: exponential decay rate; default 0.5.

    Returns:
        pd.DataFrame (users x items) of accumulated decay weights, or None on
        error (errors are printed, matching the module's best-effort style).
    """
    try:
        behavior_df = pd.DataFrame(behavior_data)
        # Most recent first; not needed for the math below, kept for parity
        # with the original ordering.
        behavior_df.sort_values(by='view_time', ascending=False, inplace=True)
        unique_users = behavior_df['user_id'].unique()
        unique_items = behavior_df[item_id].unique()
        # Float-typed zero matrix. (The original built an empty frame and
        # fillna(0)'d it, leaving an object dtype.)
        user_item_matrix = pd.DataFrame(0.0, index=unique_users, columns=unique_items)
        for user_id in unique_users:
            # BUG FIX: the original `if user_id:` also dropped valid falsy
            # ids such as 0; only skip genuinely missing ids.
            if pd.isna(user_id):
                continue
            user_actions = behavior_df[behavior_df['user_id'] == user_id]
            max_view_time = user_actions['view_time'].max()  # user's latest view
            for _, action in user_actions.iterrows():
                weight = np.exp(decay_factor * (action['view_time'] - max_view_time))
                user_item_matrix.at[user_id, action[item_id]] += weight
        return user_item_matrix
    except Exception as e:
        # Best-effort: report and return None, like the rest of the module.
        print(e)


# 推荐试卷
# Recommend exams similar to the one the user weighted highest.
def recommend_exam(user_id, user_item_matrix, top_n=5):
    """Recommend up to ``top_n`` exam_ids content-similar to the user's top exam.

    Args:
        user_id: row label into ``user_item_matrix``.
        user_item_matrix: users x exam_id weight matrix (e.g. from
            time_decay_collaborative_filtering).
        top_n: maximum number of recommendations; default 5.

    Returns:
        list of exam_id values, or None on error (errors are printed,
        matching the module's best-effort style).
    """
    try:
        cosine_sim, exam_data = processing_exam()
        print(user_item_matrix)
        user_scores = user_item_matrix.loc[user_id]
        # Seed item: the exam this user weighted highest. (The original
        # sorted the whole row first, which idxmax makes redundant.)
        top_item = user_scores.idxmax()
        idx = exam_data[exam_data['exam_id'] == top_item].index[0]
        # Similarity of every exam row to the seed, most similar first.
        sim_scores = sorted(enumerate(cosine_sim[idx]), key=lambda x: x[1], reverse=True)
        # BUG FIX: the original sliced [1:top_n+1], assuming the seed item is
        # always first after sorting; with similarity ties that can recommend
        # the seed itself and push out a real candidate. Exclude by index.
        top_indices = [i for i, _ in sim_scores if i != idx][:top_n]
        recommended_exams = [exam_data['exam_id'].iloc[i] for i in top_indices]
        print(recommended_exams)
        return recommended_exams
    except Exception as e:
        # Best-effort: report and return None, like the rest of the module.
        print(e)


# 推荐作业
# Recommend homework similar to the one the user weighted highest.
def recommend_work(user_id, user_item_matrix, top_n=5):
    """Recommend up to ``top_n`` work_ids content-similar to the user's top work.

    Args:
        user_id: row label into ``user_item_matrix``.
        user_item_matrix: users x work_id weight matrix (e.g. from
            time_decay_collaborative_filtering).
        top_n: maximum number of recommendations; default 5.

    Returns:
        list of work_id values, or None on error (errors are printed,
        matching the module's best-effort style).
    """
    try:
        cosine_sim, work_data = processing_work()
        print(user_item_matrix)
        user_scores = user_item_matrix.loc[user_id]
        # Seed item: the work this user weighted highest. (The original
        # sorted the whole row first, which idxmax makes redundant.)
        top_item = user_scores.idxmax()
        idx = work_data[work_data['work_id'] == top_item].index[0]
        # Similarity of every work row to the seed, most similar first.
        sim_scores = sorted(enumerate(cosine_sim[idx]), key=lambda x: x[1], reverse=True)
        # BUG FIX: the original sliced [1:top_n+1], assuming the seed item is
        # always first after sorting; with similarity ties that can recommend
        # the seed itself and push out a real candidate. Exclude by index.
        top_indices = [i for i, _ in sim_scores if i != idx][:top_n]
        recommended_works = [work_data['work_id'].iloc[i] for i in top_indices]
        print(recommended_works)
        return recommended_works
    except Exception as e:
        # Best-effort: report and return None, like the rest of the module.
        print(e)
