import pandas as pd
import jieba
from sqlalchemy import create_engine
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import json

# 1. Connect to the database.
# SECURITY: the connection URL embeds real credentials. Prefer supplying it via
# the DATABASE_URL environment variable so secrets stay out of source control;
# the hard-coded fallback only keeps existing deployments working.
# TODO: remove the fallback once DATABASE_URL is provisioned everywhere.
import os

DB_URL = os.environ.get(
    'DATABASE_URL',
    'mysql+pymysql://ltt:200439Litian%40@8.137.70.232:3306/ai_intelligent_teaching_and_training_system?charset=utf8mb4',
)
engine = create_engine(DB_URL)

# 2. Load the three tables the recommender works from.
exam_questions = pd.read_sql('SELECT * FROM user_answered_questions', engine)
assignment_questions = pd.read_sql('SELECT * FROM assignment_question_bank', engine)
resources = pd.read_sql('SELECT * FROM resource', engine)

# 3. NLP weakness mining: collect the text of questions the user did poorly on
# (exams + assignments). A question is "weak" when its earned score is below
# the mean earned score.
avg_score_exam = exam_questions['score_earned'].mean()
weak_exam_questions = exam_questions[exam_questions['score_earned'] < avg_score_exam]
# dropna(): a NaN question_content would crash jieba.cut during vectorization.
weak_exam_texts = weak_exam_questions['question_content'].dropna().tolist()

# assignment_question_bank: use scores when any are recorded, otherwise fall
# back to correctness flags.
if 'score_earned' in assignment_questions.columns and assignment_questions['score_earned'].notnull().any():
    avg_score_assign = assignment_questions['score_earned'].mean()
    weak_assign_questions = assignment_questions[assignment_questions['score_earned'] < avg_score_assign]
else:
    # Anything not answered correctly (is_correct != 1, including NaN) counts as weak.
    weak_assign_questions = assignment_questions[assignment_questions['is_correct'] != 1]

weak_assign_texts = weak_assign_questions['content'].dropna().tolist()

# Pool all weak questions; bail out early when there is nothing to learn from.
weak_texts = weak_exam_texts + weak_assign_texts
if not weak_texts:
    print("没有分数低的题目，无法推荐。")
    # raise SystemExit instead of exit(): exit() comes from the site module and
    # is not guaranteed to exist (e.g. under `python -S` or in frozen builds).
    raise SystemExit

# Build one searchable text field per resource (title + description).
resources['full_text'] = resources['title'].fillna('') + ' ' + resources['description'].fillna('')

def jieba_tokenizer(text):
    """Segment Chinese text with jieba for TF-IDF vectorization."""
    return list(jieba.cut(text))

# Fit questions and resources into ONE shared TF-IDF space so their vectors are
# directly comparable, then score each resource by its best-matching question.
all_texts = weak_texts + resources['full_text'].tolist()
# token_pattern=None: with a custom tokenizer, sklearn otherwise warns that the
# default token_pattern is ignored.
vectorizer = TfidfVectorizer(tokenizer=jieba_tokenizer, token_pattern=None, max_features=1000)
tfidf_matrix = vectorizer.fit_transform(all_texts)
question_vecs = tfidf_matrix[:len(weak_texts)]
resource_vecs = tfidf_matrix[len(weak_texts):]
similarity = cosine_similarity(question_vecs, resource_vecs)
# Per resource: the highest similarity against any weak question.
resource_nlp_score = similarity.max(axis=0)
resources['nlp_score'] = resource_nlp_score

# 4. Collaborative-filtering proxy: usage counts normalized to [0, 1].
# fillna(0): use_number can be NaN (the JSON export in this script guards for
# that case), and an unfilled NaN would propagate into cf_score/final_score.
use_counts = resources['use_number'].fillna(0)
if use_counts.max() > 0:
    resources['cf_score'] = use_counts / use_counts.max()
else:
    resources['cf_score'] = 0

# 5. Blend both signals into one recommendation score (NLP-weighted 70/30).
NLP_WEIGHT = 0.7
CF_WEIGHT = 0.3
resources['final_score'] = NLP_WEIGHT * resources['nlp_score'] + CF_WEIGHT * resources['cf_score']

# 6. Keep only the five best-scoring resources.
recommend_resources = resources.sort_values('final_score', ascending=False).head(5)

# 7. Print the ranked recommendations.
print('\n为你推荐的资源（NLP+协同过滤）：')
for _, rec in recommend_resources.iterrows():
    print(f"资源ID: {rec['resource_id']}, 标题: {rec['title']}, NLP分: {rec['nlp_score']:.4f}, 协同过滤分: {rec['cf_score']:.4f}, 综合分: {rec['final_score']:.4f}")

# 8. Serialize the recommendations to JSON for downstream consumers.
def _clean_str(value):
    """Return value as a string, mapping NaN/None to '' (avoids a literal 'nan')."""
    return str(value) if pd.notna(value) else ''

recommend_data = [
    {
        'resource_id': int(row['resource_id']),
        # _clean_str on every text field: the original guarded only 'url', so a
        # missing title/description/type serialized as the string "nan".
        'title': _clean_str(row['title']),
        'description': _clean_str(row['description']),
        'type': _clean_str(row['type']),
        'url': _clean_str(row['url']),
        'use_number': int(row['use_number']) if pd.notna(row['use_number']) else 0,
        'nlp_score': float(row['nlp_score']),
        'cf_score': float(row['cf_score']),
        'final_score': float(row['final_score']),
    }
    for _, row in recommend_resources.iterrows()
]

# ensure_ascii=False keeps the Chinese titles human-readable in the file.
with open('recommend_result.json', 'w', encoding='utf-8') as f:
    json.dump(recommend_data, f, ensure_ascii=False, indent=2)

print("\n推荐结果已保存到 recommend_result.json")