from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import jieba

from Interview.models import Question


class InterviewRecommender:
    """Recommend interview questions that target a candidate's weak areas.

    Uses TF-IDF cosine similarity between jieba-segmented question text and
    the weakness labels from an interview report, then boosts harder and
    application-oriented questions.
    """

    def __init__(self, report, user_id):
        # report: interview report object whose `metrics` dict may contain a
        # 'weaknesses' list of (Chinese) weakness labels; may be None.
        self.report = report
        self.user_id = user_id
        self.weaknesses = report.metrics.get('weaknesses', []) if report else []

    def extract_weak_dimensions(self):
        """Return the raw list of weakness labels taken from the report."""
        return self.weaknesses

    def recommend_questions(self, queryset):
        """Rank the questions in `queryset` by relevance to the weaknesses.

        Args:
            queryset: iterable of Question objects (pre-filtered by caller).

        Returns:
            list[Question] sorted by combined score (text similarity plus
            difficulty/type bonuses), highest first. If there are no
            questions or no weaknesses, the questions are returned unranked.
        """
        questions = list(queryset)
        if not questions or not self.weaknesses:
            return questions  # nothing to rank, or nothing to rank against

        # Segment BOTH sides with jieba. TfidfVectorizer's default tokenizer
        # splits on whitespace/punctuation only, so unsegmented Chinese text
        # collapses into one giant token and every similarity is ~0 — the
        # original code only segmented the weakness side. `or ''` avoids
        # interpolating the literal string "None" for missing fields.
        question_texts = [
            " ".join(jieba.cut(f"{q.content or ''} {q.key_points or ''}"))
            for q in questions
        ]
        weakness_text = " ".join(jieba.cut(" ".join(self.weaknesses)))

        vectorizer = TfidfVectorizer()
        try:
            tfidf_matrix = vectorizer.fit_transform(question_texts + [weakness_text])
        except ValueError:
            # Empty vocabulary (e.g. texts reduced to stop words): fall back
            # to the unranked question list instead of crashing.
            return questions
        # Similarity of the weakness vector (last row) against each question.
        similarities = cosine_similarity(tfidf_matrix[-1:], tfidf_matrix[:-1])[0]

        # Combined score: similarity base plus structured-field bonuses.
        scored = []
        for sim, question in zip(similarities, questions):
            score = sim * 10  # text-similarity base score
            if question.difficulty == 'high':
                score += 2  # prefer harder questions for weak areas
            if question.question_type == 'application':
                score += 1  # prefer applied over purely theoretical questions
            scored.append((question, score))

        # Stable sort keeps the original order among equal scores.
        scored.sort(key=lambda item: item[1], reverse=True)
        return [question for question, _ in scored]
