import torch
import numpy as np
import pandas as pd
import json
from pathlib import Path
import os
import sys
import re
import random
from collections import defaultdict
from transformers import AutoModelForCausalLM, AutoTokenizer
from resources.DKT_model.dkt import DKTForward

# Unified hardware configuration: a DEVICE environment variable overrides the
# default, which is cuda:2 when CUDA is available and cpu otherwise.
DEVICE = os.getenv("DEVICE", "cuda:2" if torch.cuda.is_available() else "cpu")
print(f"[INFO] 使用硬件: {DEVICE}")

def encode_student_onehot(student_data, max_step=50, num_questions=157):
    """Encode one student's interaction history as a one-hot matrix for DKT.

    Each time step is a 2*num_questions vector: a mapped knowledge-point id
    answered correctly sets column ``id``; answered incorrectly sets column
    ``id + num_questions``. Only the most recent ``max_step`` interactions
    are kept; ids >= num_questions are skipped with a warning.

    Args:
        student_data: dict of comma/semicolon separated history strings
            ('mapped_knowledge_points' is ';'-separated groups of
            ','-separated ids; 'is_corrects' is ','-separated 0/1 flags).
        max_step: number of rows in the output matrix.
        num_questions: number of mapped knowledge-point ids.

    Returns:
        (onehot, student_id, answer_time, metadata) where onehot is a
        float32 array of shape (max_step, 2*num_questions), answer_time is
        the first entry of 'begin_times', and metadata echoes the raw fields.

    Raises:
        ValueError: when the knowledge-point groups and correctness flags
        have different lengths.
    """
    sid = student_data['student_id']
    first_begin_time = student_data['begin_times'].split(',')[0]
    raw_exercise_ids = student_data['exercise_ids']
    raw_is_corrects = student_data['is_corrects']
    raw_knowledge_points = student_data['knowledge_points']
    raw_mapped_kps = student_data['mapped_knowledge_points']

    kp_groups = raw_mapped_kps.split(';')
    corrects = [int(flag) for flag in raw_is_corrects.split(',')]

    if len(kp_groups) != len(corrects):
        print("len(kp_groups)", len(kp_groups))
        print("len(corrects)", len(corrects))
        raise ValueError(f"学生 {sid} 在 {first_begin_time} 的知识点和正确性长度不一致")

    seq_len = len(corrects)
    onehot = np.zeros(shape=(max_step, 2 * num_questions), dtype=np.float32)

    # Keep only the last max_step interactions when the history is longer.
    offset = max(0, seq_len - max_step)
    for row, (group, correct) in enumerate(zip(kp_groups[offset:], corrects[offset:])):
        if not group:
            continue
        for kp_id in (int(tok) for tok in group.split(',')):
            if kp_id >= num_questions:
                print(f"警告: 学生 {sid} 在 {first_begin_time} 的知识点 ID {kp_id} 超出映射范围 {num_questions}，跳过")
                continue
            # Correct answers occupy the first half of the vector,
            # incorrect ones the second half.
            col = kp_id + (0 if correct > 0 else num_questions)
            onehot[row, col] = 1

    metadata = {
        "student_id": sid,
        "answer_time": first_begin_time,
        "exercise_ids": raw_exercise_ids,
        "is_corrects": raw_is_corrects,
        "knowledge_points": raw_knowledge_points,
        "mapped_knowledge_points": raw_mapped_kps,
    }

    return onehot, sid, first_begin_time, metadata

def process_student_incorrect(student_data, question_bank_csv_path, max_step=50, num_questions=157):
    """Locate the student's most recent incorrect answer and encode the
    history *before* it for DKT, then look the question up in the bank.

    Args:
        student_data: dict of comma/semicolon separated history strings
            ('exercise_ids', 'is_corrects', 'knowledge_points',
            'mapped_knowledge_points', ...).
        question_bank_csv_path: path of the question-bank CSV used to fetch
            the stem/options/answer/difficulty of the incorrect question.
        max_step: maximum sequence length of the one-hot encodings.
        num_questions: number of mapped knowledge-point ids.

    Returns:
        dict with the full-history encoding ('onehot'), the pre-error
        encoding ('onehot_before_incorrect'), metadata about the last
        incorrect question, and 'question_info' (None when not found or when
        every answer was correct).

    Raises:
        ValueError: if the per-interaction fields have inconsistent lengths.
    """
    onehot, student_id, answer_time, metadata = encode_student_onehot(student_data)

    exercise_ids = student_data['exercise_ids'].split(',')
    is_corrects = list(map(int, student_data['is_corrects'].split(',')))
    knowledge_points = student_data['knowledge_points'].split(';')
    mapped_kp_str = student_data['mapped_knowledge_points'].split(';')

    if not (len(exercise_ids) == len(is_corrects) == len(knowledge_points) == len(mapped_kp_str)):
        raise ValueError(f"学生 {student_id} 的数据长度不一致")

    # Scan backwards for the most recent incorrect (0) answer.
    last_incorrect_index = -1
    last_incorrect_exercise_id = None
    last_incorrect_knowledge_points = None
    last_incorrect_knowledge_ids = None

    for i in range(len(is_corrects) - 1, -1, -1):
        if is_corrects[i] == 0:
            last_incorrect_index = i
            last_incorrect_exercise_id = exercise_ids[i]
            last_incorrect_knowledge_points = knowledge_points[i]
            last_incorrect_knowledge_ids = mapped_kp_str[i]
            break

    # Encode only the interactions before the error (the whole history when
    # there is no incorrect answer), keeping the most recent max_step of them.
    onehot_before_incorrect = np.zeros(shape=(max_step, 2 * num_questions), dtype=np.float32)
    end_idx = last_incorrect_index if last_incorrect_index >= 0 else len(is_corrects)
    kp_groups_before = mapped_kp_str[:end_idx]
    corrects_before = is_corrects[:end_idx]

    length_before = len(corrects_before)
    start_idx_before = max(0, length_before - max_step)

    if length_before > 0:
        print(f"调试: 错题之前的记录数: {length_before}, 编码从索引 {start_idx_before} 开始")
        for i, (kp_group, correct) in enumerate(zip(kp_groups_before[start_idx_before:], corrects_before[start_idx_before:])):
            if kp_group:
                kp_ids = list(map(int, kp_group.split(',')))
                for kp_id in kp_ids:
                    if kp_id >= num_questions:
                        print(f"警告: 学生 {student_id} 在 {answer_time} 的知识点 ID {kp_id} 超出映射范围 {num_questions}，跳过")
                        continue
                    index = kp_id if correct > 0 else kp_id + num_questions
                    onehot_before_incorrect[i][index] = 1
    else:
        print(f"提示: 学生 {student_id} 的最后一个错题索引为 {last_incorrect_index}，无之前的记录，onehot_before_incorrect 保持全零")

    nonzero_indices = np.nonzero(onehot_before_incorrect)
    print(f"调试: onehot_before_incorrect 非零元素位置 (行, 列): {list(zip(nonzero_indices[0], nonzero_indices[1]))}")

    # Look the incorrect question up in the bank CSV.
    question_info = None
    if last_incorrect_exercise_id:
        question_bank_df = pd.read_csv(question_bank_csv_path)
        print(f"调试: 题库 CSV 文件列名: {list(question_bank_df.columns)}")

        required_columns = {'题目ID', '题干', '选项', '答案', '难易度'}
        if not required_columns.issubset(question_bank_df.columns):
            missing_columns = required_columns - set(question_bank_df.columns)
            print(f"警告: 题库 CSV 文件缺少必要列: {missing_columns}")
        else:
            # BUG FIX: compare as strings — last_incorrect_exercise_id is a
            # str split from the history, while pandas typically parses a
            # numeric 题目ID column as int64, so a direct == never matched.
            question_row = question_bank_df[
                question_bank_df['题目ID'].astype(str) == str(last_incorrect_exercise_id)
            ]
            if not question_row.empty:
                question_info = {
                    'stem': question_row['题干'].iloc[0],
                    'options': question_row['选项'].iloc[0],
                    'correct_answer': question_row['答案'].iloc[0],
                    'difficulty': question_row['难易度'].iloc[0]
                }
            else:
                print(f"警告: 题目 ID {last_incorrect_exercise_id} 在题库中未找到")

    result = {
        'student_id': student_id,
        'onehot': onehot,
        'onehot_before_incorrect': onehot_before_incorrect,
        'last_incorrect_index': last_incorrect_index,
        'last_incorrect_exercise_id': last_incorrect_exercise_id,
        'last_incorrect_knowledge_points': last_incorrect_knowledge_points,
        'last_incorrect_knowledge_ids': last_incorrect_knowledge_ids,
        'question_info': question_info
    }

    return result

def process_student_incorrect_with_rec(student_data, question_bank_csv_path, max_step=50, num_questions=157):
    """Find the student's most recent incorrect answer and encode the history
    before it for DKT (recommendation variant, no question-bank lookup).

    Args:
        student_data: dict of comma/semicolon separated history strings.
        question_bank_csv_path: unused here; kept for interface compatibility
            with process_student_incorrect.
        max_step: maximum sequence length of the one-hot encodings.
        num_questions: number of mapped knowledge-point ids.

    Returns:
        dict with the full-history encoding ('onehot'), the pre-error
        encoding ('onehot_before_incorrect'), and metadata about the last
        incorrect question (None fields when every answer was correct).

    Raises:
        ValueError: if the per-interaction fields have inconsistent lengths.
    """
    onehot, student_id, answer_time, metadata = encode_student_onehot(student_data)

    exercise_ids = student_data['exercise_ids'].split(',')
    is_corrects = list(map(int, student_data['is_corrects'].split(',')))
    knowledge_points = student_data['knowledge_points'].split(';')
    mapped_kp_str = student_data['mapped_knowledge_points'].split(';')

    if len(exercise_ids) != len(is_corrects) or len(is_corrects) != len(knowledge_points) or len(knowledge_points) != len(mapped_kp_str):
        raise ValueError(f"学生 {student_id} 的数据长度不一致")

    # Scan backwards for the most recent incorrect (0) answer.
    last_incorrect_index = -1
    last_incorrect_exercise_id = None
    last_incorrect_knowledge_points = None
    last_incorrect_knowledge_ids = None

    for i in range(len(is_corrects) - 1, -1, -1):
        if is_corrects[i] == 0:
            last_incorrect_index = i
            last_incorrect_exercise_id = exercise_ids[i]
            last_incorrect_knowledge_points = knowledge_points[i]
            last_incorrect_knowledge_ids = mapped_kp_str[i]
            break

    # Encode only the interactions before the error (the whole history when
    # there is no incorrect answer), keeping the most recent max_step of them.
    onehot_before_incorrect = np.zeros(shape=(max_step, 2 * num_questions), dtype=np.float32)
    end_idx = last_incorrect_index if last_incorrect_index >= 0 else len(is_corrects)
    kp_groups_before = mapped_kp_str[:end_idx]
    corrects_before = is_corrects[:end_idx]

    length_before = len(corrects_before)
    start_idx_before = max(0, length_before - max_step)

    # BUG FIX: the prefix slices above were previously computed but never
    # encoded, so onehot_before_incorrect stayed all-zero and downstream DKT
    # predictions ran on an empty history. Fill it the same way
    # process_student_incorrect does.
    for i, (kp_group, correct) in enumerate(zip(kp_groups_before[start_idx_before:], corrects_before[start_idx_before:])):
        if kp_group:
            for kp_id in map(int, kp_group.split(',')):
                if kp_id >= num_questions:
                    # Out-of-range mapped id: skip, mirroring encode_student_onehot.
                    continue
                index = kp_id if correct > 0 else kp_id + num_questions
                onehot_before_incorrect[i][index] = 1

    result = {
        'student_id': student_id,
        'onehot': onehot,
        'onehot_before_incorrect': onehot_before_incorrect,
        'last_incorrect_index': last_incorrect_index,
        'last_incorrect_exercise_id': last_incorrect_exercise_id,
        'last_incorrect_knowledge_points': last_incorrect_knowledge_points,
        'last_incorrect_knowledge_ids': last_incorrect_knowledge_ids
    }

    return result

class DKTInference:
    """Loads a trained DKTForward checkpoint and exposes per-student
    knowledge-state prediction on the configured device."""

    def __init__(self, model_path, num_questions, hidden_dim=128, layer_num=2, dropout=0.2, cell_type='lstm', device=DEVICE):
        """Build the forward model and load weights from *model_path*.

        Raises FileNotFoundError when the checkpoint file is missing, and
        ValueError when it does not contain a 'model_state_dict' entry.
        """
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"DKT 模型路径 {model_path} 不存在")
        self.device = device
        self.num_questions = num_questions
        # One input slot per (knowledge point, correct/incorrect) pair.
        self.model = DKTForward(
            input_dim=2 * num_questions,
            hidden_dim=hidden_dim,
            layer_num=layer_num,
            output_dim=num_questions,
            dropout=dropout,
            device=self.device,
            cell_type=cell_type,
        ).to(self.device)

        print(f"加载 DKT 模型从 {model_path}...")
        checkpoint = torch.load(model_path, map_location=self.device)
        # Guard clause: only the nested-checkpoint format is supported.
        if not (isinstance(checkpoint, dict) and "model_state_dict" in checkpoint):
            raise ValueError(f"模型文件 {model_path} 格式错误，期望包含 'model_state_dict' 键")
        print("[INFO] 检测到嵌套 state_dict，加载 model_state_dict")
        self.model.load_state_dict(checkpoint["model_state_dict"])
        self.model.eval()
        print("[INFO] DKT 模型加载完成")

    def predict_student(self, student_onehot_seq: np.ndarray):
        """Return per-knowledge-point mastery probabilities after the final
        time step of *student_onehot_seq* ([seq_len, 2*num_questions])."""
        if student_onehot_seq.shape[1] != 2 * self.num_questions:
            raise ValueError(f"输入序列维度应为 [seq_len, {2 * self.num_questions}]，实际为 {student_onehot_seq.shape}")

        # Add a batch dimension and move to the model's device.
        batch = torch.tensor(student_onehot_seq[np.newaxis, ...], dtype=torch.float32).to(self.device)
        with torch.no_grad():
            logits, _ = self.model(batch)
        # Sigmoid over the logits of the last step gives the mastery vector.
        probabilities = torch.sigmoid(logits[0]).cpu().numpy()
        return probabilities[-1]


def generate_rec_and_probs(student_data, question_bank_csv_path, dkt_model):
    """Predict the student's mastery probability for each knowledge point of
    their most recent incorrect question.

    Args:
        student_data: dict of comma/semicolon separated history strings.
        question_bank_csv_path: forwarded to process_student_incorrect_with_rec
            (kept for interface compatibility).
        dkt_model: object exposing predict_student (e.g. DKTInference).

    Returns:
        A '；'-joined string of two-decimal probabilities (no trailing
        separator), or None when no probability could be computed.
    """
    # Encode the history that precedes the last incorrect answer.
    student_result = process_student_incorrect_with_rec(student_data, question_bank_csv_path)
    student_id = student_result['student_id']
    onehot_before_incorrect = student_result['onehot_before_incorrect']
    last_incorrect_knowledge_points = student_result['last_incorrect_knowledge_points']
    last_incorrect_knowledge_ids = student_result['last_incorrect_knowledge_ids']

    # Robustness: when every answer was correct there is no incorrect
    # question to score (previously this crashed on None.split below).
    if last_incorrect_knowledge_ids is None:
        return None

    # DKT mastery probabilities given the pre-error history.
    knowledge_probs = dkt_model.predict_student(onehot_before_incorrect)

    # Mapped (0-based) knowledge-point ids of the incorrect question.
    mapped_ids = list(map(int, last_incorrect_knowledge_ids.split(',')))
    knowledge_points = last_incorrect_knowledge_points.split(',')

    parts = []
    for mapped_id, kp in zip(mapped_ids, knowledge_points):
        prob_index = mapped_id  # mapped ids are already 0-based indices
        if prob_index >= len(knowledge_probs):
            print(f"[WARNING] 概率索引 {prob_index} 超出 DKT 输出范围 (0-{len(knowledge_probs)-1})")
            continue
        parts.append(f"{knowledge_probs[prob_index]:.2f}")

    if not parts:
        print(f"[WARNING] 学生 {student_id} 的错题知识点概率为空")
        # BUG FIX: callers bind a single value (see recommend_weak_questions);
        # the old failure path returned a (None, None) tuple.
        return None

    # Join without a trailing separator (old code sliced it off manually).
    return "；".join(parts)

def get_avg_prob(x):
    """Average the probabilities in x['掌握概率'].

    Accepts any mix of ASCII/full-width ';' and ',' as separators; blank
    tokens are ignored. Returns 0.0 when there is no numeric token.
    """
    tokens = re.split(r"[;,，；]", str(x["掌握概率"]))
    values = []
    for token in tokens:
        token = token.strip()
        if token:
            values.append(float(token))
    if not values:
        return 0.0
    return sum(values) / len(values)

def calculate_average_probability(prob_str):
    """Return the mean of the numeric tokens in *prob_str*.

    Tokens may be separated by ASCII or full-width ';'/','; whitespace-only
    tokens are skipped. Returns 0.0 when no valid probability is present.
    """
    values = [
        float(token.strip())
        for token in re.split(r"[;,，；]", prob_str)
        if token.strip()
    ]
    return sum(values) / len(values) if values else 0.0

def recommend_weak_questions(student_data, all_knows_with_prereq, dkt_model, question_bank_csv_path, pre, top_k=5, max_step=50, num_questions=157):
    """Recommend up to ``top_k`` not-yet-attempted questions targeting the
    student's weakest knowledge points.

    For each candidate question the history window is rolled forward one step
    (oldest interaction dropped, the candidate appended as a hypothetical
    incorrect attempt) and DKT predicts mastery of the candidate's knowledge
    points; one question per knowledge point is then sampled and the picks
    are sorted by ascending average mastery.

    Args:
        student_data: dict of comma/semicolon separated history strings.
        all_knows_with_prereq: question table; assumed to be a pandas
            DataFrame with columns '题目ID' and '知识点' (and optionally
            '题干', '难易度') — TODO confirm against the caller.
        dkt_model: object exposing predict_student (e.g. DKTInference).
        question_bank_csv_path: forwarded to generate_rec_and_probs.
        pre: truthy → candidate knowledge points come only from the question
            table; falsy → union of the table and the student's history.
        top_k: maximum number of recommendations returned.
        max_step: DKT sequence length for the full-history encoding.
        num_questions: number of mapped knowledge-point ids.

    Returns:
        list of at most ``top_k`` recommendation dicts, weakest first.
        NOTE(review): per-knowledge-point selection uses random.choice, so
        the output is nondeterministic across runs.
    """
    # Full-history encoding; also validates field lengths (raises ValueError
    # on mismatch). The encoding itself is unused below — presumably kept
    # only for that validation side effect; confirm before removing.
    onehot, student_id, _, metadata = encode_student_onehot(student_data, max_step=max_step, num_questions=num_questions)

    # knowledge point name -> mapped integer id table.
    kp_map_df = pd.read_csv('./resources/knowledge_point_mapping.csv')
    kp2id = dict(zip(kp_map_df["knowledge_point"], kp_map_df["mapped_id"]))

    done_exercise_ids = set(student_data["exercise_ids"].split(","))

    # Candidates = questions not already attempted, capped at 200 rows to
    # bound the per-candidate DKT calls below.
    candidate_df = all_knows_with_prereq[~all_knows_with_prereq["题目ID"].isin(done_exercise_ids)]
    rows = candidate_df if len(candidate_df) <= 200 else candidate_df[:200]

    recommendations = []

    if pre:
        # pre=True: restrict to knowledge points present in the question
        # table / knowledge graph.
        knowledge_point_set = set()
        for kp_str in all_knows_with_prereq["知识点"]:
            for kp in re.split(r"[;,，]", str(kp_str)):
                kp = kp.strip()
                if kp:
                    knowledge_point_set.add(kp)
    else:
        # pre=False: union of the student's historical knowledge points and
        # those in the question table.
        print("pre=False，使用学生历史 + 知识图谱中的知识点生成推荐")
        knowledge_point_set = set()

        # Knowledge points from the student's own history.
        for kp_str in student_data["knowledge_points"].split(";"):
            for kp in re.split(r"[;,，]", str(kp_str)):
                kp = kp.strip()
                if kp:
                    knowledge_point_set.add(kp)

        # Knowledge points from the question table.
        for kp_str in all_knows_with_prereq["知识点"]:
            for kp in re.split(r"[;,，]", str(kp_str)):
                kp = kp.strip()
                if kp:
                    knowledge_point_set.add(kp)

    for _, row in rows.iterrows():
        kp_ids_str = str(row["知识点"])
        question_kps = set(k.strip() for k in re.split(r"[;,，]", kp_ids_str) if k.strip())

        # Skip questions whose knowledge points are outside the candidate set.
        if not question_kps & knowledge_point_set:
            continue

        exercise_id = row["题目ID"]
        changed_student_data = student_data.copy()

        # Roll the fixed-length history window: drop the oldest interaction
        # and append the candidate as an (assumed incorrect) new attempt.
        exercise_id_list = changed_student_data["exercise_ids"].split(",")
        if len(exercise_id_list) == 0:
            continue
        exercise_id_list = exercise_id_list[1:] + [exercise_id]
        changed_student_data["exercise_ids"] = ",".join(exercise_id_list)

        exercise_correct_list = changed_student_data["is_corrects"].split(",")
        if len(exercise_correct_list) == 0:
            continue
        exercise_correct_list = exercise_correct_list[1:] + ["0"]
        changed_student_data["is_corrects"] = ",".join(exercise_correct_list)

        knowledge_points_list = changed_student_data["knowledge_points"].split(";")
        knowledge_points_list = knowledge_points_list[1:] + [kp_ids_str]
        changed_student_data["knowledge_points"] = ";".join(knowledge_points_list)

        # Map the candidate's knowledge-point names to integer ids; skip the
        # question entirely when none of its points can be mapped.
        raw_kps = [kp.strip() for kp in re.split(r"[;,，]", kp_ids_str) if kp.strip()]
        mapped_ids = [str(kp2id[kp]) for kp in raw_kps if kp in kp2id]

        if not mapped_ids:
            print("跳过：没有知识点能映射", raw_kps)
            continue

        mapped_kps = changed_student_data["mapped_knowledge_points"].split(";")
        mapped_kps = mapped_kps[1:] + [",".join(mapped_ids)]
        changed_student_data["mapped_knowledge_points"] = ";".join(mapped_kps)

        # Mastery probabilities of the candidate's knowledge points under the
        # rolled-forward history.
        probs = generate_rec_and_probs(changed_student_data, question_bank_csv_path, dkt_model)

        recommendations.append({
            "题目ID": exercise_id,
            "掌握概率": probs,
            "知识点ID": kp_ids_str,
            "题干": row.get("题干", "无"),
            "难度": row.get("难易度", "无")
        })

    # Group candidate recommendations by individual knowledge point.
    kp_question_pool = {}
    seen_questions = set()

    for rec in recommendations:
        kp_list = re.split(r'[;,，]', rec["知识点ID"])
        for kp in kp_list:
            kp = kp.strip()
            if not kp:
                continue
            kp_question_pool.setdefault(kp, []).append(rec)

    # Sample one question per knowledge point, deduplicating by 题目ID
    # (nondeterministic — see docstring).
    kp_best_question = {}
    for kp, rec_list in kp_question_pool.items():
        rec = random.choice(rec_list)
        if rec["题目ID"] not in seen_questions:
            kp_best_question[kp] = rec
            seen_questions.add(rec["题目ID"])

    # Weakest (lowest average mastery probability) first.
    sorted_questions = sorted(kp_best_question.values(), key=get_avg_prob)

    return sorted_questions[:top_k]




    

    
    
            
    
    
    