import os
import json
import torch
import numpy as np
from torch import nn
from torch.utils.data import Dataset, DataLoader
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
import joblib
import jieba
import pickle

# 定义分词函数，用于TF-IDF向量化器
def jieba_tokenizer(text):
    """Tokenize *text* with jieba in precise (non-full) mode.

    Passed as the ``tokenizer`` callable to the TF-IDF vectorizer, which
    expects a list of string tokens per document.

    Args:
        text: the input string to segment.

    Returns:
        The segmented tokens as a list of strings.
    """
    return [token for token in jieba.cut(text, cut_all=False)]

class TechStackDataset(Dataset):
    def __init__(self, questions, labels, tech_stacks=None):
        """
        PyTorch dataset for multi-label tech-stack classification.

        Args:
            questions: list of question strings.
            labels: list of label lists (tech-stack names per question).
            tech_stacks: optional full list of known tech-stack names; when
                provided (and non-empty), labels are returned multi-hot
                encoded against this list.
        """
        self.questions = questions
        self.labels = labels
        self.tech_stacks = tech_stacks
        # Precompute name -> column index once: list.index() inside
        # __getitem__ would cost O(len(tech_stacks)) per label per fetch.
        # A falsy tech_stacks (None or empty) disables encoding, matching
        # the original `if self.tech_stacks:` check.
        self._tech_index = (
            {tech: i for i, tech in enumerate(tech_stacks)} if tech_stacks else None
        )

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.questions)

    def __getitem__(self, idx):
        """Return sample ``idx`` as ``{"question": str, "labels": ...}``.

        When ``tech_stacks`` was provided, ``labels`` is a multi-hot numpy
        vector (one slot per known tech stack); otherwise the raw label list
        is passed through unchanged. Label names that are not present in
        ``tech_stacks`` are silently ignored.
        """
        question = self.questions[idx]
        label = self.labels[idx]

        # Convert the label name list to a multi-hot vector.
        if self._tech_index is not None:
            one_hot = np.zeros(len(self.tech_stacks))
            for tech in label:
                col = self._tech_index.get(tech)
                if col is not None:
                    one_hot[col] = 1
            label = one_hot

        return {
            "question": question,
            "labels": label
        }

class TechStackModelTrainer:
    def __init__(self, data_dir="data", model_dir="../../../model/tech_stack_model"):
        """
        Trainer for the tech-stack classifier: TF-IDF features fed into a
        one-classifier-per-label (MultiOutput) calibrated linear SVM.

        Args:
            data_dir: data directory, defaults to ./data.
            model_dir: model output directory, defaults to
                model/tech_stack_model under the project root.

        Raises:
            FileNotFoundError: if the tech-stack list file is missing.
        """
        self.data_dir = data_dir
        self.model_dir = model_dir

        # Make sure the output directory exists before training starts.
        os.makedirs(self.model_dir, exist_ok=True)

        # Load the list of known tech-stack labels.
        tech_stacks_path = os.path.join(data_dir, "tech_stacks.json")
        if os.path.exists(tech_stacks_path):
            with open(tech_stacks_path, "r", encoding="utf-8") as f:
                self.tech_stacks = json.load(f)
        else:
            raise FileNotFoundError(f"技术栈列表文件不存在: {tech_stacks_path}")

        # O(1) name -> column index lookup for one-hot encoding
        # (list.index() would be O(n) per label).
        self._tech_index = {tech: i for i, tech in enumerate(self.tech_stacks)}

        # TF-IDF vectorizer using jieba word segmentation for Chinese text.
        self.vectorizer = TfidfVectorizer(
            analyzer='word',
            tokenizer=jieba_tokenizer,  # custom jieba-based tokenizer
            token_pattern=None,         # disable the default token regex
            max_features=5000,          # cap vocabulary size
            ngram_range=(1, 2)          # unigrams and bigrams
        )

        # CalibratedClassifierCV wraps LinearSVC (which has no predict_proba)
        # so that calibrated probability estimates are available per label.
        self.model = MultiOutputClassifier(CalibratedClassifierCV(LinearSVC(random_state=42, max_iter=10000)))

        print(f"技术栈模型训练器已初始化")

    def load_data(self):
        """
        Load the train and test splits from JSON files in ``data_dir``.

        Each file is expected to hold {"questions": [...], "labels": [...]}.

        Returns:
            (train_questions, train_labels, test_questions, test_labels)

        Raises:
            FileNotFoundError: if either data file is missing.
        """
        # Training split.
        train_path = os.path.join(self.data_dir, "train_data.json")
        if not os.path.exists(train_path):
            raise FileNotFoundError(f"训练数据文件不存在: {train_path}")

        with open(train_path, "r", encoding="utf-8") as f:
            train_data = json.load(f)

        train_questions = train_data["questions"]
        train_labels = train_data["labels"]

        # Test split.
        test_path = os.path.join(self.data_dir, "test_data.json")
        if not os.path.exists(test_path):
            raise FileNotFoundError(f"测试数据文件不存在: {test_path}")

        with open(test_path, "r", encoding="utf-8") as f:
            test_data = json.load(f)

        test_questions = test_data["questions"]
        test_labels = test_data["labels"]

        return train_questions, train_labels, test_questions, test_labels

    def _one_hot(self, label_lists):
        """Encode lists of tech-stack names as an (n_samples, n_labels) 0/1 matrix.

        Label names not present in ``self.tech_stacks`` are silently skipped,
        matching the original inline encoding loops.
        """
        index = getattr(self, "_tech_index", None)
        if index is None:
            index = {tech: i for i, tech in enumerate(self.tech_stacks)}
        matrix = np.zeros((len(label_lists), len(self.tech_stacks)))
        for row, labels in enumerate(label_lists):
            for label in labels:
                col = index.get(label)
                if col is not None:
                    matrix[row, col] = 1
        return matrix

    def train(self, threshold=0.5):
        """
        Train the model and evaluate it on the test split.

        Args:
            threshold: probability cutoff above which a label is predicted
                positive, default 0.5.

        Returns:
            dict with accuracy, precision, recall, f1 and training_time keys.
        """
        # Load the data splits.
        train_questions, train_labels, test_questions, test_labels = self.load_data()

        # Multi-hot encode the label name lists (one column per tech stack).
        train_labels_one_hot = self._one_hot(train_labels)
        test_labels_one_hot = self._one_hot(test_labels)

        # Feature extraction: fit TF-IDF on train only, then transform test.
        print("提取文本特征...")
        X_train = self.vectorizer.fit_transform(train_questions)
        X_test = self.vectorizer.transform(test_questions)

        # Fit the multi-output classifier.
        print("训练模型...")
        start_time = time.time()
        self.model.fit(X_train, train_labels_one_hot)
        training_time = time.time() - start_time
        print(f"训练完成，耗时: {training_time:.2f}秒")

        # Evaluate on the test split.
        print("评估模型...")
        # predict_proba returns one (n_samples, n_classes) array per label.
        y_pred_proba = self.model.predict_proba(X_test)
        # Take P(label=1) for each label column. If a label saw only one
        # class during training, its estimator emits a single probability
        # column; indexing [:, 1] would raise IndexError, so use the
        # constant probability of that lone class (0.0 or 1.0) instead.
        proba_columns = []
        for estimator, proba in zip(self.model.estimators_, y_pred_proba):
            if proba.shape[1] == 2:
                proba_columns.append(proba[:, 1])
            else:
                only_class = float(estimator.classes_[0])
                proba_columns.append(np.full(proba.shape[0], only_class))
        y_pred_positive_proba = np.array(proba_columns).T

        # Threshold the probabilities into hard 0/1 predictions.
        y_pred_binary = (y_pred_positive_proba > threshold).astype(int)

        # Sample-averaged precision/recall/F1 for multi-label output.
        precision, recall, f1, _ = precision_recall_fscore_support(
            test_labels_one_hot, y_pred_binary, average='samples'
        )

        # NOTE: for multi-label input accuracy_score is subset (exact-match)
        # accuracy: a sample counts only if every label matches.
        accuracy = accuracy_score(test_labels_one_hot, y_pred_binary)

        print(f"准确率: {accuracy:.4f}")
        print(f"精确率: {precision:.4f}")
        print(f"召回率: {recall:.4f}")
        print(f"F1值: {f1:.4f}")

        # Persist the trained artifacts.
        self.save_model()

        return {
            "accuracy": accuracy,
            "precision": precision,
            "recall": recall,
            "f1": f1,
            "training_time": training_time
        }

    def save_model(self):
        """
        Save the model, the vectorizer and the tech-stack list to ``model_dir``.
        """
        # Fitted classifier.
        model_path = os.path.join(self.model_dir, "tech_stack_model.pkl")
        joblib.dump(self.model, model_path)

        # Fitted TF-IDF vectorizer (needed at inference time).
        vectorizer_path = os.path.join(self.model_dir, "vectorizer.pkl")
        joblib.dump(self.vectorizer, vectorizer_path)

        # Label list, so predictions can be decoded back to names.
        tech_stacks_path = os.path.join(self.model_dir, "tech_stacks.json")
        with open(tech_stacks_path, "w", encoding="utf-8") as f:
            json.dump(self.tech_stacks, f, ensure_ascii=False, indent=2)

        print(f"模型已保存到 {self.model_dir}")
        print(f"- 模型: {model_path}")
        print(f"- 向量化器: {vectorizer_path}")
        print(f"- 技术栈列表: {tech_stacks_path}")

# Train the model when this file is executed directly as a script.
if __name__ == "__main__":
    trainer = TechStackModelTrainer()
    trainer.train() 