import pandas as pd
import joblib
from sklearn.preprocessing import OneHotEncoder, MultiLabelBinarizer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
import numpy as np

# Custom multi-label transformer for the '|'-separated "skills" column.
class SkillMultiLabelEncoder:
    """Binarize the pipe-separated ``skills`` column into one 0/1 column per skill.

    Implements the sklearn transformer protocol (fit / transform / fit_transform)
    so it can be used as a ``Pipeline`` step. Every column other than ``skills``
    is passed through unchanged.
    """

    def __init__(self):
        self.mlb = MultiLabelBinarizer()
        # Vocabulary of skills learned during fit (numpy array of class labels).
        self.all_skills = None

    def fit(self, X, y=None):
        """Learn the skill vocabulary from X['skills'] ('|'-separated strings)."""
        skills_list = [skills.split('|') for skills in X['skills']]
        self.mlb.fit(skills_list)
        self.all_skills = self.mlb.classes_
        return self

    def transform(self, X):
        """Return X with 'skills' replaced by one binary column per known skill.

        Skills never seen during fit are filtered out. If a row consists
        *entirely* of unseen skills, it is passed through unfiltered on purpose
        so that MultiLabelBinarizer emits its "unknown class" warning and the
        data problem is surfaced rather than hidden.
        """
        skills_list = [skills.split('|') for skills in X['skills']]
        # Build the membership set once: `skill in numpy_array` is a linear
        # scan, which made the loop below O(rows * skills * classes).
        known = set(self.all_skills)
        filtered_skills = []
        for skills in skills_list:
            filtered = [skill for skill in skills if skill in known]
            if not filtered:
                # Keep the original (all-unknown) row to trigger the warning.
                filtered = skills
            filtered_skills.append(filtered)
        skills_encoded = self.mlb.transform(filtered_skills)
        # Reuse X's index so the concat below aligns row-for-row.
        skills_df = pd.DataFrame(skills_encoded, columns=self.all_skills, index=X.index)
        other_features = X.drop('skills', axis=1)
        return pd.concat([other_features, skills_df], axis=1)

    def fit_transform(self, X, y=None):
        """Fit on X, then return the transformed X."""
        self.fit(X, y)
        return self.transform(X)

# Custom multi-label transformer for the '|'-separated "experience" column.
class ExperienceMultiLabelEncoder:
    """Binarize the pipe-separated ``experience`` column into 0/1 columns.

    Mirrors :class:`SkillMultiLabelEncoder` but operates on ``experience``.
    Implements the sklearn transformer protocol so it can be a ``Pipeline``
    step; all other columns pass through unchanged.
    """

    def __init__(self):
        self.mlb = MultiLabelBinarizer()
        # Vocabulary of experience labels learned during fit.
        self.all_experiences = None

    def fit(self, X, y=None):
        """Learn the experience vocabulary from X['experience']."""
        experience_list = [exp.split('|') for exp in X['experience']]
        self.mlb.fit(experience_list)
        self.all_experiences = self.mlb.classes_
        return self

    def transform(self, X):
        """Return X with 'experience' replaced by one binary column per label.

        Labels never seen during fit are filtered out; a row made up entirely
        of unseen labels is deliberately passed through unfiltered so that
        MultiLabelBinarizer warns about it instead of silently zeroing it.
        """
        experience_list = [exp.split('|') for exp in X['experience']]
        # Membership set built once — avoids a linear scan of the numpy
        # class array for every single label in every row.
        known = set(self.all_experiences)
        filtered_experiences = []
        for exp in experience_list:
            filtered = [e for e in exp if e in known]
            if not filtered:
                # Keep the original (all-unknown) row to trigger the warning.
                filtered = exp
            filtered_experiences.append(filtered)
        experience_encoded = self.mlb.transform(filtered_experiences)
        # Reuse X's index so the concat below aligns row-for-row.
        exp_df = pd.DataFrame(experience_encoded, columns=self.all_experiences, index=X.index)
        other_features = X.drop('experience', axis=1)
        return pd.concat([other_features, exp_df], axis=1)

    def fit_transform(self, X, y=None):
        """Fit on X, then return the transformed X."""
        self.fit(X, y)
        return self.transform(X)

# Train the full pipeline and persist it to disk.
def train_and_save_model(data_path=r'd:\AOverSchool\pythonScript\recommendation-algorithm\student_job_data.csv'):
    """Train the job-recommendation decision tree and save it to disk.

    Parameters
    ----------
    data_path : str, optional
        Path to a CSV whose columns are skills, education, target, major,
        experience, job (the file's own header row is skipped). Defaults to
        the original hard-coded location for backward compatibility.

    Returns
    -------
    Pipeline
        The fitted sklearn pipeline, also dumped to
        ``job_recommendation_model.pkl`` in the current working directory.
    """
    # Load data, skipping the header row and imposing canonical column names.
    data = pd.read_csv(data_path, names=['skills', 'education', 'target', 'major', 'experience', 'job'], skiprows=1)

    # Debug output: label distribution and a data sample.
    print(f"job列的唯一值: {data['job'].unique()}")
    print("前5行数据:")
    print(data.head(5))

    # Split features and target.
    X = data[['skills', 'education', 'target', 'major', 'experience']]
    y = data['job']

    # One-hot encode the single-valued categorical columns.
    # remainder='passthrough' is essential: the binary columns produced by the
    # upstream multi-label encoders would otherwise be silently dropped —
    # ColumnTransformer's default remainder is 'drop'.
    preprocessor = ColumnTransformer(
        transformers=[
            ('education', OneHotEncoder(handle_unknown='ignore'), ['education']),
            ('target', OneHotEncoder(handle_unknown='ignore'), ['target']),
            ('major', OneHotEncoder(handle_unknown='ignore'), ['major'])
        ],
        remainder='passthrough')

    # Build the model pipeline.
    model = Pipeline([
        ('skill_encoder', SkillMultiLabelEncoder()),
        ('experience_encoder', ExperienceMultiLabelEncoder()),
        ('preprocessor', preprocessor),
        # max_depth caps tree depth to improve generalization;
        # min_samples_split guards against overfitting on tiny splits.
        ('classifier', DecisionTreeClassifier(max_depth=3, random_state=42, min_samples_split=3))
    ])

    # Train the model.
    model.fit(X, y)

    # Persist the fitted pipeline.
    joblib.dump(model, 'job_recommendation_model.pkl')
    print('模型训练完成并已保存为 job_recommendation_model.pkl')

    return model

# Retrain the model from scratch on a new data file.
def retrain_model_with_new_data(new_data_path):
    """Retrain the job-recommendation pipeline on a fresh CSV and save it.

    Parameters
    ----------
    new_data_path : str
        Path to a CSV with the same layout as the original training data:
        skills, education, target, major, experience, job (header skipped).

    Returns
    -------
    Pipeline
        The newly fitted pipeline, overwriting
        ``job_recommendation_model.pkl`` in the current working directory.
    """
    # Load the new data, skipping the header row.
    data = pd.read_csv(new_data_path, names=['skills', 'education', 'target', 'major', 'experience', 'job'], skiprows=1)

    # Debug output: label distribution in the new data.
    print(f"新数据job列的唯一值: {data['job'].unique()}")

    # Split features and target.
    X = data[['skills', 'education', 'target', 'major', 'experience']]
    y = data['job']

    # One-hot encode the single-valued categorical columns.
    # remainder='passthrough' keeps the multi-label binary columns that the
    # encoders below produce (ColumnTransformer drops them by default).
    preprocessor = ColumnTransformer(
        transformers=[
            ('education', OneHotEncoder(handle_unknown='ignore'), ['education']),
            ('target', OneHotEncoder(handle_unknown='ignore'), ['target']),
            ('major', OneHotEncoder(handle_unknown='ignore'), ['major'])
        ],
        remainder='passthrough')

    # Build the model pipeline — same steps as train_and_save_model, including
    # the experience encoder that was previously missing here.
    model = Pipeline([
        ('skill_encoder', SkillMultiLabelEncoder()),
        ('experience_encoder', ExperienceMultiLabelEncoder()),
        ('preprocessor', preprocessor),
        ('classifier', DecisionTreeClassifier(max_depth=3, random_state=42, min_samples_split=3))
    ])

    # Train the model.
    model.fit(X, y)

    # Persist the fitted pipeline.
    joblib.dump(model, 'job_recommendation_model.pkl')
    print('使用新数据复训模型完成并已保存为 job_recommendation_model.pkl')

    return model

# Load a previously trained model from disk.
def load_model():
    """Return the persisted job-recommendation pipeline, or None if missing."""
    model_file = 'job_recommendation_model.pkl'
    try:
        loaded = joblib.load(model_file)
    except FileNotFoundError:
        # No saved model yet — tell the caller to train one first.
        print('未找到已训练的模型，请先调用train_and_save_model()函数训练模型')
        return None
    print('成功加载已训练的模型')
    return loaded

# Script entry point: train from the default CSV and persist the pipeline.
if __name__ == '__main__':
    trained_model = train_and_save_model()