# train_model.py
from spark_processor import SparkProcessor
import config
import os
import shutil


def train_and_save_model():
    """Train the salary-prediction model and persist it locally.

    Workflow: build a SparkProcessor, bail out early if fewer than 10
    rows are available, train the model, wipe and recreate the model
    directory from ``config.MODEL_PATH``, save the model, then reload
    it and run a few sample predictions as a smoke test. Any exception
    is reported with a full traceback instead of propagating.
    """
    print("开始训练薪资预测模型...")

    proc = SparkProcessor()

    try:
        # Guard: refuse to train on a trivially small dataset.
        if proc.df.count() < 10:
            print("数据量不足，无法训练有效模型")
            return

        print("正在训练模型...")
        proc.train_salary_model()

        # Start from a clean directory so stale model files cannot linger.
        target_dir = config.MODEL_PATH
        if os.path.exists(target_dir):
            print(f"清理现有模型目录: {target_dir}")
            shutil.rmtree(target_dir)
        os.makedirs(target_dir, exist_ok=True)

        print(f"正在保存模型到 {target_dir}...")
        proc.save_model(target_dir)
        print("模型训练和保存完成！")

        # Round-trip check: reload what was just written and predict.
        print("重新加载模型进行测试...")
        proc.try_load_model()

        if not proc.model_loaded:
            print("模型加载失败，无法进行测试")
            return

        print("\n测试模型预测:")
        samples = [
            ("北京", "互联网", "本科", "3-5年", "100-500人"),
            ("上海", "金融", "硕士", "1-3年", "500-1000人"),
            ("深圳", "人工智能", "博士", "5-10年", "1000人以上"),
        ]
        for sample in samples:
            result = proc.predict_salary(*sample)
            # '-'.join reproduces the exact "city-industry-...-size" label.
            print(f"{'-'.join(sample)}: {result}")

    except Exception as e:
        # Top-level boundary of the script: report and dump the traceback.
        print(f"模型训练失败: {e}")
        import traceback
        traceback.print_exc()


# Script entry point: train only when executed directly, not on import.
if __name__ == "__main__":
    train_and_save_model()