import json
import shutil
import subprocess
import os
import sys

from datetime import datetime
from llamafactory import cli

from dotenv import load_dotenv
load_dotenv()  # Loads variables from .env file

# Deployment target read from the environment. main() uses it to select the
# matching 'local_dir' entry in the model config when building model paths.
machineType = os.getenv('MACHINE_TYPE')
if not machineType:
    # Empty string and unset are both treated as "not configured".
    print("Please check your .env file and set the MACHINE_TYPE variable")
    sys.exit(1)

# Whitelist of base models to train: entries from the config file whose
# name is not listed here are skipped. Uncomment lines to include more models.
BASE_MODEL_FILTERS = [
    # "LLM-Research/Meta-Llama-3.1-8B-Instruct",
    # "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", 
    # "Qwen/Qwen3-8B",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
]

# Model-config loading helper.
def load_model_config(config_path):
    """Load and parse the model configuration from a JSON file.

    Args:
        config_path: Path to the JSON configuration file.

    Returns:
        The parsed configuration object (typically a dict with a 'models'
        list and per-machine-type path sections).

    Raises:
        json.JSONDecodeError: If the file content is not valid JSON.
        OSError: If the file cannot be opened or read.
    """
    try:
        # Explicit encoding: without it the platform default is used (e.g.
        # cp936 on Chinese-locale Windows), which can corrupt or reject
        # UTF-8 config files containing non-ASCII model names or paths.
        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
    except json.JSONDecodeError as e:
        print(f"Error parsing JSON file {config_path}: {e}")
        raise
    except IOError as e:
        print(f"Error reading file {config_path}: {e}")
        raise
    return config


def check_llamafactory_cli():
    """Return True if the 'llamafactory-cli' executable is found on PATH."""
    cli_location = shutil.which('llamafactory-cli')
    return cli_location is not None

def main():
    """Run a full SFT training job for each whitelisted base model.

    Loads model definitions from 'modelscope_models.json', filters them
    against BASE_MODEL_FILTERS, and for each remaining model invokes the
    LLaMA-Factory CLI entry point in-process with a synthesized argv.

    Raises:
        FileNotFoundError: If a whitelisted model's local directory is missing.
        subprocess.CalledProcessError: If a training subprocess fails.
    """
    if not check_llamafactory_cli():
        print("llamafactory-cli not found, please switch the conda environment")
        return

    # Load the model config file.
    model_config = load_model_config('modelscope_models.json')

    # Single timestamp shared by every run in this invocation; keeps all
    # output directories for one batch grouped together.
    now = datetime.now().strftime("%Y%m%d-%H%M%S")

    # Iterate over configured models and build per-model paths.
    for model in model_config['models']:
        model_name = model['name']
        model_template = model['template']

        # Skip any model not explicitly whitelisted for this run.
        if model_name not in BASE_MODEL_FILTERS:
            continue

        # Resolve input/output locations for the current machine type.
        prefix = model_config[machineType]['local_dir']
        model_path = f"{prefix}/{model_name}"
        model_save_path = f"{prefix}/saves/{now}/full/sft/{model_name}"

        # Fail fast if the base model has not been downloaded locally.
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model path {model_path} does not exist")

        # llamafactory's cli.main() reads sys.argv directly, so we emulate a
        # command-line invocation. Save the real argv and restore it in
        # 'finally' so the override cannot leak into later iterations or
        # any code that runs after training.
        saved_argv = sys.argv
        try:
            sys.argv = [
                "llamafactory-cli",
                "train",
                "examples/train_full/llama3_full_sft.yaml",
                "model_name_or_path=" + model_path,
                "template=" + model_template,
                "output_dir=" + model_save_path,
                "dataset=sft_lora_data",
                "cutoff_len=1024",
                "max_samples=1000",
                "logging_steps=2",
                "save_steps=200",
                "per_device_train_batch_size=4",
                "gradient_accumulation_steps=2",
                "learning_rate=1.0e-4",
                "num_train_epochs=30.0",
                "warmup_ratio=0.1",
                "val_size=0.1",
                "per_device_eval_batch_size=4",
                "eval_steps=5"
            ]

            cli.main()

            print('\n')
            print('END OF TRAINING')
            print('\n')

        except subprocess.CalledProcessError as e:
            # Raised when llamafactory launches training via a subprocess
            # (e.g. torchrun) and that process exits with a non-zero code.
            print(f"Error running llamafactory-cli train: {e}")
            raise
        finally:
            sys.argv = saved_argv

if __name__ == "__main__":
    main()