import torch
from transformers import AutoModelForCausalLM
import os
from loguru import logger

def create_model(config):
    """Load a causal language model from disk and move it onto the GPU.

    The model is read from ``config.data_dir/config.weight_dir``, put into
    eval mode, moved to CUDA, and cast to fp16 when ``config.fp16`` is
    truthy, otherwise to float32.

    Args:
        config: Object with attributes ``data_dir`` (str), ``weight_dir``
            (str), and ``fp16`` (bool-like).

    Returns:
        The loaded ``AutoModelForCausalLM`` instance, on GPU, in eval mode.

    Raises:
        FileNotFoundError: If the resolved model path does not exist.
        RuntimeError: If CUDA is not available.
        Exception: Re-raised (after logging) if loading or moving fails.
    """
    model_path = os.path.join(config.data_dir, config.weight_dir)
    logger.info("Model path " + model_path)
    if not os.path.exists(model_path):
        logger.error(f"Model path does not exist: {model_path}")
        raise FileNotFoundError(f"Model path does not exist: {model_path}")
    if not torch.cuda.is_available():
        logger.error("CUDA is not available")
        raise RuntimeError("CUDA is not available")
    logger.info("CUDA is available")
    try:
        model = AutoModelForCausalLM.from_pretrained(model_path)
        logger.info("Model loaded from pretrained weights")

        # Confirm the model was actually loaded.
        logger.info(f"Model type: {type(model)}")

        model.eval()
        logger.info("Model set to eval mode")
        model = model.cuda()
        logger.info("Model moved to GPU")
        # Choose the final dtype once. The previous version always cast to
        # float32 and then re-cast the whole model to half when fp16 was
        # requested — a wasted full-model conversion.
        if config.fp16:
            model = model.half()
            logger.info("Model converted to half (fp16) precision")
        else:
            model = model.float()
            logger.info("Model converted to float precision")
    except Exception as e:
        logger.error(f"Failed to load or move model: {e}")
        raise

    return model
