from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, TextStreamer, BertTokenizer, \
    BertForSequenceClassification


def get_model_tokenizer():
    """Load the local Qwen2.5-0.5B-Instruct causal LM and its tokenizer.

    Returns:
        tuple: (model, tokenizer) — the causal LM (device-mapped
        automatically across available devices) and its tokenizer.
    """
    qwen_path = r"D:\model\Qwen\Qwen2.5-0.5B-Instruct"
    qwen_tokenizer = AutoTokenizer.from_pretrained(qwen_path)
    # device_map="auto" lets accelerate place the weights on GPU/CPU as available.
    qwen_model = AutoModelForCausalLM.from_pretrained(
        qwen_path,
        device_map="auto",
    )
    return qwen_model, qwen_tokenizer


def get_model_tokenizer_by_bert():
    """Load the local bert-base-chinese checkpoint as a causal LM on CPU.

    BERT is an encoder-only model; to use it for causal generation the
    config must set ``is_decoder=True`` so the attention mask becomes
    causal. Without it, transformers warns that ``BertLMHeadModel``
    cannot be used standalone and the model attends bidirectionally,
    which is incorrect for text generation.

    Returns:
        tuple: (model, tokenizer) — the BERT LM head model (on CPU) and
        its tokenizer.
    """
    model_path = r"D:\model\google-bert\bert-base-chinese"
    tokenizer = BertTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        # Fix: required for generation — gives the model a causal attention mask.
        is_decoder=True,
        device_map="cpu",
    )
    return model, tokenizer


def get_pipeline_by_qwen():
    """Build a text-generation pipeline backed by the Qwen model."""
    qwen_model, qwen_tokenizer = get_model_tokenizer()
    text_gen = pipeline(
        "text-generation",
        model=qwen_model,
        tokenizer=qwen_tokenizer,
    )
    return text_gen


def get_pipeline_by_bert():
    """Build a text-generation pipeline backed by the BERT model."""
    bert_model, bert_tokenizer = get_model_tokenizer_by_bert()
    text_gen = pipeline(
        "text-generation",
        model=bert_model,
        tokenizer=bert_tokenizer,
    )
    return text_gen


def get_model_tokenizer_classification(num=2):
    """Load the local bert-base-chinese model for sequence classification.

    Args:
        num: Number of output labels for the classification head
            (defaults to 2, i.e. binary classification).

    Returns:
        tuple: (model, tokenizer) — a ``BertForSequenceClassification``
        with a freshly initialized ``num``-label head, and its tokenizer.
    """
    model_path = r"D:\model\google-bert\bert-base-chinese"
    tokenizer = BertTokenizer.from_pretrained(model_path)
    model = BertForSequenceClassification.from_pretrained(
        model_path,
        num_labels=num,
    )
    return model, tokenizer

