# model_utils.py
# import os
# from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# from langchain.llms import HuggingFacePipeline
# from langchain import PromptTemplate, LLMChain
# from dotenv import load_dotenv
# Define the model directory and name
# MODEL_DIR = "/home/user/model"
# MODEL_NAME = "Giang07/Llama-2-7b-chat-QLoRa"
# MODEL_NAME = "meta-llama/Meta-Llama-3-8B"
# MODEL_NAME = "microsoft/Phi-3-mini-4k-instruct"
# Load environment variables from .env file
# load_dotenv()
# Now you can use the token
# api_token = os.getenv('HF_TOKEN')
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.llms import HuggingFacePipeline
from langchain import PromptTemplate, LLMChain
MODEL_NAME = "microsoft/Phi-3-mini-4k-instruct"
def load_model():
    """
    Load or download the model and tokenizer.
    """
    # config_path = os.path.join(MODEL_DIR, "config.json")
    # if not os.path.exists(config_path):
    #     os.makedirs(MODEL_DIR, exist_ok=True)
    #     # model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, use_auth_token=api_token)
    #     model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, trust_remote_code=True)
    #     model.save_pretrained(MODEL_DIR)
    #     # tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_auth_token=api_token)
    #     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
    #     tokenizer.save_pretrained(MODEL_DIR)
    # else:
    #     model = AutoModelForCausalLM.from_pretrained(MODEL_DIR)
    #     tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
    # Phi-3 is a causal (decoder-only) model, so it must be loaded with AutoModelForCausalLM.
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
    return model, tokenizer
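# Optional sketch (not part of the original flow): since this project targets QLoRA-style
# models, the base model could also be loaded in 4-bit via bitsandbytes. This assumes
# `bitsandbytes` and a CUDA GPU are available; adjust or drop as needed.
#
#     import torch
#     from transformers import BitsAndBytesConfig
#
#     bnb_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_compute_dtype=torch.bfloat16,
#     )
#     model = AutoModelForCausalLM.from_pretrained(
#         MODEL_NAME,
#         trust_remote_code=True,
#         quantization_config=bnb_config,
#         device_map="auto",
#     )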
def create_pipeline(model, tokenizer):
    """
    Create a text-generation pipeline and wrap it for LangChain.
    """
    # Build a transformers text-generation pipeline from the loaded model and tokenizer.
    transformers_pipeline = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        return_full_text=False,  # return only the newly generated text
    )
    # Wrap it in LangChain's HuggingFacePipeline so it can be used as an LLM.
    hf_pipeline = HuggingFacePipeline(pipeline=transformers_pipeline)
    return hf_pipeline
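# Example (sketch): the wrapped pipeline can also be called directly as a LangChain LLM,
# without going through generate_text(), e.g.
#     llm = create_pipeline(*load_model())
#     print(llm("Write a one-sentence summary of what LangChain does."))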
def generate_text(hf_pipeline, input_text):
    """
    Generate text using the LangChain-wrapped Hugging Face pipeline.
    """
    # prompt_template = PromptTemplate(
    #     input_variables=["input_text"],
    #     template="Translate the following English text to French: {input_text}"
    # )
    # llm = HuggingFacePipeline(pipeline=hf_pipeline)
    # llm_chain = LLMChain(prompt_template=prompt_template, llm=llm)
    # return llm_chain.run({"input_text": input_text})
    # Define a prompt template if needed (this is an example, adjust accordingly).
    prompt_template = PromptTemplate(
        input_variables=["input_text"],
        template="Please summarize the following text: {input_text}"
    )
    # Example input text for the task:
    # input_text = "LangChain is a library that facilitates the development of applications using language models."
    # Run the prompt through an LLMChain built on the wrapped pipeline.
    llm_chain = LLMChain(prompt=prompt_template, llm=hf_pipeline)
    return llm_chain.run({"input_text": input_text})
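

# Minimal usage sketch tying the helpers together; the sample text is the
# illustrative example from the comments above.
if __name__ == "__main__":
    model, tokenizer = load_model()
    llm = create_pipeline(model, tokenizer)
    sample_text = (
        "LangChain is a library that facilitates the development of "
        "applications using language models."
    )
    print(generate_text(llm, sample_text))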