# model_utils.py
import os
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.llms import HuggingFacePipeline
from langchain import PromptTemplate, LLMChain
from dotenv import load_dotenv

# Define the model directory and name
MODEL_DIR = "/home/user/model"
# MODEL_NAME = "Giang07/Llama-2-7b-chat-QLoRa"
# MODEL_NAME = "meta-llama/Meta-Llama-3-8B"
MODEL_NAME = "microsoft/Phi-3-mini-4k-instruct"

# Load environment variables from .env file
load_dotenv()

# Now you can use the token
api_token = os.getenv('HF_TOKEN')
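# A minimal example of the expected .env contents (the value below is a
# placeholder, not a real token):
#
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxxxxxx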

def load_model():
"""
Load or download the model and tokenizer.
"""
config_path = os.path.join(MODEL_DIR, "config.json")
if not os.path.exists(config_path):
os.makedirs(MODEL_DIR, exist_ok=True)
# model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, use_auth_token=api_token)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, trust_remote_code=True)
model.save_pretrained(MODEL_DIR)
# tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_auth_token=api_token)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
tokenizer.save_pretrained(MODEL_DIR)
else:
        # Reload the local copy; trust_remote_code is still needed here
        # because the saved config references the model's custom code
        model = AutoModelForCausalLM.from_pretrained(MODEL_DIR, trust_remote_code=True)
        tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
return model, tokenizer

def create_pipeline(model, tokenizer):
"""
Create a text-generation pipeline.
"""
hf_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
return hf_pipeline

def generate_text(hf_pipeline, input_text):
    """
    Translate English text to French using the Hugging Face pipeline
    wrapped in a LangChain LLMChain.
    """
prompt_template = PromptTemplate(
input_variables=["input_text"],
template="Translate the following English text to French: {input_text}"
)
llm = HuggingFacePipeline(pipeline=hf_pipeline)
    # LLMChain expects the template via the `prompt` argument
    llm_chain = LLMChain(prompt=prompt_template, llm=llm)
return llm_chain.run({"input_text": input_text})
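

# A minimal usage sketch (assumes enough memory to load the model locally and
# that the installed transformers/langchain versions match the imports above):
if __name__ == "__main__":
    model, tokenizer = load_model()
    hf_pipeline = create_pipeline(model, tokenizer)
    print(generate_text(hf_pipeline, "Good morning, my friend."))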