# import os
# from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# from langchain.llms import HuggingFacePipeline
# from langchain import PromptTemplate, LLMChain
# from dotenv import load_dotenv


# Define the model directory and name
# MODEL_DIR = "/home/user/model"
# MODEL_NAME = "Giang07/Llama-2-7b-chat-QLoRa"
# MODEL_NAME = "meta-llama/Meta-Llama-3-8B"
# MODEL_NAME = "microsoft/Phi-3-mini-4k-instruct"

# Load environment variables from .env file
# load_dotenv()

# Now you can use the token
# api_token = os.getenv('HF_TOKEN')

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain


MODEL_NAME = "microsoft/Phi-3-mini-4k-instruct"

def load_model():
    """
    Load or download the model and tokenizer.
    """
    # config_path = os.path.join(MODEL_DIR, "config.json")
    # if not os.path.exists(config_path):
    #     os.makedirs(MODEL_DIR, exist_ok=True)
    #     # model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, use_auth_token=api_token)
    #     model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, trust_remote_code=True)
    #     model.save_pretrained(MODEL_DIR)
    #     # tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_auth_token=api_token)
    #     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
    #     tokenizer.save_pretrained(MODEL_DIR)
    # else:
    #     model = AutoModelForCausalLM.from_pretrained(MODEL_DIR)
    #     tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
    # Phi-3 is a decoder-only (causal) model, so load it with AutoModelForCausalLM;
    # trust_remote_code is needed for the custom model code shipped with the repo
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
    
    return model, tokenizer

def create_pipeline(model, tokenizer):
    """
    Create a text-generation pipeline and wrap it for LangChain.
    """
    # Build a standard transformers text-generation pipeline first
    hf_pipeline = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        return_full_text=False,  # only return newly generated text
        max_new_tokens=50,
        do_sample=True,
        temperature=0.7,
    )
    # Wrap it so LangChain can use it as an LLM
    return HuggingFacePipeline(pipeline=hf_pipeline)

def generate_text(hf_pipeline, input_text):
    """
    Generate text using the LangChain-wrapped Hugging Face pipeline.
    """
    # Define a prompt template if needed (this is an example, adjust accordingly)
    prompt_template = PromptTemplate(
        input_variables=["input_text"],
        template="Please summarize the following text: {input_text}"
    )

    # Chain the prompt with the wrapped pipeline; generation parameters
    # (max_new_tokens, temperature, etc.) are configured on the pipeline itself
    llm_chain = LLMChain(prompt=prompt_template, llm=hf_pipeline)

    # Example input text for the task
    # input_text = "LangChain is a library that facilitates the development of applications using language models."

    # Run the chain and return the generated text
    output = llm_chain.run({"input_text": input_text})
    return output
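

# Minimal usage sketch (an addition, not part of the original script) showing how
# load_model, create_pipeline and generate_text fit together. Running the full
# Phi-3 model locally requires a GPU or substantial RAM; the sample text below
# is only illustrative.
if __name__ == "__main__":
    model, tokenizer = load_model()
    llm = create_pipeline(model, tokenizer)
    sample_text = (
        "LangChain is a library that facilitates the development of "
        "applications using language models."
    )
    print(generate_text(llm, sample_text))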