Spaces:
Paused
Paused
Update model_utils.py
Browse files — model_utils.py (+1 −5)
model_utils.py
CHANGED
@@ -19,8 +19,6 @@
|
|
19 |
|
20 |
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
|
21 |
from langchain.llms import HuggingFacePipeline
|
22 |
-
from langchain.prompts import PromptTemplate
|
23 |
-
from langchain.schemas import LangChainConfig
|
24 |
|
25 |
|
26 |
MODEL_NAME = "microsoft/Phi-3-mini-4k-instruct"
|
@@ -76,12 +74,10 @@ def generate_text(hf_pipeline, input_text):
|
|
76 |
input_variables=["input_text"],
|
77 |
template="Please summarize the following text: {input_text}"
|
78 |
)
|
79 |
-
# Configuration for LangChain (adjust max_tokens, temperature, etc., as needed)
|
80 |
-
config = LangChainConfig(max_tokens=50, temperature=0.7)
|
81 |
|
82 |
# Example input text for the task
|
83 |
# input_text = "LangChain is a library that facilitates the development of applications using language models."
|
84 |
|
85 |
# Run the LangChain pipeline
|
86 |
-
output = hf_pipeline.generate(input_text, prompt_template=prompt_template
|
87 |
return output
|
|
|
19 |
|
20 |
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
|
21 |
from langchain.llms import HuggingFacePipeline
|
|
|
|
|
22 |
|
23 |
|
24 |
MODEL_NAME = "microsoft/Phi-3-mini-4k-instruct"
|
|
|
74 |
input_variables=["input_text"],
|
75 |
template="Please summarize the following text: {input_text}"
|
76 |
)
|
|
|
|
|
77 |
|
78 |
# Example input text for the task
|
79 |
# input_text = "LangChain is a library that facilitates the development of applications using language models."
|
80 |
|
81 |
# Run the LangChain pipeline
|
82 |
+
output = hf_pipeline.generate(input_text, prompt_template=prompt_template)
|
83 |
return output
|