evanperez committed
Commit bfe04df
1 Parent(s): 7ff74b6

Update app.py

Files changed (1)
  1. app.py  +11 -12
app.py CHANGED
@@ -141,21 +141,19 @@ def user_input(user_question, api_key):
     chain = get_conversational_chain()
     response_gemini = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)

-    # Initialize the Hugging Face text generation pipeline with your custom model
-    pipeline = TextGeneratorPipeline(model="Writer/palmyra-small")
+    # Initialize the Hugging Face model and tokenizer
+    model_name_or_path = "Writer/palmyra-small"
+    model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
+    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)

-    # Prompt template for making the response more conversational
-    prompt_template = f"""
-    Transform the following response into a more conversational tone without adding new information:
-
-    Response:
-    {response_gemini["output_text"]}
-
-    Transformed Response:
-    """
+    # Tokenize the prompt
+    inputs = tokenizer(prompt_template.format(response_gemini["output_text"]), return_tensors="pt", max_length=100, truncation=True)

     # Generate the transformed response using the Hugging Face model
-    transformed_response = pipeline(prompt=prompt_template, max_length=100)
+    outputs = model.generate(**inputs)
+
+    # Decode the generated response
+    transformed_response = tokenizer.decode(outputs[0], skip_special_tokens=True)

     # Display the transformed response
     st.write("Reply: ", transformed_response)
@@ -166,6 +164,7 @@ def user_input(user_question, api_key):



+
 def main():
     st.header("RAG based LLM Application")

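
Note on this change: the first hunk removes the prompt_template f-string but the added code still calls prompt_template.format(...), and the transformers imports (AutoModelForCausalLM, AutoTokenizer) are not part of the diff, so both must exist elsewhere in app.py for user_input to run. Below is a minimal, self-contained sketch of the new flow; the plain-string template (reconstructed from the deleted f-string) and the max_new_tokens argument are assumptions, not part of the commit.

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Load the causal LM and its tokenizer (same checkpoint as in the commit).
    model_name_or_path = "Writer/palmyra-small"
    model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)

    # Plain str.format() template reconstructed from the deleted f-string;
    # the commit itself no longer defines this, so it is an assumption here.
    prompt_template = (
        "Transform the following response into a more conversational tone "
        "without adding new information:\n\n"
        "Response:\n{}\n\n"
        "Transformed Response:\n"
    )

    # Stand-in for response_gemini["output_text"] from the RAG chain.
    response_text = "The capital of France is Paris."

    # Tokenize the filled-in prompt, truncating it to at most 100 tokens.
    inputs = tokenizer(
        prompt_template.format(response_text),
        return_tensors="pt", max_length=100, truncation=True,
    )

    # Generate; max_new_tokens is assumed here, since generate() otherwise
    # falls back to a short default length and may cut the reply off.
    outputs = model.generate(**inputs, max_new_tokens=100)

    # Decode the generated ids into text, dropping special tokens.
    transformed_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(transformed_response)

One caveat: outputs[0] contains the prompt tokens as well, so transformed_response echoes the prompt before the generated text; slicing off the first inputs["input_ids"].shape[1] ids before decoding would return only the newly generated reply.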