Commit
•
99b8f54
1
Parent(s):
87eb079
Update app.py (#1)
Browse files - Update app.py (af367983a894f0be42d118b6a148d6d4c4fcec3f)
Co-authored-by: kathiravan <Kathirsci@users.noreply.huggingface.co>
app.py
CHANGED
@@ -15,9 +15,9 @@ llm = HuggingFaceEndpoint(
|
|
15 |
task="text-generation",
|
16 |
max_new_tokens=512,
|
17 |
top_k=5,
|
18 |
-
temperature=0.
|
19 |
repetition_penalty=1.03,
|
20 |
-
|
21 |
huggingfacehub_api_token=HF_TOKEN
|
22 |
)
|
23 |
template = """
|
@@ -43,7 +43,7 @@ QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"],templat
|
|
43 |
def predict(message, history):
|
44 |
input_prompt = QA_CHAIN_PROMPT.format(question=message, context=history)
|
45 |
result = llm.generate([input_prompt])
|
46 |
-
print(result)
|
47 |
|
48 |
# Access the generated text using the correct attribute(s)
|
49 |
if result.generations:
|
|
|
15 |
task="text-generation",
|
16 |
max_new_tokens=512,
|
17 |
top_k=5,
|
18 |
+
temperature=0.3,
|
19 |
repetition_penalty=1.03,
|
20 |
+
|
21 |
huggingfacehub_api_token=HF_TOKEN
|
22 |
)
|
23 |
template = """
|
|
|
43 |
def predict(message, history):
|
44 |
input_prompt = QA_CHAIN_PROMPT.format(question=message, context=history)
|
45 |
result = llm.generate([input_prompt])
|
46 |
+
print(result)
|
47 |
|
48 |
# Access the generated text using the correct attribute(s)
|
49 |
if result.generations:
|