Update app.py
app.py
CHANGED
@@ -94,6 +94,8 @@ def load_model():
         quantize_config=None,
     )
 
+    print('model done')
+
     DEFAULT_SYSTEM_PROMPT = """
     You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. Always provide the citation for the answer from the text. Try to include any section or subsection present in the text responsible for the answer. Provide reference. Provide page number, section, sub section etc from which answer is taken.
 
@@ -111,6 +113,8 @@ def load_model():
 
     llm = HuggingFacePipeline(pipeline=text_pipeline, model_kwargs={"temperature": 0.2})
 
+    print('llm done')
+
     SYSTEM_PROMPT = "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer."
 
     template = generate_prompt("""{context} Question: {question} """, system_prompt=SYSTEM_PROMPT,)  # Enter memory here!
@@ -129,6 +133,8 @@ def load_model():
         #input_key="question",
         #return_messages=True)
     },)
+
+    print('load done')
     return qa_chain
 
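Note: generate_prompt is defined elsewhere in app.py and is not part of this diff. As a rough sketch of what a helper with this signature usually does for a Llama-2-chat-style model (the [INST]/<<SYS>> wrapping below is an assumption, not code from this commit):

    # Hypothetical sketch of generate_prompt; not taken from this commit.
    # Assumes a Llama-2-chat prompt format with [INST] / <<SYS>> markers.
    def generate_prompt(prompt: str, system_prompt: str) -> str:
        # The {context} and {question} placeholders inside 'prompt' pass
        # through untouched, so the returned string can still be fed to a
        # LangChain PromptTemplate.
        return f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{prompt} [/INST]"

Likewise, the qa_chain returned at the end is constructed above the last hunk. Assuming a standard LangChain RetrievalQA chain (not shown in this diff), callers would use it roughly as:

    qa_chain = load_model()
    result = qa_chain({"query": "What does section 2.1 say about payment terms?"})
    print(result["result"])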