Update app.py
Browse files
app.py
CHANGED
@@ -1,5 +1,6 @@
 import streamlit as st
-from transformers import
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
 from langchain import PromptTemplate, HuggingFaceHub, LLMChain
 import os
 from langchain.memory import ConversationBufferMemory
@@ -19,10 +20,10 @@ prompt = PromptTemplate(template=template, input_variables=["question"])
 
 answer_memory = ConversationBufferMemory(input_key='question', memory_key='chat_history')
 
+tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-base-alpha-3b")
 
-
-
-llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True, output_key='answer', memory=answer_memory)
+model = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-base-alpha-3b")
+llm_chain = LLMChain(prompt=prompt, llm=model, verbose=True, output_key='answer', memory=answer_memory)
 
 
 wiki = WikipediaAPIWrapper()