KvrParaskevi committed on
Commit
7ee74de
1 Parent(s): 2cc7b13

Update chatbot.py

Browse files
Files changed (1) hide show
  1. chatbot.py +6 -2
chatbot.py CHANGED
@@ -1,4 +1,5 @@
1
  import os
 
2
  from langchain.memory import ConversationBufferMemory
3
  from langchain.chains import ConversationChain
4
  import langchain.globals
@@ -31,7 +32,8 @@ Current Conversation:
31
  Human: {input}
32
  AI:"""
33
 
34
- #@st.cache_resource
 
35
  def load_model():
36
  quantization_config = BitsAndBytesConfig(
37
  load_in_8bit=True,
@@ -42,7 +44,8 @@ def load_model():
42
 
43
  return tokenizer,model
44
 
45
- #@st.cache_resource
 
46
  def load_pipeline():
47
  tokenizer, model = load_model()
48
  pipe = pipeline("text-generation",
@@ -69,6 +72,7 @@ def demo_miny_memory():
69
  memory = ConversationBufferMemory(llm = llm, memory_key = "history")
70
  return memory
71
 
 
72
  def demo_chain(input_text,history):
73
  #PROMPT = ChatPromptTemplate.from_template(template)
74
  PROMPT = PromptTemplate(template=template, input_variables=["history", "input"])
 
1
  import os
2
+ import spaces
3
  from langchain.memory import ConversationBufferMemory
4
  from langchain.chains import ConversationChain
5
  import langchain.globals
 
32
  Human: {input}
33
  AI:"""
34
 
35
+ #@st.cache_resource
36
+ @spaces.GPU
37
  def load_model():
38
  quantization_config = BitsAndBytesConfig(
39
  load_in_8bit=True,
 
44
 
45
  return tokenizer,model
46
 
47
+ #@st.cache_resource
48
+ @spaces.GPU
49
  def load_pipeline():
50
  tokenizer, model = load_model()
51
  pipe = pipeline("text-generation",
 
72
  memory = ConversationBufferMemory(llm = llm, memory_key = "history")
73
  return memory
74
 
75
+ @spaces.GPU
76
  def demo_chain(input_text,history):
77
  #PROMPT = ChatPromptTemplate.from_template(template)
78
  PROMPT = PromptTemplate(template=template, input_variables=["history", "input"])