CosmoAI committed on
Commit
fb19975
1 Parent(s): 5ef09d6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -1
app.py CHANGED
@@ -10,6 +10,7 @@ from langchain.chains import ConversationalRetrievalChain
10
  from htmlTemplates import css, bot_template, user_template
11
  from langchain.llms import HuggingFaceHub
12
  import os
 
13
  # from langchain.callbacks import get_openai_callback
14
 
15
  hub_token = os.environ["HUGGINGFACE_HUB_TOKEN"]
@@ -44,7 +45,10 @@ def get_vectorstore(text_chunks):
44
 
45
  def get_conversation_chain(vectorstore):
46
  # llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k")
47
- llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", huggingfacehub_api_token=hub_token, model_kwargs={"temperature":0.5, "max_length":20})
 
 
 
48
 
49
  memory = ConversationBufferMemory(
50
  memory_key='chat_history', return_messages=True)
 
10
  from htmlTemplates import css, bot_template, user_template
11
  from langchain.llms import HuggingFaceHub
12
  import os
13
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
14
  # from langchain.callbacks import get_openai_callback
15
 
16
  hub_token = os.environ["HUGGINGFACE_HUB_TOKEN"]
 
45
 
46
  def get_conversation_chain(vectorstore):
47
  # llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k")
48
+ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
49
+ model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
50
+
51
+ llm = HuggingFaceHub(repo_id=model, huggingfacehub_api_token=hub_token, model_kwargs={"temperature":0.5, "max_length":20})
52
 
53
  memory = ConversationBufferMemory(
54
  memory_key='chat_history', return_messages=True)