valeriylo committed on
Commit
2346d6a
·
1 Parent(s): 11ce526

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -12
app.py CHANGED
@@ -7,16 +7,17 @@ from langchain.vectorstores import FAISS
7
  from langchain.chat_models import ChatOpenAI
8
  from langchain.memory import ConversationBufferMemory
9
  from langchain.chains import ConversationalRetrievalChain
 
10
  from htmlTemplates import css, bot_template, user_template
11
  from langchain.llms import HuggingFaceHub, LlamaCpp
12
  from huggingface_hub import snapshot_download, hf_hub_download
13
 
14
  # from prompts import CONDENSE_QUESTION_PROMPT
15
 
16
- repo_name = "IlyaGusev/saiga_mistral_7b_gguf"
17
- model_name = "model-q4_K.gguf"
18
 
19
- snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)
20
 
21
 
22
  def get_pdf_text(pdf_docs):
@@ -51,17 +52,19 @@ def get_vectorstore(text_chunks):
51
 
52
  def get_conversation_chain(vectorstore, model_name):
53
 
54
- llm = LlamaCpp(model_path=model_name,
55
- temperature=0.1,
56
- top_k=30,
57
- top_p=0.9,
58
- streaming=True,
59
- n_ctx=2048,
60
- n_parts=1,
61
- echo=True
62
- )
63
 
64
  #llm = ChatOpenAI()
 
 
65
 
66
  memory = ConversationBufferMemory(memory_key='chat_history',
67
  input_key='question',
 
7
  from langchain.chat_models import ChatOpenAI
8
  from langchain.memory import ConversationBufferMemory
9
  from langchain.chains import ConversationalRetrievalChain
10
+ from langchain.chat_models.gigachat import GigaChat
11
  from htmlTemplates import css, bot_template, user_template
12
  from langchain.llms import HuggingFaceHub, LlamaCpp
13
  from huggingface_hub import snapshot_download, hf_hub_download
14
 
15
  # from prompts import CONDENSE_QUESTION_PROMPT
16
 
17
+ #repo_name = "IlyaGusev/saiga_mistral_7b_gguf"
18
+ #model_name = "model-q4_K.gguf"
19
 
20
+ #snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)
21
 
22
 
23
  def get_pdf_text(pdf_docs):
 
52
 
53
  def get_conversation_chain(vectorstore, model_name):
54
 
55
+ #llm = LlamaCpp(model_path=model_name,
56
+ # temperature=0.1,
57
+ # top_k=30,
58
+ # top_p=0.9,
59
+ # streaming=True,
60
+ # n_ctx=2048,
61
+ # n_parts=1,
62
+ # echo=True
63
+ # )
64
 
65
  #llm = ChatOpenAI()
66
+
67
+ llm = GigaChat(profanity=False, credentials='9d2e213f-a4a0-431a-a2ac-624fafe3970c')
68
 
69
  memory = ConversationBufferMemory(memory_key='chat_history',
70
  input_key='question',