Sandaruth committed
Commit ffa8147
1 Parent(s): ffad717

update llm

Files changed (1):
  model.py (+15 -3)

model.py CHANGED
@@ -13,9 +13,16 @@ os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
 
 from langchain_openai import ChatOpenAI
 
-llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0,)
+llm_OpenAi = ChatOpenAI(model="gpt-3.5-turbo", temperature=0,)
 
 
+from langchain.chat_models import ChatAnyscale
+
+ANYSCALE_ENDPOINT_TOKEN=os.environ.get("ANYSCALE_ENDPOINT_TOKEN")
+anyscale_api_key =ANYSCALE_ENDPOINT_TOKEN
+
+llm=ChatAnyscale(anyscale_api_key=anyscale_api_key,temperature=0, model_name='mistralai/Mistral-7B-Instruct-v0.1', streaming=False)
+
 
 ## Create embeddings and splitter
 
@@ -47,6 +54,7 @@ splitter = RecursiveCharacterTextSplitter(
 from langchain_community.vectorstores import FAISS
 
 persits_directory="./faiss_Test02_500_C_BGE_large"
+# persits_directory="./faiss_V03_C500_BGE_large-final"
 
 vectorstore= FAISS.load_local(persits_directory, embedding)
 
@@ -67,6 +75,8 @@ qa_template = ("""
 if context is not enough to answer the question, ask for more information.
 if context is not related to the question, say I dont know.
 
+give the answer with very clear structure and clear language.
+
 each answer Must start with code word ATrad Ai(QA):
 
 Question: {question}
@@ -82,19 +92,21 @@ qa_template2 = ("""
 Please provide me with any questions or concerns you have regarding the ATrad Application.
 If you're unsure about something or need more information, feel free to ask.
 
+each answer Must start with code word ATrad Ai(QA):
+
 Question: {question}
 
 ATrad Ai(QA): Let me think about it...""")
 
 
-QA_PROMPT = PromptTemplate(input_variables=["context", "question"],template=qa_template2,)
+QA_PROMPT = PromptTemplate(input_variables=["context", "question"],template=qa_template,)
 
 
 # Chain for Web
 from langchain.chains import RetrievalQA
 
 Web_qa = RetrievalQA.from_chain_type(
-    llm=llm,
+    llm=llm_OpenAi,
     chain_type="stuff",
     retriever = vectorstore.as_retriever(search_kwargs={"k": 4}),
     return_source_documents= True,
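
For context, a minimal usage sketch (not part of this commit), assuming model.py exposes the Web_qa chain built above and that OPENAI_API_KEY, ANYSCALE_ENDPOINT_TOKEN, and the local FAISS directory are available; the example question is illustrative:

# Usage sketch (hypothetical): exercise the Web_qa RetrievalQA chain from model.py.
from model import Web_qa

# RetrievalQA takes a dict keyed by "query"; with return_source_documents=True it
# returns the answer under "result" and the k=4 retrieved chunks under "source_documents".
response = Web_qa.invoke({"query": "How do I reset my ATrad Application password?"})

print(response["result"])                  # answer text; the prompts above request an "ATrad Ai(QA):" prefix
for doc in response["source_documents"]:   # retrieved context chunks from the FAISS index
    print(doc.metadata)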