firqaaa committed on
Commit
9a861ff
1 Parent(s): 887ef05

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -68,7 +68,7 @@ class Journal:
68
  def __repr__(self):
69
  return f"Journal(name='{self.name}', bytes='{self.bytes}')"
70
 
71
- llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
72
 
73
  textex_chain = create_extraction_chain(textex_schema, llm)
74
  tablex_chain = create_extraction_chain(tablex_schema, llm)
@@ -104,7 +104,7 @@ if uploaded_files:
104
  if on_h:
105
  chunk_size_h = st.selectbox(
106
  'Tokens amounts per process :',
107
- (64000, 32000,25000, 16000, 12000, 10000, 8000, 5000), key='table_h'
108
  )
109
  parseButtonH = st.button("Get Result", key='table_H')
110
 
@@ -116,7 +116,7 @@ if uploaded_files:
116
  if on_v:
117
  chunk_size_v = st.selectbox(
118
  'Tokens amounts per process :',
119
- (64000, 32000, 25000, 16000, 12000, 10000, 8000, 5000), key='table_v'
120
  )
121
  parseButtonV = st.button("Get Result", key='table_V')
122
  with col3:
@@ -127,7 +127,7 @@ if uploaded_files:
127
  if on_t:
128
  chunk_size_t = st.selectbox(
129
  'Tokens amounts per process :',
130
- (64000, 32000, 25000, 16000, 12000, 10000, 8000, 5000), key='no_table'
131
  )
132
  parseButtonT = st.button("Get Result", key="no_Table")
133
 
@@ -198,7 +198,7 @@ if uploaded_files:
198
  embeddings = OpenAIEmbeddings()
199
 
200
  db = Chroma.from_documents(docs, embeddings)
201
- llm_table = ChatOpenAI(model_name="gpt-4-1106-preview", temperature=0)
202
  qa_chain = RetrievalQA.from_chain_type(llm_table, retriever=db.as_retriever())
203
 
204
  # List of questions
@@ -650,7 +650,7 @@ if uploaded_files:
650
  embeddings = OpenAIEmbeddings()
651
 
652
  db = Chroma.from_documents(docs, embeddings)
653
- llm_table = ChatOpenAI(model_name="gpt-4-1106-preview", temperature=0)
654
  qa_chain = RetrievalQA.from_chain_type(llm_table, retriever=db.as_retriever())
655
 
656
  # List of questions
 
68
  def __repr__(self):
69
  return f"Journal(name='{self.name}', bytes='{self.bytes}')"
70
 
71
+ llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-1106")
72
 
73
  textex_chain = create_extraction_chain(textex_schema, llm)
74
  tablex_chain = create_extraction_chain(tablex_schema, llm)
 
104
  if on_h:
105
  chunk_size_h = st.selectbox(
106
  'Tokens amounts per process :',
107
+ (15000, 12000, 10000, 8000, 5000), key='table_h'
108
  )
109
  parseButtonH = st.button("Get Result", key='table_H')
110
 
 
116
  if on_v:
117
  chunk_size_v = st.selectbox(
118
  'Tokens amounts per process :',
119
+ (15000, 12000, 10000, 8000, 5000), key='table_v'
120
  )
121
  parseButtonV = st.button("Get Result", key='table_V')
122
  with col3:
 
127
  if on_t:
128
  chunk_size_t = st.selectbox(
129
  'Tokens amounts per process :',
130
+ (15000, 12000, 10000, 8000, 5000), key='no_table'
131
  )
132
  parseButtonT = st.button("Get Result", key="no_Table")
133
 
 
198
  embeddings = OpenAIEmbeddings()
199
 
200
  db = Chroma.from_documents(docs, embeddings)
201
+ llm_table = ChatOpenAI(model_name="gpt-3.5-turbo-1106", temperature=0)
202
  qa_chain = RetrievalQA.from_chain_type(llm_table, retriever=db.as_retriever())
203
 
204
  # List of questions
 
650
  embeddings = OpenAIEmbeddings()
651
 
652
  db = Chroma.from_documents(docs, embeddings)
653
+ llm_table = ChatOpenAI(model_name="gpt-3.5-turbo-1106", temperature=0)
654
  qa_chain = RetrievalQA.from_chain_type(llm_table, retriever=db.as_retriever())
655
 
656
  # List of questions