secilozksen committed on
Commit 38a4d89
1 Parent(s): a3f655e

Upload 8 files

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ policyQA_original.csv filter=lfs diff=lfs merge=lfs -text
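The added rule matches what `git lfs track "policyQA_original.csv"` writes, so the ~28 MB CSV uploaded in this commit is stored as a Git LFS object rather than directly in the repository history.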
README.md CHANGED
@@ -1,12 +1,17 @@
- ---
- title: Question Answering Demo
- emoji: 📉
- colorFrom: gray
- colorTo: yellow
- sdk: streamlit
- sdk_version: 1.10.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # QuestionAnsweringDemo
+
+ ## Create the environment
+
+ conda env create --file environment.yml
+
+ conda activate QADemo
+
+ After installing the requirements, please make sure that you add your Hugging Face authorization token to your ./.streamlit/secrets.toml file.
+
+ It should look something like:
+
+ AUTH_TOKEN='your_auth_token_here'
+
+ ## Running the app
+
+ streamlit run demov2.py
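For reference, a minimal sketch of the secrets file the app expects: demov2.py reads `st.secrets["AUTH_TOKEN"]`, `st.secrets["MODEL_NAME"]`, and `st.secrets["MODEL_NAME_BASE"]`, so all three keys belong in `./.streamlit/secrets.toml`. The model identifiers below are placeholders, not the actual private model names:

```toml
# .streamlit/secrets.toml  (all values below are placeholders)
AUTH_TOKEN = 'your_auth_token_here'
MODEL_NAME = 'your-org/private-policyqa-model'        # hypothetical model id
MODEL_NAME_BASE = 'your-org/private-asnq-base-model'  # hypothetical model id
```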
context-embeddings.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9051e569255d71a5dbece9ebe371c81c0ef1a2ab9af91dc23d27eddb61943310
+ size 6562679
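The three lines above are the Git LFS pointer committed in place of the ~6.5 MB pickle itself; as the `context()` helper in demov2.py shows, the file holds a dict with `contexes` (the paragraph texts) and `embeddings` (their bi-encoder vectors).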
demov2.py ADDED
@@ -0,0 +1,304 @@
+ import copy
+ import streamlit as st
+ import json
+ import pandas as pd
+ import tokenizers
+ from sentence_transformers import SentenceTransformer, CrossEncoder, util
+ from transformers import pipeline
+ from st_aggrid import GridOptionsBuilder, AgGrid
+ import pickle
+ import torch
+ from transformers import RobertaTokenizer, RobertaForSequenceClassification
+ import spacy
+ import regex
+ from typing import List
+
+ st.set_page_config(layout="wide")
+
+ DATAFRAME_FILE_ORIGINAL = 'policyQA_original.csv'
+ DATAFRAME_FILE_BSBS = 'policyQA_bsbs_sentence.csv'
+
+
+ @st.experimental_singleton(suppress_st_warning=True, show_spinner=False)
+ def cross_encoder_init():
+     cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
+     return cross_encoder
+
+
+ @st.experimental_singleton(suppress_st_warning=True, show_spinner=False)
+ def bi_encoder_init():
+     bi_encoder = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1')
+     bi_encoder.max_seq_length = 500  # Truncate long passages to 500 tokens
+     return bi_encoder
+
+
+ @st.experimental_singleton(suppress_st_warning=True, show_spinner=False)
+ def nlp_init(auth_token, private_model_name):
+     return pipeline('question-answering', model=private_model_name, tokenizer=private_model_name,
+                     use_auth_token=auth_token,
+                     revision="main")
+
+
+ @st.experimental_singleton(suppress_st_warning=True, show_spinner=False)
+ def nlp_pipeline_hf():
+     model_name = "deepset/roberta-base-squad2"
+     return pipeline('question-answering', model=model_name, tokenizer=model_name)
+
+
+ @st.experimental_singleton(suppress_st_warning=True, show_spinner=False)
+ def nlp_pipeline_sentence_based(auth_token, private_model_name):
+     tokenizer = RobertaTokenizer.from_pretrained(private_model_name, use_auth_token=auth_token)
+     model = RobertaForSequenceClassification.from_pretrained(private_model_name, use_auth_token=auth_token)
+     return tokenizer, model
+
+
+ @st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None,
+                       regex.Pattern: lambda _: None}, show_spinner=False)
+ def load_models_sentence_based(auth_token, private_model_name, private_model_name_base):
+     bi_encoder = bi_encoder_init()
+     cross_encoder = cross_encoder_init()
+     # OLD MODEL
+     # nlp = nlp_init(auth_token, private_model_name)
+     # nlp_hf = nlp_pipeline_hf()
+     policy_qa_tokenizer, policy_qa_model = nlp_pipeline_sentence_based(auth_token, private_model_name)
+     asnq_tokenizer, asnq_model = nlp_pipeline_sentence_based(auth_token, private_model_name_base)
+
+     return bi_encoder, cross_encoder, policy_qa_tokenizer, policy_qa_model, asnq_tokenizer, asnq_model
+
+
+ @st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None}, show_spinner=False)
+ def load_models(auth_token, private_model_name):
+     bi_encoder = bi_encoder_init()
+     cross_encoder = cross_encoder_init()
+     nlp = nlp_init(auth_token, private_model_name)
+     nlp_hf = nlp_pipeline_hf()
+
+     return bi_encoder, cross_encoder, nlp, nlp_hf
+
+
+ def context():
+     # One-off helper that builds context-embeddings.pkl from a local contexts file.
+     bi_encoder = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1', device='cpu')
+     with open("/home/secilsen/PycharmProjects/SquadOperations/contexes.json", 'r', encoding='utf-8') as f:
+         paragraphs = json.load(f)
+         paragraphs = paragraphs['contexes']
+     with open('context-embeddings.pkl', "wb") as fIn:
+         context_embeddings = bi_encoder.encode(paragraphs, convert_to_tensor=True, show_progress_bar=True)
+         pickle.dump({'contexes': paragraphs, 'embeddings': context_embeddings}, fIn)
+
+
+ @st.cache(show_spinner=False)
+ def load_paragraphs():
+     with open('context-embeddings.pkl', "rb") as fIn:
+         cache_data = pickle.load(fIn)
+         corpus_sentences = cache_data['contexes']
+         corpus_embeddings = cache_data['embeddings']
+
+     return corpus_embeddings, corpus_sentences
+
+
+ @st.cache(show_spinner=False)
+ def load_dataframes():
+     data_original = pd.read_csv(DATAFRAME_FILE_ORIGINAL, index_col=0, sep='|')
+     data_bsbs = pd.read_csv(DATAFRAME_FILE_BSBS, index_col=0, sep='|')
+     data_original = data_original.sample(frac=1).reset_index(drop=True)
+     data_bsbs = data_bsbs.sample(frac=1).reset_index(drop=True)
+     return data_original, data_bsbs
+
+
+ def search(question, corpus_embeddings, contexes, bi_encoder, cross_encoder):
+     # Semantic search (retrieve): embed the question and fetch the closest paragraphs.
+     question_embedding = bi_encoder.encode(question, convert_to_tensor=True)
+     hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=100)
+     if len(hits) == 0:
+         return [], []
+     hits = hits[0]
+     # Rerank: score all retrieved passages with the cross-encoder.
+     cross_inp = [[question, contexes[hit['corpus_id']]] for hit in hits]
+     cross_scores = cross_encoder.predict(cross_inp)
+
+     # Sort results by the cross-encoder scores.
+     for idx in range(len(cross_scores)):
+         hits[idx]['cross-score'] = cross_scores[idx]
+
+     # Return the top hits from the re-ranker (up to 20, despite the variable names).
+     hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
+     top_5_contexes = []
+     top_5_scores = []
+     for hit in hits[0:20]:
+         top_5_contexes.append(contexes[hit['corpus_id']])
+         top_5_scores.append(hit['cross-score'])
+     return top_5_contexes, top_5_scores
+
+
+ def paragraph_embeddings():
+     _, paragraphs = load_paragraphs()
+     context_embeddings = bi_encoder.encode(paragraphs, convert_to_tensor=True, show_progress_bar=True)
+     return context_embeddings, paragraphs
+
+
+ def retrieve_rerank_pipeline(question, context_embeddings, paragraphs, bi_encoder, cross_encoder):
+     top_5_contexes, top_5_scores = search(question, context_embeddings, paragraphs, bi_encoder, cross_encoder)
+     return top_5_contexes, top_5_scores
+
+
+ def qa_pipeline(question, context, nlp):
+     return nlp({'question': question.strip(), 'context': context})
+
+
+ def qa_pipeline_sentence(question, context, model, tokenizer):
+     # Split the context into sentences and score each one as a candidate answer
+     # with the sequence classifier; keep the sentences whose positive-class
+     # probability clears the threshold.
+     sentences_doc = spacy_nlp(context)
+     candidate_sentences = []
+     for sentence in sentences_doc.sents:
+         tokenized = tokenizer(f"<s> {question} </s> {sentence.text} </s>", padding=True, truncation=True, return_tensors='pt')
+         output = model(**tokenized)
+         soft_outputs = torch.sigmoid(output[0])
+         t = torch.tensor([0.2])  # decision threshold
+         out = (soft_outputs[0] > t) * 1
+         out = out.flatten().cpu().detach().numpy()
+         if out[1] == 1:
+             prob = soft_outputs[:, 1].flatten().cpu().detach().numpy()
+             candidate_sentences.append(dict(sentence=sentence,
+                                             prob=prob[0]))
+     candidate_sentences = sorted(candidate_sentences, key=lambda x: x['prob'], reverse=True)
+     return candidate_sentences
+
+
+ def candidate_sentence_controller(sentences):
+     if sentences is None or len(sentences) == 0:
+         return ""
+     if len(sentences) == 1:
+         return sentences[0]
+     return sentences
+
+
+ def interactive_table(dataframe):
+     gb = GridOptionsBuilder.from_dataframe(dataframe)
+     gb.configure_pagination(paginationAutoPageSize=True)
+     gb.configure_side_bar()
+     gb.configure_selection('single', rowMultiSelectWithClick=True,
+                            groupSelectsChildren="Group checkbox select children")  # Enable single-row selection
+     gridOptions = gb.build()
+     grid_response = AgGrid(
+         dataframe,
+         gridOptions=gridOptions,
+         data_return_mode='AS_INPUT',
+         update_mode='SELECTION_CHANGED',
+         enable_enterprise_modules=False,
+         fit_columns_on_grid_load=False,
+         theme='streamlit',  # Add theme color to the table
+         height=350,
+         width='100%',
+         reload_data=False
+     )
+     return grid_response
+
+
+ def qa_main_widgetsv2():
+     st.title("Question Answering Demo")
+     col1, col2, col3 = st.columns([2, 1, 1])
+     with col1:
+         form = st.form(key='first_form')
+         question = form.text_area("What is your question?:", height=200)
+         submit = form.form_submit_button('Submit')
+         if "form_submit" not in st.session_state:
+             st.session_state.form_submit = False
+         if submit:
+             st.session_state.form_submit = True
+         if st.session_state.form_submit and question != '':
+             with st.spinner(text='Related context search in progress..'):
+                 top_5_contexes, top_5_scores = retrieve_rerank_pipeline(question.strip(), context_embeddings,
+                                                                         paragraphs, bi_encoder,
+                                                                         cross_encoder)
+             if len(top_5_contexes) == 0:
+                 st.error("Related context not found!")
+                 st.session_state.form_submit = False
+             else:
+                 with st.spinner(text='Now answering your question..'):
+                     for i, context in enumerate(top_5_contexes):
+                         # answer_trained = qa_pipeline(question, context, nlp)
+                         # answer_base = qa_pipeline(question, context, nlp_hf)
+                         answer_trained = qa_pipeline_sentence(question, context, policy_qa_model, policy_qa_tokenizer)
+                         answer_base = qa_pipeline_sentence(question, context, asnq_model, asnq_tokenizer)
+                         st.markdown(f"## Related Context - {i + 1} (score: {top_5_scores[i]:.2f})")
+                         st.markdown(context)
+                         st.markdown("## Answer (trained):")
+                         if answer_trained is None:
+                             st.markdown("")
+                         elif isinstance(answer_trained, List):
+                             for j, answer in enumerate(answer_trained):
+                                 st.markdown(f"### Answer Option {j + 1} with prob. {answer['prob']:.4f}")
+                                 st.markdown(answer['sentence'])
+                         else:
+                             st.markdown(answer_trained)
+                         st.markdown("## Answer (roberta-base-asnq):")
+                         if answer_base is None:
+                             st.markdown("")
+                         elif isinstance(answer_base, List):
+                             for j, answer in enumerate(answer_base):
+                                 st.markdown(f"### Answer Option {j + 1} with prob. {answer['prob']:.4f}")
+                                 st.markdown(answer['sentence'])
+                         else:
+                             st.markdown(answer_base)
+                         st.markdown("""---""")
+
+     with col2:
+         st.markdown("## Original Questions")
+         grid_response = interactive_table(dataframe_original)
+         data1 = grid_response['selected_rows']
+         if "grid_click_1" not in st.session_state:
+             st.session_state.grid_click_1 = False
+         if len(data1) > 0:
+             st.session_state.grid_click_1 = True
+         if st.session_state.grid_click_1:
+             selection = data1[0]
+             st.markdown("### Context:")
+             st.write(selection['context'])
+             st.markdown("### Question:")
+             st.write(selection['question'])
+             st.markdown("### Answer:")
+             st.write(selection['answer'])
+             st.session_state.grid_click_1 = False
+     with col3:
+         st.markdown("## Our Questions")
+         grid_response = interactive_table(dataframe_bsbs)
+         data2 = grid_response['selected_rows']
+         if "grid_click_2" not in st.session_state:
+             st.session_state.grid_click_2 = False
+         if len(data2) > 0:
+             st.session_state.grid_click_2 = True
+         if st.session_state.grid_click_2:
+             selection = data2[0]
+             st.markdown("### Context:")
+             st.write(selection['context'])
+             st.markdown("### Question:")
+             st.write(selection['question'])
+             st.markdown("### Answer:")
+             st.write(selection['answer'])
+             st.session_state.grid_click_2 = False
+
+
+ def load():
+     context_embeddings, paragraphs = load_paragraphs()
+     dataframe_original, dataframe_bsbs = load_dataframes()
+     spacy_nlp = spacy.load('en_core_web_sm')
+     # bi_encoder, cross_encoder, nlp, nlp_hf = copy.deepcopy(load(st.secrets["AUTH_TOKEN"], st.secrets["MODEL_NAME"]))
+     bi_encoder, cross_encoder, policy_qa_tokenizer, policy_qa_model, asnq_tokenizer, asnq_model \
+         = copy.deepcopy(
+             load_models_sentence_based(st.secrets["AUTH_TOKEN"], st.secrets["MODEL_NAME"], st.secrets["MODEL_NAME_BASE"]))
+     return context_embeddings, paragraphs, dataframe_original, dataframe_bsbs, bi_encoder, cross_encoder, policy_qa_tokenizer, policy_qa_model, asnq_tokenizer, asnq_model, spacy_nlp
+
+
+ # save_dataframe()
+ # context_embeddings, paragraphs, dataframe_original, dataframe_bsbs, bi_encoder, cross_encoder, nlp, nlp_hf = load()
+ context_embeddings, paragraphs, dataframe_original, dataframe_bsbs, bi_encoder, cross_encoder, policy_qa_tokenizer, policy_qa_model, asnq_tokenizer, asnq_model, spacy_nlp = load()
+ qa_main_widgetsv2()
+
+ # if __name__ == '__main__':
+ #     context()
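Since the retrieve-and-rerank logic above is wired into the Streamlit session, here is a minimal standalone sketch of the same pattern for experimentation outside the app, assuming `context-embeddings.pkl` is in the working directory (the query string is illustrative):

```python
# Standalone sketch of the retrieve-and-rerank step used by demov2.py.
# Assumes context-embeddings.pkl (as built by the context() helper) is present.
import pickle

from sentence_transformers import SentenceTransformer, CrossEncoder, util

bi_encoder = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1')
cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')

with open('context-embeddings.pkl', 'rb') as f:
    cache = pickle.load(f)
contexes, corpus_embeddings = cache['contexes'], cache['embeddings']

question = "What personal data does the policy cover?"  # illustrative query

# Retrieve: embed the question and fetch the 100 nearest paragraphs.
question_embedding = bi_encoder.encode(question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=100)[0]

# Rerank: score (question, paragraph) pairs with the cross-encoder, print the best.
scores = cross_encoder.predict([[question, contexes[h['corpus_id']]] for h in hits])
best_score, best_hit = max(zip(scores, hits), key=lambda pair: pair[0])
print(f"score={best_score:.2f}\n{contexes[best_hit['corpus_id']]}")
```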
policyQA.json ADDED
The diff for this file is too large to render. See raw diff
 
policyQA_bsbs.csv ADDED
The diff for this file is too large to render. See raw diff
 
policyQA_bsbs_sentence.csv ADDED
The diff for this file is too large to render. See raw diff
 
policyQA_original.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f7b4cb4bd7c65a11f21a0553c0a419c424639a6a123cdf89ecbb05ad849b7a6
+ size 28581894
requirements.txt ADDED
@@ -0,0 +1,150 @@
+ altair==4.2.0
+ argon2-cffi==21.3.0
+ argon2-cffi-bindings==21.2.0
+ asttokens==2.0.5
+ attrs==21.4.0
+ backcall==0.2.0
+ bleach==5.0.1
+ blinker==1.5
+ blis==0.7.9
+ brotlipy==0.7.0
+ cachetools==5.2.0
+ catalogue==2.0.8
+ certifi==2022.9.24
+ cffi==1.15.1
+ charset-normalizer==2.1.1
+ click==8.1.3
+ commonmark==0.9.1
+ cryptography==38.0.3
+ cycler==0.11.0
+ cymem==2.0.7
+ debugpy==1.6.0
+ decorator==5.1.1
+ defusedxml==0.7.1
+ en-core-web-sm==3.2.0
+ entrypoints==0.4
+ executing==0.8.3
+ fastjsonschema==2.15.3
+ filelock==3.8.0
+ fonttools==4.33.3
+ gitdb==4.0.9
+ GitPython==3.1.29
+ huggingface-hub==0.10.0
+ idna==3.4
+ importlib-metadata==5.0.0
+ ipykernel==6.15.0
+ ipython==8.4.0
+ ipython-genutils==0.2.0
+ ipywidgets==7.7.1
+ jedi==0.18.1
+ Jinja2==3.1.2
+ joblib==1.2.0
+ jsonschema==4.6.0
+ jupyter==1.0.0
+ jupyter-client==7.3.4
+ jupyter-console==6.4.4
+ jupyter-core==4.10.0
+ jupyterlab-pygments==0.2.2
+ jupyterlab-widgets==1.1.1
+ kiwisolver==1.4.3
+ langcodes==3.3.0
+ MarkupSafe==2.1.1
+ matplotlib==3.5.2
+ matplotlib-inline==0.1.3
+ mistune==0.8.4
+ mkl-fft==1.3.1
+ mkl-random==1.2.2
+ mkl-service==2.4.0
+ mpmath==1.2.1
+ murmurhash==1.0.9
+ nbclient==0.6.4
+ nbconvert==6.5.0
+ nbformat==5.4.0
+ nest-asyncio==1.5.5
+ nltk==3.7
+ nose==1.3.7
+ notebook==6.4.12
+ numpy==1.23.3
+ packaging==21.3
+ pandas==1.5.0
+ pandocfilters==1.5.0
+ parso==0.8.3
+ pathy==0.6.2
+ pexpect==4.8.0
+ pickleshare==0.7.5
+ Pillow==9.2.0
+ pip==22.2.2
+ preshed==3.0.8
+ prometheus-client==0.14.1
+ prompt-toolkit==3.0.30
+ protobuf==3.20.3
+ psutil==5.9.1
+ ptyprocess==0.7.0
+ pure-eval==0.2.2
+ pyarrow==10.0.0
+ pycparser==2.21
+ pydantic==1.8.2
+ pydeck==0.8.0b4
+ Pygments==2.12.0
+ Pympler==1.0.1
+ pyOpenSSL==22.1.0
+ pyparsing==3.0.9
+ pyrsistent==0.18.1
+ PySocks==1.7.1
+ python-dateutil==2.8.2
+ python-decouple==3.6
+ pytz==2022.6
+ pytz-deprecation-shim==0.1.0.post0
+ PyYAML==6.0
+ pyzmq==23.2.0
+ qtconsole==5.3.1
+ QtPy==2.1.0
+ regex==2022.10.31
+ requests==2.28.1
+ rich==12.6.0
+ scikit-learn==1.1.2
+ scipy==1.9.2
+ semver==2.13.0
+ Send2Trash==1.8.0
+ sentence-transformers==2.2.2
+ sentencepiece==0.1.97
+ setuptools==65.5.0
+ six==1.16.0
+ smart-open==5.2.1
+ smmap==5.0.0
+ soupsieve==2.3.2.post1
+ spacy==3.2.0
+ spacy-legacy==3.0.10
+ spacy-loggers==1.0.3
+ srsly==2.4.5
+ stack-data==0.3.0
+ streamlit==1.13.0
+ streamlit-aggrid==0.3.3
+ sympy==1.10.1
+ terminado==0.15.0
+ thinc==8.0.17
+ threadpoolctl==3.1.0
+ tinycss2==1.1.1
+ tokenizers==0.12.1
+ toml==0.10.2
+ toolz==0.12.0
+ torch==1.12.1
+ torchaudio==0.12.1
+ torchvision==0.13.1
+ tornado==6.1
+ tqdm==4.64.1
+ traitlets==5.3.0
+ transformers==4.22.2
+ typer==0.4.2
+ typing_extensions==4.4.0
+ tzdata==2022.6
+ tzlocal==4.2
+ urllib3==1.26.11
+ validators==0.20.0
+ wasabi==0.10.1
+ watchdog==2.1.9
+ wcwidth==0.2.5
+ webencodings==0.5.1
+ wheel==0.37.1
+ widgetsnbextension==3.6.1
+ zipp==3.10.0
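The README builds the environment from environment.yml, which is not part of this upload; if it is unavailable, the pinned list above should also install directly with `pip install -r requirements.txt` inside a fresh virtual environment (a reasonable fallback, not verified for this exact commit).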