makcrx committed on
Commit d869f0d
1 Parent(s): 454c118

test keywords

Files changed (4)
  1. app.py +19 -5
  2. extract_keywords.py +122 -0
  3. test.ipynb +77 -7
  4. test_keybert.ipynb +224 -0
app.py CHANGED
@@ -2,18 +2,32 @@ from langchain.vectorstores import FAISS
 from langchain.embeddings import SentenceTransformerEmbeddings
 import gradio as gr
 import reranking
+from extract_keywords import init_keyword_extractor, extract_keywords
 
 embeddings = SentenceTransformerEmbeddings(model_name="multi-qa-MiniLM-L6-cos-v1")
 db = FAISS.load_local('faiss_qa', embeddings)
+init_keyword_extractor()
 
 def main(query):
     query = query.lower()
+    query_keywords = set(extract_keywords(query))
     result_docs = db.similarity_search_with_score(query, k=20)
-    sentences = [doc[0].page_content for doc in result_docs]
-    #print('----------------------------------------------------------------')
-    #for doc in result_docs:
-    #    print(doc[0].metadata['articleId'], ' | ', doc[0].page_content, ' | ', doc[0].metadata['answer'])
-    score, index = reranking.search(query, sentences)
+
+    if len(query_keywords) > 0:
+        result_docs = filter(lambda doc: len(set(extract_keywords(doc[0].page_content)).intersection(query_keywords)) > 0, result_docs)
+
+    if len(result_docs) == 0:
+        return 'Ответ не найден', 0, ''
+
+    if len(result_docs) == 1:
+        score, index = 0, 0
+    else:
+        sentences = [doc[0].page_content for doc in result_docs]
+        #print('----------------------------------------------------------------')
+        #for doc in result_docs:
+        #    print(doc[0].metadata['articleId'], ' | ', doc[0].page_content, ' | ', doc[0].metadata['answer'])
+        score, index = reranking.search(query, sentences)
+
     return result_docs[index][0].metadata['answer'], score, result_docs[index][0].page_content
 
 demo = gr.Interface(fn=main, inputs="text", outputs=[
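Note on the new filtering branch: in Python 3, filter() returns a lazy iterator, so once the keyword filter is applied the len(result_docs) checks and the result_docs[index] lookup above would raise a TypeError. Below is a minimal sketch of the same step with the filtered docs materialized as a list; filter_by_keywords is a hypothetical helper name, while extract_keywords and the (document, score) tuples are the ones used in app.py:

# Hypothetical helper (not part of the commit): same keyword filter as in main(),
# but returning a list so len() and indexing keep working.
def filter_by_keywords(result_docs, query_keywords):
    return [
        doc for doc in result_docs
        if set(extract_keywords(doc[0].page_content)) & set(query_keywords)
    ]

# usage inside main():
# if query_keywords:
#     result_docs = filter_by_keywords(result_docs, query_keywords)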
extract_keywords.py ADDED
@@ -0,0 +1,122 @@
+def flatten(items, seqtypes=(list, tuple)):
+    try:
+        for i, x in enumerate(items):
+            while isinstance(x, seqtypes):
+                items[i:i+1] = x
+                x = items[i]
+    except IndexError:
+        pass
+    return items
+
+aliases = [
+    #('canonical name', ['aliases', ...])
+    ('почта россия', ['почта', 'почта рф', 'пр', 'gh']),
+    ('почта россия трекинг', ['пр трекинг', 'почта трекинг', 'пр трэкинг', 'почта трэкинг']),
+    ('реестр почта', ['реестр пр', 'реестр почта россии']),
+    ('реестр пэк', []),
+    ('реквизиты', []),
+    ('пешкарики', []),
+    ('импорт лидов директ', []),
+    ('яндекс доставка экспресс', ['яндекс доставка express', 'яд экспресс', 'ядоставка экспресс']),
+    ('яндекс доставка ndd', ['яд ндд', 'я доставка ндд', 'ядоставка ндд', 'модуль ндд']),
+    ('яндекс метрика', ['яндекс метрика импорт']),
+    ('альфабанк', ['альфа банк', 'alfabank', 'альфа']),
+    ('импорт лидов facebook', ['импорт лидов fb', 'загрузка лидов fb', 'лиды фейсбук', 'импорт лидов фб', 'fb lead']),
+    ('маркетинговые расходы', ['расходы', 'загрузка расходов']),
+    ('cloudpayments', ['клауд', 'клаудпеймент', 'клаудпейментс']),
+    ('robokassa', ['робокасса', 'робокаса']),
+    ('sipuni', ['сипуни', 'сипьюни']),
+    ('mailchimp', ['майлчимп', 'мейлчим', 'мейлчимп']),
+    ('unisender', ['юнисендер']),
+    ('яндекс аудитории', ['экспорт аудитории', 'экспорт яндекс аудитории']),
+    ('экспорт facebook', ['экспорт сегментов facebook', 'экспорт fb', 'экспорт фейсбук', 'экспорт аудиторий фб', 'fb экспорт']),
+    ('экспорт вк', ['экспорт сегментов vkontakte', 'экспорт vk', 'экспорт контакте'])
+]
+
+vocab_raw = flatten([[k] + keywords for k, keywords in aliases])
+
+import string
+import pymorphy3
+
+morph = None
+def normalize_word(word):
+    if word == 'лид':
+        return word
+    global morph
+    if morph is None:
+        morph = pymorphy3.MorphAnalyzer()
+    return morph.parse(word)[0].normal_form
+
+def tokenize_sentence(text):
+    # remove punctuation
+    text = text.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation)))
+    # tokenize
+    return [normalize_word(word) for word in text.split()]
+
+def normalize_sentence(text):
+    return " ".join(tokenize_sentence(text))
+
+def canonical_keywords(keywords):
+    """
+    replace keyword aliases with canonical keyword names
+    """
+    result = []
+    for k in keywords:
+        k = normalize_sentence(k)
+        for canonical_name, alias_names in aliases:
+            canonical_name = normalize_sentence(canonical_name)
+            for a in alias_names:
+                a = normalize_sentence(a)
+                #print('a', a)
+                if a == k:
+                    result.append(canonical_name)
+                    break
+            else:
+                continue
+            break
+        else:
+            result.append(k)
+    return result
+
+def merge_keywords(keywords):
+    """
+    remove subkeywords
+    """
+    result = []
+    sorted_keywords = sorted(keywords, key=len, reverse=True)
+
+    for k in sorted_keywords:
+        for rk in result:
+            if rk.lower().startswith(k):
+                break
+        else:
+            result.append(k)
+            continue
+
+    return result
+
+
+vectorizer = None
+kw_model = None
+
+def init_keyword_extractor():
+    global vectorizer
+    global kw_model
+
+    from keybert import KeyBERT
+    import spacy
+    from sklearn.feature_extraction.text import CountVectorizer
+
+    kw_model = KeyBERT(model=spacy.load("ru_core_news_sm", exclude=['tokenizer', 'tagger', 'parser', 'ner', 'attribute_ruler', 'lemmatizer']))
+    vocab = [" ".join(tokenize_sentence(s)) for s in vocab_raw]
+    vectorizer = CountVectorizer(ngram_range=(1, 4), vocabulary=vocab, tokenizer=tokenize_sentence)
+
+def extract_keywords(text):
+    global vectorizer
+    global kw_model
+
+    if vectorizer is None or kw_model is None:
+        init_keyword_extractor()
+
+    keywords = [k for k, score in kw_model.extract_keywords(text, vectorizer=vectorizer)]
+    return merge_keywords(canonical_keywords(keywords))
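For orientation, extract_keywords() above chains three steps: KeyBERT scores n-grams from the alias vocabulary against the text, canonical_keywords() rewrites alias phrases to their canonical names, and merge_keywords() drops keywords that are prefixes of longer ones already kept. A small usage sketch follows; the 'пр трекинг' example and its result are the ones recorded in test.ipynb below:

from extract_keywords import init_keyword_extractor, extract_keywords

# builds the KeyBERT model (spacy ru_core_news_sm) and the vocabulary CountVectorizer
init_keyword_extractor()

# 'пр трекинг' is listed as an alias of 'почта россия трекинг' in the aliases table above
print(extract_keywords('пр трекинг'))  # ['почта россия трекинг'], as shown in test.ipynb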
test.ipynb CHANGED
@@ -2,7 +2,7 @@
  "cells": [
  {
  "cell_type": "code",
-"execution_count": 6,
+"execution_count": 1,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -41,7 +41,7 @@
  },
  {
  "cell_type": "code",
-"execution_count": 9,
+"execution_count": 2,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -52,7 +52,7 @@
  },
  {
  "cell_type": "code",
-"execution_count": 10,
+"execution_count": 3,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -64,17 +64,87 @@
  },
  {
  "cell_type": "code",
-"execution_count": 15,
+"execution_count": 1,
  "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"2023-08-07 17:36:37.358149: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n"
+]
+}
+],
  "source": [
-"embeddings = SentenceTransformerEmbeddings(model_name=\"multi-qa-MiniLM-L6-cos-v1\")"
+"from extract_keywords import canonical_keywords, merge_keywords, tokenize_sentence, extract_keywords, init_keyword_extractor\n",
+"init_keyword_extractor()"
  ]
  },
  {
  "cell_type": "code",
  "execution_count": 2,
  "metadata": {},
+"outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"/home/makcrx/anaconda3/lib/python3.10/site-packages/sklearn/feature_extraction/text.py:528: UserWarning: The parameter 'token_pattern' will not be used since 'tokenizer' is not None'\n",
+" warnings.warn(\n"
+]
+},
+{
+"data": {
+"text/plain": [
+"['почта россия трекинг']"
+]
+},
+"execution_count": 2,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"extract_keywords('пр трекинг')"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 3,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/html": [
+"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #800000; text-decoration-color: #800000\">╭─────────────────────────────── </span><span style=\"color: #800000; text-decoration-color: #800000; font-weight: bold\">Traceback </span><span style=\"color: #bf7f7f; text-decoration-color: #bf7f7f; font-weight: bold\">(most recent call last)</span><span style=\"color: #800000; text-decoration-color: #800000\"> ────────────────────────────────╮</span>\n",
+"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/tmp/ipykernel_1594240/</span><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">2036088539.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">1</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">&lt;module&gt;</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
+"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
+"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000; font-style: italic\">[Errno 2] No such file or directory: '/tmp/ipykernel_1594240/2036088539.py'</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
+"<span style=\"color: #800000; text-decoration-color: #800000\">╰──────────────────────────────────────────────────────────────────────────────────────────────────╯</span>\n",
+"<span style=\"color: #ff0000; text-decoration-color: #ff0000; font-weight: bold\">NameError: </span>name <span style=\"color: #008000; text-decoration-color: #008000\">'SentenceTransformerEmbeddings'</span> is not defined\n",
+"</pre>\n"
+],
+"text/plain": [
+"\u001b[31m╭─\u001b[0m\u001b[31m──────────────────────────────\u001b[0m\u001b[31m \u001b[0m\u001b[1;31mTraceback \u001b[0m\u001b[1;2;31m(most recent call last)\u001b[0m\u001b[31m \u001b[0m\u001b[31m───────────────────────────────\u001b[0m\u001b[31m─╮\u001b[0m\n",
+"\u001b[31m│\u001b[0m \u001b[2;33m/tmp/ipykernel_1594240/\u001b[0m\u001b[1;33m2036088539.py\u001b[0m:\u001b[94m1\u001b[0m in \u001b[92m<module>\u001b[0m \u001b[31m│\u001b[0m\n",
+"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
+"\u001b[31m│\u001b[0m \u001b[3;31m[Errno 2] No such file or directory: '/tmp/ipykernel_1594240/2036088539.py'\u001b[0m \u001b[31m│\u001b[0m\n",
+"\u001b[31m╰──────────────────────────────────────────────────────────────────────────────────────────────────╯\u001b[0m\n",
+"\u001b[1;91mNameError: \u001b[0mname \u001b[32m'SentenceTransformerEmbeddings'\u001b[0m is not defined\n"
+]
+},
+"metadata": {},
+"output_type": "display_data"
+}
+],
+"source": [
+"embeddings = SentenceTransformerEmbeddings(model_name=\"multi-qa-MiniLM-L6-cos-v1\")"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 5,
+"metadata": {},
  "outputs": [],
  "source": [
  "output_dir = 'faiss_qa'"
@@ -82,7 +152,7 @@
  },
  {
  "cell_type": "code",
-"execution_count": 17,
+"execution_count": 7,
  "metadata": {},
  "outputs": [],
  "source": [
test_keybert.ipynb ADDED
@@ -0,0 +1,224 @@
+{
+"cells": [
+{
+"cell_type": "code",
+"execution_count": 79,
+"metadata": {},
+"outputs": [],
+"source": [
+"doc = 'как подключить модуль почту россии трекинг'"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 65,
+"metadata": {},
+"outputs": [],
+"source": [
+"from keybert import KeyBERT\n",
+"from sklearn.feature_extraction.text import CountVectorizer\n",
+"import spacy\n",
+"nlp = spacy.load(\"ru_core_news_sm\", exclude=['tokenizer', 'tagger', 'parser', 'ner', 'attribute_ruler', 'lemmatizer'])\n",
+"kw_model = KeyBERT(model=nlp)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 74,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"'!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~'"
+]
+},
+"execution_count": 74,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"string.punctuation"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 76,
+"metadata": {},
+"outputs": [],
+"source": [
+"import string\n",
+"\n",
+"def tokenize_sentence(text):\n",
+"    # remove punctuation\n",
+"    text = text.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation)))\n",
+"    # tokenize\n",
+"    return [morph.parse(word)[0].normal_form for word in text.split()]"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 81,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"почта россии\n",
+"почта\n",
+"почта россии трекинг\n"
+]
+}
+],
+"source": [
+"vocab_raw = [\n",
+"    'почта россии', 'почта', 'почта россии трекинг',\n",
+"    'яндекс доставка', 'яндекс доставка экспресс', 'яндекс доставка express',\n",
+"    'альфабанк', 'альфа банк',\n",
+"]\n",
+"aliases = [\n",
+"    #('canonical name', ['aliases', ...])\n",
+"    ('почта россии', ['почта']),\n",
+"    ('яндекс доставка экспресс', ['яндекс доставка express']),\n",
+"    ('альфабанк', ['альфа банк']),\n",
+"]\n",
+"vocab = [\" \".join(tokenize_sentence(s)) for s in vocab_raw]"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 87,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"как подключить модуль почту россии трекинг\n",
+"как подключить модуль почту россии трекинг\n"
+]
+},
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"/home/makcrx/anaconda3/lib/python3.10/site-packages/sklearn/feature_extraction/text.py:528: UserWarning: The parameter 'token_pattern' will not be used since 'tokenizer' is not None'\n",
+" warnings.warn(\n"
+]
+},
+{
+"data": {
+"text/plain": [
+"[('почта россия трекинг', 0.4786), ('почта россия', 0.3053), ('почта', 0.2357)]"
+]
+},
+"execution_count": 87,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"from keyphrase_vectorizers import KeyphraseCountVectorizer\n",
+"#vectorizer = KeyphraseCountVectorizer(spacy_pipeline='ru_core_news_sm', vocabulary=vocab)\n",
+"vectorizer = CountVectorizer(ngram_range=(1, 4), vocabulary=vocab, tokenizer=tokenize_sentence)\n",
+"kw_model.extract_keywords(doc, vectorizer=vectorizer)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 22,
+"metadata": {},
+"outputs": [],
+"source": [
+"import pymorphy3"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 23,
+"metadata": {},
+"outputs": [],
+"source": [
+"morph = pymorphy3.MorphAnalyzer()"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 28,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"'почту россия'"
+]
+},
+"execution_count": 28,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"morph.parse('почту')[0].normal_form"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 48,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"['почта', 'россия', 'трекинг']"
+]
+},
+"execution_count": 48,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"tokenize_sentence('Почта России? трекинг')"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": []
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": []
+}
+],
+"metadata": {
+"kernelspec": {
+"display_name": "base",
+"language": "python",
+"name": "python3"
+},
+"language_info": {
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+},
+"file_extension": ".py",
+"mimetype": "text/x-python",
+"name": "python",
+"nbconvert_exporter": "python",
+"pygments_lexer": "ipython3",
+"version": "3.10.9"
+},
+"orig_nbformat": 4
+},
+"nbformat": 4,
+"nbformat_minor": 2
+}
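The cells in test_keybert.ipynb are laid out out of execution order (pymorphy3, morph, and tokenize_sentence's dependency on them appear near the bottom but were executed earlier, per the execution counts). Read top to bottom, the experiment it records is roughly the sketch below, assembled only from the cells above with a shortened vocabulary; the printed result is the one the notebook recorded for its full vocabulary:

import string

import pymorphy3
import spacy
from keybert import KeyBERT
from sklearn.feature_extraction.text import CountVectorizer

morph = pymorphy3.MorphAnalyzer()

def tokenize_sentence(text):
    # replace punctuation with spaces, then lemmatize each token with pymorphy3
    text = text.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation)))
    return [morph.parse(word)[0].normal_form for word in text.split()]

# the spacy pipeline is used only for its embeddings; most components are excluded
nlp = spacy.load("ru_core_news_sm", exclude=['tokenizer', 'tagger', 'parser', 'ner', 'attribute_ruler', 'lemmatizer'])
kw_model = KeyBERT(model=nlp)

vocab_raw = ['почта россии', 'почта', 'почта россии трекинг']  # shortened for the sketch
vocab = [" ".join(tokenize_sentence(s)) for s in vocab_raw]
vectorizer = CountVectorizer(ngram_range=(1, 4), vocabulary=vocab, tokenizer=tokenize_sentence)

doc = 'как подключить модуль почту россии трекинг'
print(kw_model.extract_keywords(doc, vectorizer=vectorizer))
# notebook output (full vocabulary): [('почта россия трекинг', 0.4786), ('почта россия', 0.3053), ('почта', 0.2357)]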