asFrants committed on
Commit
957c035
1 Parent(s): d95cbea

add flake8 and black

Browse files
Files changed (5) hide show
  1. .vscode/settings.json +2 -4
  2. app.py +81 -45
  3. main.py +48 -20
  4. poetry.lock +153 -1
  5. pyproject.toml +2 -0
.vscode/settings.json CHANGED
@@ -1,8 +1,4 @@
1
  {
2
- "python.linting.pylint" : false,
3
- "python.linting.flake8" : true,
4
- "python.linting.enabled" : true,
5
- "python.formatting.black" : true,
6
  "editor.formatOnSave": true,
7
  "python.linting.flake8Args":[
8
  "--max-line-length=88"
@@ -13,4 +9,6 @@
13
  "python.testing.pytestArgs": [
14
  "tests"
15
  ],
 
 
16
  }
 
1
  {
 
 
 
 
2
  "editor.formatOnSave": true,
3
  "python.linting.flake8Args":[
4
  "--max-line-length=88"
 
9
  "python.testing.pytestArgs": [
10
  "tests"
11
  ],
12
+ "black-formatter.importStrategy": "fromEnvironment",
13
+ "flake8.importStrategy": "fromEnvironment",
14
  }
app.py CHANGED
@@ -2,7 +2,7 @@ import re
2
 
3
  import gradio as gr
4
  from pydantic import BaseModel
5
- from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
6
  from loguru import logger
7
 
8
  # from pydantic import BaseModel
@@ -17,7 +17,7 @@ EN_SUMMARY_MODEL = "csebuetnlp/mT5_multilingual_XLSum"
17
  EN_SENTIMENT_MODEL = "distilbert-base-uncased-finetuned-sst-2-english"
18
 
19
 
20
- DEFAULT_EN_TEXT ="""Flags on official buildings are being flown at half-mast and a minute's silence will be observed at midday.
21
  Fourteen people were shot dead at the Faculty of Arts building of Charles University in the capital by a student who then killed himself.
22
  Police are working to uncover the motive behind the attack.
23
  It is one of the deadliest assaults by a lone gunman in Europe this century.
@@ -45,6 +45,7 @@ DEFAULT_RU_TEXT = """В результате взрыва на заправке,
45
  доноров для их пополнения на данный час тоже уже немало», — написало ведомство.
46
  """
47
 
 
48
  class Request(BaseModel):
49
  text: str
50
 
@@ -53,14 +54,16 @@ class Result(BaseModel):
53
  sentiment_score: float
54
  sentiment_label: str
55
  summary: str
56
-
57
  def to_str(self):
58
- return f"Summary: {self.summary}\n Sentiment: {self.sentiment_label} ({self.sentiment_score:.3f})"
 
59
 
60
  # class Response(BaseModel):
61
  # results: List[Result] # list of Result objects
62
 
63
- class Summarizer():
 
64
  ru_summary_pipe: pipeline
65
  ru_sentiment_pipe: pipeline
66
  en_summary_pipe: pipeline
@@ -68,65 +71,72 @@ class Summarizer():
68
  # sum_model_name = "csebuetnlp/mT5_multilingual_XLSum"
69
  # sum_tokenizer = AutoTokenizer.from_pretrained(sum_model_name)
70
  # sum_model = AutoModelForSeq2SeqLM.from_pretrained(sum_model_name)
71
-
72
  def __init__(self) -> None:
73
- sum_pipe = pipeline("summarization", model=RU_SUMMARY_MODEL, max_length=100, truncation=True)
 
 
74
  self.ru_summary_pipe = sum_pipe
75
- self.ru_sentiment_pipe = pipeline("sentiment-analysis", model=RU_SENTIMENT_MODEL)
 
 
76
  self.en_summary_pipe = sum_pipe
77
- self.en_sentiment_pipe = pipeline("sentiment-analysis", model=EN_SENTIMENT_MODEL)
 
 
78
 
79
  def mT5_summarize(self, text: str) -> str:
80
- '''Handle text with mT5 model without pipeline'''
81
-
82
- WHITESPACE_HANDLER = lambda k: re.sub('\s+', ' ', re.sub('\n+', ' ', k.strip()))
 
83
 
84
  input_ids = self.sum_tokenizer(
85
- [WHITESPACE_HANDLER(text)],
86
  return_tensors="pt",
87
  padding="max_length",
88
  truncation=True,
89
- max_length=512
90
  )["input_ids"]
91
 
92
  output_ids = self.sum_model.generate(
93
- input_ids=input_ids,
94
- max_length=84,
95
- no_repeat_ngram_size=2,
96
- num_beams=4
97
  )[0]
98
 
99
  summary = self.sum_tokenizer.decode(
100
- output_ids,
101
- skip_special_tokens=True,
102
- clean_up_tokenization_spaces=False
103
  )
104
 
105
  return summary
106
 
107
  def get_pipe(self, lang: str):
108
- logger.info(f'Pipe language: {lang}')
109
- summary = {'en': self.en_summary_pipe,
110
- 'ru': self.ru_summary_pipe,}
111
- sentiment = {'en': self.en_sentiment_pipe,
112
- 'ru': self.ru_sentiment_pipe,}
 
 
 
 
113
  return summary[lang], sentiment[lang]
114
 
115
- def summarize(self, text: Request, lang: str = 'en') -> Result:
116
  sum_pipe, sent_pipe = self.get_pipe(lang)
117
- response_summary = sum_pipe(text)
118
  logger.info(response_summary)
119
- response_sentiment = sent_pipe(text)
120
  logger.info(response_sentiment)
121
- result = Result(
122
  summary=response_summary[0]["summary_text"],
123
  sentiment_label=response_sentiment[0]["label"],
124
  sentiment_score=response_sentiment[0]["score"],
125
  )
126
  return result
127
-
128
- def summ(self, text: Request, lang: str = 'en') -> str:
129
- return self.summarize(text, lang).to_str()
 
130
 
131
  if __name__ == "__main__":
132
  pipe = Summarizer()
@@ -134,20 +144,46 @@ if __name__ == "__main__":
134
  with gr.Blocks() as demo:
135
  with gr.Row():
136
  with gr.Column(scale=2, min_width=600):
137
- en_sum_description=gr.Markdown(value=f"Model for Summary: {EN_SUMMARY_MODEL}")
138
- en_sent_description=gr.Markdown(value=f"Model for Sentiment: {EN_SENTIMENT_MODEL}")
139
- en_inputs=gr.Textbox(label="en_input", lines=5, value=DEFAULT_EN_TEXT, placeholder=DEFAULT_EN_TEXT)
140
- en_lang=gr.Textbox(value='en',visible=False)
141
- en_outputs=gr.Textbox(label="en_output", lines=5, placeholder="Summary and Sentiment would be here...")
 
 
 
 
 
 
 
 
 
 
 
 
 
142
  en_inbtn = gr.Button("Proceed")
143
  with gr.Column(scale=2, min_width=600):
144
- ru_sum_description=gr.Markdown(value=f"Model for Summary: {RU_SUMMARY_MODEL}")
145
- ru_sent_description=gr.Markdown(value=f"Model for Sentiment: {RU_SENTIMENT_MODEL}")
146
- ru_inputs=gr.Textbox(label="ru_input", lines=5, value=DEFAULT_RU_TEXT, placeholder=DEFAULT_RU_TEXT)
147
- ru_lang=gr.Textbox(value='ru',visible=False)
148
- ru_outputs=gr.Textbox(label="ru_output", lines=5, placeholder="Здесь будет обобщение и эмоциональный окрас текста...")
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  ru_inbtn = gr.Button("Запустить")
150
-
151
  en_inbtn.click(
152
  pipe.summ,
153
  [en_inputs, en_lang],
@@ -158,4 +194,4 @@ if __name__ == "__main__":
158
  [ru_inputs, ru_lang],
159
  [ru_outputs],
160
  )
161
- demo.launch(show_api=False)
 
2
 
3
  import gradio as gr
4
  from pydantic import BaseModel
5
+ from transformers import pipeline
6
  from loguru import logger
7
 
8
  # from pydantic import BaseModel
 
17
  EN_SENTIMENT_MODEL = "distilbert-base-uncased-finetuned-sst-2-english"
18
 
19
 
20
+ DEFAULT_EN_TEXT = """Flags on official buildings are being flown at half-mast and a minute's silence will be observed at midday.
21
  Fourteen people were shot dead at the Faculty of Arts building of Charles University in the capital by a student who then killed himself.
22
  Police are working to uncover the motive behind the attack.
23
  It is one of the deadliest assaults by a lone gunman in Europe this century.
 
45
  доноров для их пополнения на данный час тоже уже немало», — написало ведомство.
46
  """
47
 
48
+
49
  class Request(BaseModel):
50
  text: str
51
 
 
54
  sentiment_score: float
55
  sentiment_label: str
56
  summary: str
57
+
58
  def to_str(self):
59
+ return f"Summary: {self.summary}\nSentiment: {self.sentiment_label} ({self.sentiment_score:.3f})"
60
+
61
 
62
  # class Response(BaseModel):
63
  # results: List[Result] # list of Result objects
64
 
65
+
66
+ class Summarizer:
67
  ru_summary_pipe: pipeline
68
  ru_sentiment_pipe: pipeline
69
  en_summary_pipe: pipeline
 
71
  # sum_model_name = "csebuetnlp/mT5_multilingual_XLSum"
72
  # sum_tokenizer = AutoTokenizer.from_pretrained(sum_model_name)
73
  # sum_model = AutoModelForSeq2SeqLM.from_pretrained(sum_model_name)
74
+
75
  def __init__(self) -> None:
76
+ sum_pipe = pipeline(
77
+ "summarization", model=RU_SUMMARY_MODEL, max_length=100, truncation=True
78
+ )
79
  self.ru_summary_pipe = sum_pipe
80
+ self.ru_sentiment_pipe = pipeline(
81
+ "sentiment-analysis", model=RU_SENTIMENT_MODEL
82
+ )
83
  self.en_summary_pipe = sum_pipe
84
+ self.en_sentiment_pipe = pipeline(
85
+ "sentiment-analysis", model=EN_SENTIMENT_MODEL
86
+ )
87
 
88
  def mT5_summarize(self, text: str) -> str:
89
+ """Handle text with mT5 model without pipeline"""
90
+
91
+ def whitespace_handler(text: str):
92
+ return re.sub("\s+", " ", re.sub("\n+", " ", text.strip()))
93
 
94
  input_ids = self.sum_tokenizer(
95
+ [whitespace_handler(text)],
96
  return_tensors="pt",
97
  padding="max_length",
98
  truncation=True,
99
+ max_length=512,
100
  )["input_ids"]
101
 
102
  output_ids = self.sum_model.generate(
103
+ input_ids=input_ids, max_length=84, no_repeat_ngram_size=2, num_beams=4
 
 
 
104
  )[0]
105
 
106
  summary = self.sum_tokenizer.decode(
107
+ output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
 
 
108
  )
109
 
110
  return summary
111
 
112
  def get_pipe(self, lang: str):
113
+ logger.info(f"Pipe language: {lang}")
114
+ summary = {
115
+ "en": self.en_summary_pipe,
116
+ "ru": self.ru_summary_pipe,
117
+ }
118
+ sentiment = {
119
+ "en": self.en_sentiment_pipe,
120
+ "ru": self.ru_sentiment_pipe,
121
+ }
122
  return summary[lang], sentiment[lang]
123
 
124
+ def summarize(self, req: Request, lang: str = "en") -> Result:
125
  sum_pipe, sent_pipe = self.get_pipe(lang)
126
+ response_summary = sum_pipe(req.text)
127
  logger.info(response_summary)
128
+ response_sentiment = sent_pipe(req.text)
129
  logger.info(response_sentiment)
130
+ result = Result(
131
  summary=response_summary[0]["summary_text"],
132
  sentiment_label=response_sentiment[0]["label"],
133
  sentiment_score=response_sentiment[0]["score"],
134
  )
135
  return result
136
+
137
+ def summ(self, req: Request, lang: str = "en") -> str:
138
+ return self.summarize(req, lang).to_str()
139
+
140
 
141
  if __name__ == "__main__":
142
  pipe = Summarizer()
 
144
  with gr.Blocks() as demo:
145
  with gr.Row():
146
  with gr.Column(scale=2, min_width=600):
147
+ en_sum_description = gr.Markdown(
148
+ value=f"Model for Summary: {EN_SUMMARY_MODEL}"
149
+ )
150
+ en_sent_description = gr.Markdown(
151
+ value=f"Model for Sentiment: {EN_SENTIMENT_MODEL}"
152
+ )
153
+ en_inputs = gr.Textbox(
154
+ label="en_input",
155
+ lines=5,
156
+ value=DEFAULT_EN_TEXT,
157
+ placeholder=DEFAULT_EN_TEXT,
158
+ )
159
+ en_lang = gr.Textbox(value="en", visible=False)
160
+ en_outputs = gr.Textbox(
161
+ label="en_output",
162
+ lines=5,
163
+ placeholder="Summary and Sentiment would be here...",
164
+ )
165
  en_inbtn = gr.Button("Proceed")
166
  with gr.Column(scale=2, min_width=600):
167
+ ru_sum_description = gr.Markdown(
168
+ value=f"Model for Summary: {RU_SUMMARY_MODEL}"
169
+ )
170
+ ru_sent_description = gr.Markdown(
171
+ value=f"Model for Sentiment: {RU_SENTIMENT_MODEL}"
172
+ )
173
+ ru_inputs = gr.Textbox(
174
+ label="ru_input",
175
+ lines=5,
176
+ value=DEFAULT_RU_TEXT,
177
+ placeholder=DEFAULT_RU_TEXT,
178
+ )
179
+ ru_lang = gr.Textbox(value="ru", visible=False)
180
+ ru_outputs = gr.Textbox(
181
+ label="ru_output",
182
+ lines=5,
183
+ placeholder="Здесь будет обобщение и эмоциональный окрас текста...",
184
+ )
185
  ru_inbtn = gr.Button("Запустить")
186
+
187
  en_inbtn.click(
188
  pipe.summ,
189
  [en_inputs, en_lang],
 
194
  [ru_inputs, ru_lang],
195
  [ru_outputs],
196
  )
197
+ demo.launch(show_api=False)
main.py CHANGED
@@ -1,43 +1,73 @@
1
  import gradio as gr
2
  from fastapi import FastAPI
3
- from typing import List
4
  from app import Summarizer, Request, Result
5
- from app import EN_SENTIMENT_MODEL, EN_SUMMARY_MODEL, RU_SENTIMENT_MODEL, RU_SUMMARY_MODEL
 
 
 
 
 
6
  from app import DEFAULT_EN_TEXT, DEFAULT_RU_TEXT
7
 
8
  app = FastAPI()
9
  pipe = Summarizer()
10
 
 
11
  @app.post("/summ_ru", response_model=Result)
12
  async def ru_summ_api(request: Request):
13
- results = pipe.summarize(request.text, lang='ru')
14
  return results
15
 
16
 
17
-
18
  @app.post("/summ_en", response_model=Result)
19
- async def ru_summ_api(request: Request):
20
- results = pipe.summarize(request.text, lang='en')
21
  return results
22
 
23
 
24
  with gr.Blocks() as demo:
25
  with gr.Row():
26
  with gr.Column(scale=2, min_width=600):
27
- en_sum_description=gr.Markdown(value=f"Model for Summary: {EN_SUMMARY_MODEL}")
28
- en_sent_description=gr.Markdown(value=f"Model for Sentiment: {EN_SENTIMENT_MODEL}")
29
- en_inputs=gr.Textbox(label="en_input", lines=5, value=DEFAULT_EN_TEXT, placeholder=DEFAULT_EN_TEXT)
30
- en_lang=gr.Textbox(value='en',visible=False)
31
- en_outputs=gr.Textbox(label="en_output", lines=5, placeholder="Summary and Sentiment would be here...")
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  en_inbtn = gr.Button("Proceed")
33
  with gr.Column(scale=2, min_width=600):
34
- ru_sum_description=gr.Markdown(value=f"Model for Summary: {RU_SUMMARY_MODEL}")
35
- ru_sent_description=gr.Markdown(value=f"Model for Sentiment: {RU_SENTIMENT_MODEL}")
36
- ru_inputs=gr.Textbox(label="ru_input", lines=5, value=DEFAULT_RU_TEXT, placeholder=DEFAULT_RU_TEXT)
37
- ru_lang=gr.Textbox(value='ru',visible=False)
38
- ru_outputs=gr.Textbox(label="ru_output", lines=5, placeholder="Здесь будет обобщение и эмоциональный окрас текста...")
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  ru_inbtn = gr.Button("Запустить")
40
-
41
  en_inbtn.click(
42
  pipe.summ,
43
  [en_inputs, en_lang],
@@ -49,7 +79,5 @@ with gr.Blocks() as demo:
49
  [ru_outputs],
50
  )
51
 
52
- # demo.launch(show_api=False)
53
-
54
  # mounting at the root path
55
- app = gr.mount_gradio_app(app, demo, path="/")
 
1
  import gradio as gr
2
  from fastapi import FastAPI
 
3
  from app import Summarizer, Request, Result
4
+ from app import (
5
+ EN_SENTIMENT_MODEL,
6
+ EN_SUMMARY_MODEL,
7
+ RU_SENTIMENT_MODEL,
8
+ RU_SUMMARY_MODEL,
9
+ )
10
  from app import DEFAULT_EN_TEXT, DEFAULT_RU_TEXT
11
 
12
  app = FastAPI()
13
  pipe = Summarizer()
14
 
15
+
16
  @app.post("/summ_ru", response_model=Result)
17
  async def ru_summ_api(request: Request):
18
+ results = pipe.summarize(request.text, lang="ru")
19
  return results
20
 
21
 
 
22
  @app.post("/summ_en", response_model=Result)
23
+ async def en_summ_api(request: Request):
24
+ results = pipe.summarize(request.text, lang="en")
25
  return results
26
 
27
 
28
  with gr.Blocks() as demo:
29
  with gr.Row():
30
  with gr.Column(scale=2, min_width=600):
31
+ en_sum_description = gr.Markdown(
32
+ value=f"Model for Summary: {EN_SUMMARY_MODEL}"
33
+ )
34
+ en_sent_description = gr.Markdown(
35
+ value=f"Model for Sentiment: {EN_SENTIMENT_MODEL}"
36
+ )
37
+ en_inputs = gr.Textbox(
38
+ label="en_input",
39
+ lines=5,
40
+ value=DEFAULT_EN_TEXT,
41
+ placeholder=DEFAULT_EN_TEXT,
42
+ )
43
+ en_lang = gr.Textbox(value="en", visible=False)
44
+ en_outputs = gr.Textbox(
45
+ label="en_output",
46
+ lines=5,
47
+ placeholder="Summary and Sentiment would be here...",
48
+ )
49
  en_inbtn = gr.Button("Proceed")
50
  with gr.Column(scale=2, min_width=600):
51
+ ru_sum_description = gr.Markdown(
52
+ value=f"Model for Summary: {RU_SUMMARY_MODEL}"
53
+ )
54
+ ru_sent_description = gr.Markdown(
55
+ value=f"Model for Sentiment: {RU_SENTIMENT_MODEL}"
56
+ )
57
+ ru_inputs = gr.Textbox(
58
+ label="ru_input",
59
+ lines=5,
60
+ value=DEFAULT_RU_TEXT,
61
+ placeholder=DEFAULT_RU_TEXT,
62
+ )
63
+ ru_lang = gr.Textbox(value="ru", visible=False)
64
+ ru_outputs = gr.Textbox(
65
+ label="ru_output",
66
+ lines=5,
67
+ placeholder="Здесь будет обобщение и эмоциональный окрас текста...",
68
+ )
69
  ru_inbtn = gr.Button("Запустить")
70
+
71
  en_inbtn.click(
72
  pipe.summ,
73
  [en_inputs, en_lang],
 
79
  [ru_outputs],
80
  )
81
 
 
 
82
  # mounting at the root path
83
+ app = gr.mount_gradio_app(app, demo, path="/")
poetry.lock CHANGED
@@ -91,6 +91,53 @@ docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-
91
  tests = ["attrs[tests-no-zope]", "zope-interface"]
92
  tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
93
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
  [[package]]
95
  name = "certifi"
96
  version = "2023.11.17"
@@ -373,6 +420,23 @@ docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1
373
  testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"]
374
  typing = ["typing-extensions (>=4.8)"]
375
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
376
  [[package]]
377
  name = "fonttools"
378
  version = "4.47.0"
@@ -979,6 +1043,18 @@ pillow = ">=8"
979
  pyparsing = ">=2.3.1"
980
  python-dateutil = ">=2.7"
981
 
 
 
 
 
 
 
 
 
 
 
 
 
982
  [[package]]
983
  name = "mdurl"
984
  version = "0.1.2"
@@ -1007,6 +1083,18 @@ files = [
1007
  testing = ["astroid (>=2.0)", "coverage", "pylint (>=2.3.1,<2.4.0)", "pytest"]
1008
  yaml = ["PyYAML (>=5.1.0)"]
1009
 
 
 
 
 
 
 
 
 
 
 
 
 
1010
  [[package]]
1011
  name = "numpy"
1012
  version = "1.26.2"
@@ -1259,6 +1347,18 @@ sql-other = ["SQLAlchemy (>=1.4.36)"]
1259
  test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"]
1260
  xml = ["lxml (>=4.8.0)"]
1261
 
 
 
 
 
 
 
 
 
 
 
 
 
1262
  [[package]]
1263
  name = "pillow"
1264
  version = "10.1.0"
@@ -1327,6 +1427,22 @@ files = [
1327
  docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
1328
  tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
1329
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1330
  [[package]]
1331
  name = "pretrainedmodels"
1332
  version = "0.7.4"
@@ -1365,6 +1481,18 @@ files = [
1365
  {file = "protobuf-4.25.1.tar.gz", hash = "sha256:57d65074b4f5baa4ab5da1605c02be90ac20c8b40fb137d6a8df9f416b0d0ce2"},
1366
  ]
1367
 
 
 
 
 
 
 
 
 
 
 
 
 
1368
  [[package]]
1369
  name = "pydantic"
1370
  version = "2.5.3"
@@ -1515,6 +1643,18 @@ files = [
1515
  {file = "pydub-0.25.1.tar.gz", hash = "sha256:980a33ce9949cab2a569606b65674d748ecbca4f0796887fd6f46173a7b0d30f"},
1516
  ]
1517
 
 
 
 
 
 
 
 
 
 
 
 
 
1518
  [[package]]
1519
  name = "pygments"
1520
  version = "2.17.2"
@@ -2290,6 +2430,18 @@ dev = ["tokenizers[testing]"]
2290
  docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"]
2291
  testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"]
2292
 
 
 
 
 
 
 
 
 
 
 
 
 
2293
  [[package]]
2294
  name = "tomlkit"
2295
  version = "0.12.0"
@@ -2683,4 +2835,4 @@ dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"]
2683
  [metadata]
2684
  lock-version = "2.0"
2685
  python-versions = "^3.10"
2686
- content-hash = "a0a8f5d7bbfd3d08c83e048ee2f8e18cf31729338afa2484b5f817c387457622"
 
91
  tests = ["attrs[tests-no-zope]", "zope-interface"]
92
  tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
93
 
94
+ [[package]]
95
+ name = "black"
96
+ version = "24.1.1"
97
+ description = "The uncompromising code formatter."
98
+ category = "dev"
99
+ optional = false
100
+ python-versions = ">=3.8"
101
+ files = [
102
+ {file = "black-24.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2588021038bd5ada078de606f2a804cadd0a3cc6a79cb3e9bb3a8bf581325a4c"},
103
+ {file = "black-24.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a95915c98d6e32ca43809d46d932e2abc5f1f7d582ffbe65a5b4d1588af7445"},
104
+ {file = "black-24.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fa6a0e965779c8f2afb286f9ef798df770ba2b6cee063c650b96adec22c056a"},
105
+ {file = "black-24.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:5242ecd9e990aeb995b6d03dc3b2d112d4a78f2083e5a8e86d566340ae80fec4"},
106
+ {file = "black-24.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fc1ec9aa6f4d98d022101e015261c056ddebe3da6a8ccfc2c792cbe0349d48b7"},
107
+ {file = "black-24.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0269dfdea12442022e88043d2910429bed717b2d04523867a85dacce535916b8"},
108
+ {file = "black-24.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3d64db762eae4a5ce04b6e3dd745dcca0fb9560eb931a5be97472e38652a161"},
109
+ {file = "black-24.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:5d7b06ea8816cbd4becfe5f70accae953c53c0e53aa98730ceccb0395520ee5d"},
110
+ {file = "black-24.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e2c8dfa14677f90d976f68e0c923947ae68fa3961d61ee30976c388adc0b02c8"},
111
+ {file = "black-24.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a21725862d0e855ae05da1dd25e3825ed712eaaccef6b03017fe0853a01aa45e"},
112
+ {file = "black-24.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07204d078e25327aad9ed2c64790d681238686bce254c910de640c7cc4fc3aa6"},
113
+ {file = "black-24.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:a83fe522d9698d8f9a101b860b1ee154c1d25f8a82ceb807d319f085b2627c5b"},
114
+ {file = "black-24.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08b34e85170d368c37ca7bf81cf67ac863c9d1963b2c1780c39102187ec8dd62"},
115
+ {file = "black-24.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7258c27115c1e3b5de9ac6c4f9957e3ee2c02c0b39222a24dc7aa03ba0e986f5"},
116
+ {file = "black-24.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40657e1b78212d582a0edecafef133cf1dd02e6677f539b669db4746150d38f6"},
117
+ {file = "black-24.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:e298d588744efda02379521a19639ebcd314fba7a49be22136204d7ed1782717"},
118
+ {file = "black-24.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:34afe9da5056aa123b8bfda1664bfe6fb4e9c6f311d8e4a6eb089da9a9173bf9"},
119
+ {file = "black-24.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:854c06fb86fd854140f37fb24dbf10621f5dab9e3b0c29a690ba595e3d543024"},
120
+ {file = "black-24.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3897ae5a21ca132efa219c029cce5e6bfc9c3d34ed7e892113d199c0b1b444a2"},
121
+ {file = "black-24.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:ecba2a15dfb2d97105be74bbfe5128bc5e9fa8477d8c46766505c1dda5883aac"},
122
+ {file = "black-24.1.1-py3-none-any.whl", hash = "sha256:5cdc2e2195212208fbcae579b931407c1fa9997584f0a415421748aeafff1168"},
123
+ {file = "black-24.1.1.tar.gz", hash = "sha256:48b5760dcbfe5cf97fd4fba23946681f3a81514c6ab8a45b50da67ac8fbc6c7b"},
124
+ ]
125
+
126
+ [package.dependencies]
127
+ click = ">=8.0.0"
128
+ mypy-extensions = ">=0.4.3"
129
+ packaging = ">=22.0"
130
+ pathspec = ">=0.9.0"
131
+ platformdirs = ">=2"
132
+ tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
133
+ typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}
134
+
135
+ [package.extras]
136
+ colorama = ["colorama (>=0.4.3)"]
137
+ d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"]
138
+ jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
139
+ uvloop = ["uvloop (>=0.15.2)"]
140
+
141
  [[package]]
142
  name = "certifi"
143
  version = "2023.11.17"
 
420
  testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"]
421
  typing = ["typing-extensions (>=4.8)"]
422
 
423
+ [[package]]
424
+ name = "flake8"
425
+ version = "7.0.0"
426
+ description = "the modular source code checker: pep8 pyflakes and co"
427
+ category = "dev"
428
+ optional = false
429
+ python-versions = ">=3.8.1"
430
+ files = [
431
+ {file = "flake8-7.0.0-py2.py3-none-any.whl", hash = "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3"},
432
+ {file = "flake8-7.0.0.tar.gz", hash = "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132"},
433
+ ]
434
+
435
+ [package.dependencies]
436
+ mccabe = ">=0.7.0,<0.8.0"
437
+ pycodestyle = ">=2.11.0,<2.12.0"
438
+ pyflakes = ">=3.2.0,<3.3.0"
439
+
440
  [[package]]
441
  name = "fonttools"
442
  version = "4.47.0"
 
1043
  pyparsing = ">=2.3.1"
1044
  python-dateutil = ">=2.7"
1045
 
1046
+ [[package]]
1047
+ name = "mccabe"
1048
+ version = "0.7.0"
1049
+ description = "McCabe checker, plugin for flake8"
1050
+ category = "dev"
1051
+ optional = false
1052
+ python-versions = ">=3.6"
1053
+ files = [
1054
+ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
1055
+ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
1056
+ ]
1057
+
1058
  [[package]]
1059
  name = "mdurl"
1060
  version = "0.1.2"
 
1083
  testing = ["astroid (>=2.0)", "coverage", "pylint (>=2.3.1,<2.4.0)", "pytest"]
1084
  yaml = ["PyYAML (>=5.1.0)"]
1085
 
1086
+ [[package]]
1087
+ name = "mypy-extensions"
1088
+ version = "1.0.0"
1089
+ description = "Type system extensions for programs checked with the mypy type checker."
1090
+ category = "dev"
1091
+ optional = false
1092
+ python-versions = ">=3.5"
1093
+ files = [
1094
+ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
1095
+ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
1096
+ ]
1097
+
1098
  [[package]]
1099
  name = "numpy"
1100
  version = "1.26.2"
 
1347
  test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"]
1348
  xml = ["lxml (>=4.8.0)"]
1349
 
1350
+ [[package]]
1351
+ name = "pathspec"
1352
+ version = "0.12.1"
1353
+ description = "Utility library for gitignore style pattern matching of file paths."
1354
+ category = "dev"
1355
+ optional = false
1356
+ python-versions = ">=3.8"
1357
+ files = [
1358
+ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
1359
+ {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
1360
+ ]
1361
+
1362
  [[package]]
1363
  name = "pillow"
1364
  version = "10.1.0"
 
1427
  docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
1428
  tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
1429
 
1430
+ [[package]]
1431
+ name = "platformdirs"
1432
+ version = "4.1.0"
1433
+ description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
1434
+ category = "dev"
1435
+ optional = false
1436
+ python-versions = ">=3.8"
1437
+ files = [
1438
+ {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"},
1439
+ {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"},
1440
+ ]
1441
+
1442
+ [package.extras]
1443
+ docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"]
1444
+ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"]
1445
+
1446
  [[package]]
1447
  name = "pretrainedmodels"
1448
  version = "0.7.4"
 
1481
  {file = "protobuf-4.25.1.tar.gz", hash = "sha256:57d65074b4f5baa4ab5da1605c02be90ac20c8b40fb137d6a8df9f416b0d0ce2"},
1482
  ]
1483
 
1484
+ [[package]]
1485
+ name = "pycodestyle"
1486
+ version = "2.11.1"
1487
+ description = "Python style guide checker"
1488
+ category = "dev"
1489
+ optional = false
1490
+ python-versions = ">=3.8"
1491
+ files = [
1492
+ {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"},
1493
+ {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"},
1494
+ ]
1495
+
1496
  [[package]]
1497
  name = "pydantic"
1498
  version = "2.5.3"
 
1643
  {file = "pydub-0.25.1.tar.gz", hash = "sha256:980a33ce9949cab2a569606b65674d748ecbca4f0796887fd6f46173a7b0d30f"},
1644
  ]
1645
 
1646
+ [[package]]
1647
+ name = "pyflakes"
1648
+ version = "3.2.0"
1649
+ description = "passive checker of Python programs"
1650
+ category = "dev"
1651
+ optional = false
1652
+ python-versions = ">=3.8"
1653
+ files = [
1654
+ {file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"},
1655
+ {file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"},
1656
+ ]
1657
+
1658
  [[package]]
1659
  name = "pygments"
1660
  version = "2.17.2"
 
2430
  docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"]
2431
  testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"]
2432
 
2433
+ [[package]]
2434
+ name = "tomli"
2435
+ version = "2.0.1"
2436
+ description = "A lil' TOML parser"
2437
+ category = "dev"
2438
+ optional = false
2439
+ python-versions = ">=3.7"
2440
+ files = [
2441
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
2442
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
2443
+ ]
2444
+
2445
  [[package]]
2446
  name = "tomlkit"
2447
  version = "0.12.0"
 
2835
  [metadata]
2836
  lock-version = "2.0"
2837
  python-versions = "^3.10"
2838
+ content-hash = "5648f003dcb355b02c351874f25f204380d23b153110cec7f6177e81fd260dbf"
pyproject.toml CHANGED
@@ -22,6 +22,8 @@ uvicorn = "^0.27.0"
22
 
23
  [tool.poetry.group.dev.dependencies]
24
  huggingface-hub = "^0.20.1"
 
 
25
 
26
  [build-system]
27
  requires = ["poetry-core"]
 
22
 
23
  [tool.poetry.group.dev.dependencies]
24
  huggingface-hub = "^0.20.1"
25
+ flake8 = "^7.0.0"
26
+ black = "^24.1.1"
27
 
28
  [build-system]
29
  requires = ["poetry-core"]