Upto12forenglish
committed on
Commit
•
0b4e144
1
Parent(s):
606b5ea
Update main.py
Browse files
main.py
CHANGED
@@ -3,15 +3,76 @@ from pydantic import BaseModel
|
|
3 |
from fastapi.responses import RedirectResponse, HTMLResponse
|
4 |
import urllib.parse
|
5 |
import requests
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
app = FastAPI()
|
8 |
|
9 |
-
|
10 |
class Message(BaseModel):
|
11 |
message: str
|
12 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
|
14 |
@app.get("/", response_class=HTMLResponse)
|
15 |
async def read_root():
|
16 |
return """Hello Grammar Bot!"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
|
|
|
3 |
from fastapi.responses import RedirectResponse, HTMLResponse
|
4 |
import urllib.parse
|
5 |
import requests
|
6 |
+
from gramformer import Gramformer
|
7 |
+
import torch
|
8 |
+
import telebot
|
9 |
+
import spacy
|
10 |
+
import os
|
11 |
+
import concurrent.futures
|
12 |
|
13 |
app = FastAPI()
|
14 |
|
|
|
15 |
class Message(BaseModel):
    """Request payload for the /checkgrammar endpoint."""

    # Raw user text to be grammar-checked.
    message: str
|
17 |
|
18 |
+
# Load the spaCy English model
|
19 |
+
nlp = spacy.load("en_core_web_sm")
|
20 |
+
|
21 |
+
def set_seed(seed):
    """Seed torch's RNGs for reproducible corrections.

    Seeds the CPU generator always, and every CUDA device's generator
    when CUDA is available.
    """
    torch.manual_seed(seed)
    # Nothing more to do on CPU-only hosts.
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed_all(seed)
|
25 |
+
|
26 |
+
set_seed(1212)
|
27 |
+
|
28 |
+
gf = Gramformer(models=1, use_gpu=False) # 1=corrector, 2=detector
|
29 |
|
30 |
@app.get("/", response_class=HTMLResponse)
async def read_root():
    """Landing page / health check for the grammar bot."""
    greeting = "Hello Grammar Bot!"
    return greeting
|
33 |
+
|
34 |
+
@app.post("/checkgrammar")
async def receive_message(message: Message):
    """Iteratively grammar-correct the text in *message.message*.

    Runs up to 5 correction passes with Gramformer (`gf`), splitting the
    current paragraph into sentences with spaCy (`nlp`), collecting the
    edit tuples for every sentence, and stopping early once a pass finds
    no new edits. Prints a human-readable report of the corrected
    paragraph and all collected mistakes.

    Returns the same acknowledgement payload as before (public contract
    unchanged).
    """
    print("Received message:", message.message)
    # BUG FIX: the original assigned from the undefined name
    # `message_text`, which raised NameError on every request.
    influent_paragraph = message.message
    corrected_paragraph = influent_paragraph  # refined on each pass
    mistakes = []

    # Bounded loop replaces the original `while True` + counter: at most
    # 5 passes, identical to the original iteration count.
    for _ in range(5):
        influent_sentences = [sentence.text for sentence in nlp(corrected_paragraph).sents]
        print("[Influent sentences]", influent_sentences)

        corrected_paragraph = list(gf.correct(corrected_paragraph, max_candidates=1))[0]
        print("[Corrected paragraph]", corrected_paragraph)

        new_mistakes = []
        for influent_sentence in influent_sentences:
            corrected_sentences = gf.correct(influent_sentence, max_candidates=1)
            print("[Input]", influent_sentence)
            for corrected_sentence in corrected_sentences:
                print("[Correction]", corrected_sentence)
                # get_edits yields 7-tuples:
                # (tag, mistake, start, end, correction, corr_start, corr_end)
                new_mistakes += gf.get_edits(influent_sentence, corrected_sentence)
                print("[Edits]", new_mistakes)

        # If no new mistakes are found, the text has converged — stop.
        if not new_mistakes:
            break

        # Accumulate this pass's mistakes into the overall list.
        mistakes += new_mistakes

    full_output = "Corrected Paragraph:\n" + corrected_paragraph + "\n\nMistakes:\n"
    for index, (tag, mistake, start, end, correction, corr_start, corr_end) in enumerate(mistakes, start=1):
        full_output += f"{index}. Tag: {tag}\n"
        full_output += f" Mistake: {mistake} (Position: {start}-{end})\n"
        full_output += f" Correction: {correction} (Position: {corr_start}-{corr_end})\n"
        full_output += "\n"

    print(full_output)
    return {"message": "Message received successfully"}
|
78 |
|