from fastapi import FastAPI, Request
from fastapi.responses import RedirectResponse, HTMLResponse
from pydantic import BaseModel
import urllib.parse
import requests
from gramformer import Gramformer
import torch
import spacy
import os
import concurrent.futures

app = FastAPI()


class Message(BaseModel):
    message: str
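# NOTE: the Message model above is not used by the handlers below, which read
# the raw update JSON directly; it is presumably kept for validation elsewhere.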


# spaCy provides sentence segmentation; the model must be installed first:
#   python -m spacy download en_core_web_sm
nlp = spacy.load("en_core_web_sm")


def set_seed(seed):
    # Seed torch (and all CUDA devices, if available) so Gramformer's
    # generation is reproducible across runs.
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)


set_seed(1212)

# models=1 selects Gramformer's corrector model; use_gpu=False keeps inference on the CPU.
gf = Gramformer(models=1, use_gpu=False)
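# Illustrative use (example sentence from Gramformer's README; exact output may vary):
#   gf.correct("He are moving here.", max_candidates=1)  # -> {'He is moving here.'}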


@app.get("/", response_class=HTMLResponse)
async def read_root():
    return """Hello Grammar Bot!"""


@app.post("/checkgrammar")
async def receive_updates(request: Request):
    # The payload is expected to be a Telegram Bot API update; pull out the
    # message text plus the IDs that identify the sender and the chat.
    data = await request.json()
    print("Received Update:")
    print(data)
    user_message = data.get('message', {}).get('text', '')
    message_id = data.get('message', {}).get('message_id', '')
    user_id = data.get('message', {}).get('from', {}).get('id', '')
    chat_id = data.get('message', {}).get('chat', {}).get('id', '')
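    # Illustrative update shape (abridged):
    #   {"message": {"message_id": 42, "from": {"id": 111}, "chat": {"id": 111},
    #                "text": "He are moving here."}}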

    print(f"User Message: {user_message}")
    print(f"Original Message ID: {message_id}")
    print(f"User ID: {user_id}")
    print(f"Chat ID: {chat_id}")

    influent_paragraph = user_message
    corrected_paragraph = influent_paragraph
    mistakes_count = 0
    mistakes = []

    # Run up to five correction passes, re-feeding Gramformer its own output,
    # since a single pass does not always catch every mistake.
    while True:
        if mistakes_count == 5:
            break
        mistakes_count += 1

        influent_sentences = [sentence.text for sentence in nlp(corrected_paragraph).sents]
        print("[Influent sentences]", influent_sentences)

        # gf.correct() returns a set of candidates; take the single one requested.
        corrected_paragraph = list(gf.correct(corrected_paragraph, max_candidates=1))[0]
        print("[Corrected paragraph]", corrected_paragraph)

        # Correct each sentence individually so get_edits() can report the
        # per-sentence mistakes found in this pass.
        new_mistakes = []
        for influent_sentence in influent_sentences:
            corrected_sentences = gf.correct(influent_sentence, max_candidates=1)
            print("[Input]", influent_sentence)
            for corrected_sentence in corrected_sentences:
                print("[Correction]", corrected_sentence)
                new_mistakes += gf.get_edits(influent_sentence, corrected_sentence)
                print("[Edits]", new_mistakes)

        # Stop once a pass produces no further edits.
        if not new_mistakes:
            break

        mistakes += new_mistakes

    # Format the corrected paragraph and the accumulated edits for logging.
    full_output = "Corrected Paragraph:\n" + corrected_paragraph + "\n\nMistakes:\n"
    for index, (tag, mistake, start, end, correction, corr_start, corr_end) in enumerate(mistakes, start=1):
        full_output += f"{index}. Tag: {tag}\n"
        full_output += f"   Mistake: {mistake} (Position: {start}-{end})\n"
        full_output += f"   Correction: {correction} (Position: {corr_start}-{corr_end})\n"
        full_output += "\n"

    print(full_output)
    return {"message": "Message received successfully"}