File size: 3,040 Bytes
fa22eb9
606b5ea
 
 
 
0b4e144
 
 
 
 
debafc1
 
 
606b5ea
 
debafc1
0b4e144
 
 
 
 
 
 
 
 
 
 
debafc1
606b5ea
 
 
0b4e144
 
4443779
 
 
 
60acb5f
 
 
 
 
 
 
 
 
333f97c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
606b5ea
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
from fastapi import FastAPI, Request
from pydantic import BaseModel
from fastapi.responses import RedirectResponse, HTMLResponse
import urllib.parse
import requests
from gramformer import Gramformer
import torch
import spacy
import os
import concurrent.futures

# FastAPI application instance; routes are registered on it below.
app = FastAPI()

# Request schema for a plain text payload. NOTE(review): not referenced by the
# visible endpoints (they parse the raw JSON body themselves) — possibly vestigial.
class Message(BaseModel):
    message: str

# Load the spaCy English model
# Used only for sentence segmentation (`.sents`) before per-sentence correction.
nlp = spacy.load("en_core_web_sm")

def set_seed(seed):
    """Seed torch's random-number generators for reproducible output.

    Seeds the CPU generator always, and every CUDA device's generator
    when CUDA is available.

    Args:
        seed: Integer seed value.
    """
    torch.manual_seed(seed)
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed_all(seed)

# Fixed seed so Gramformer's (torch-based) generation is deterministic across runs.
set_seed(1212)

# Module-level corrector model, loaded once at startup (CPU only).
gf = Gramformer(models=1, use_gpu=False)  # 1=corrector, 2=detector

@app.get("/", response_class=HTMLResponse)
async def read_root():
    """Root endpoint: serves a plain greeting, doubling as a liveness check."""
    return "Hello Grammar Bot!"

@app.post("/checkgrammar")
async def receive_updates(request: Request):
    """Webhook endpoint for Telegram-style update payloads.

    Extracts the message text from the incoming JSON, then runs up to five
    Gramformer correction passes over it: each pass re-segments the current
    paragraph into sentences with spaCy, corrects the whole paragraph, and
    collects per-sentence edits. Stops early once a pass finds no new edits.
    The corrected paragraph and the accumulated edits are printed to stdout;
    the HTTP response is only a generic acknowledgement.

    Args:
        request: Raw FastAPI request whose JSON body holds the update.

    Returns:
        A static acknowledgement dict.
    """
    data = await request.json()
    print("Received Update:")
    print(data)

    # Pull the relevant fields out of the (Telegram-shaped) payload,
    # defaulting to empty values when any level is missing.
    msg = data.get('message', {})
    user_message = msg.get('text', '')
    message_id = msg.get('message_id', '')
    user_id = msg.get('from', {}).get('id', '')
    chat_id = msg.get('chat', {}).get('id', '')

    print(f"User Message: {user_message}")
    print(f"Original Message ID: {message_id}")
    print(f"User ID: {user_id}")
    print(f"Chat ID: {chat_id}")

    corrected_paragraph = user_message  # refined in place on each pass
    mistakes = []

    # At most five correction passes (same cap the original enforced manually).
    for _ in range(5):
        # Segment the *current* paragraph before correcting it this pass.
        influent_sentences = [s.text for s in nlp(corrected_paragraph).sents]
        print("[Influent sentences]", influent_sentences)

        corrected_paragraph = list(gf.correct(corrected_paragraph, max_candidates=1))[0]
        print("[Corrected paragraph]", corrected_paragraph)

        # Collect the edits Gramformer proposes for each sentence individually.
        new_mistakes = []
        for sentence in influent_sentences:
            candidates = gf.correct(sentence, max_candidates=1)
            print("[Input]", sentence)
            for candidate in candidates:
                print("[Correction]", candidate)
                new_mistakes += gf.get_edits(sentence, candidate)
                print("[Edits]", new_mistakes)

        # A pass with no edits means the text has converged — stop early.
        if not new_mistakes:
            break

        mistakes += new_mistakes

    # Render a human-readable report; edits are 7-tuples from gf.get_edits.
    parts = ["Corrected Paragraph:\n" + corrected_paragraph + "\n\nMistakes:\n"]
    for idx, (tag, wrong, w_start, w_end, fix, f_start, f_end) in enumerate(mistakes, start=1):
        parts.append(f"{idx}. Tag: {tag}\n")
        parts.append(f"   Mistake: {wrong} (Position: {w_start}-{w_end})\n")
        parts.append(f"   Correction: {fix} (Position: {f_start}-{f_end})\n")
        parts.append("\n")
    full_output = "".join(parts)

    print(full_output)
    return {"message": "Message received successfully"}