import gradio as gr
from transformers import pipeline
import re

# βœ… Use a small, instruction-following model that works on CPU
generator = pipeline("text2text-generation", model="declare-lab/flan-alpaca-base", device=-1)

# 🧠 Structured prompt
def build_prompt(essay_text):
    return f"""
You are an English teacher.

Evaluate the essay below in 6 categories. For each category, give a score from 0 to 100:

1. Grammar & Mechanics  
2. Coherence & Flow  
3. Clarity & Style  
4. Argument Strength & Evidence  
5. Structure & Organization  
6. Teacher-Specific Style  

Then, calculate the average of these 6 scores and return it as "Total Grade".

Respond ONLY with this exact format:
Grammar & Mechanics: [score]  
Coherence & Flow: [score]  
Clarity & Style: [score]  
Argument Strength & Evidence: [score]  
Structure & Organization: [score]  
Teacher-Specific Style: [score]  
Total Grade: [average]

Essay:
\"\"\"{essay_text}\"\"\"
"""

# πŸ“„ Read .txt file input
def extract_text(file):
    # gr.File may hand back a file path string or an object exposing .name,
    # depending on the Gradio version; resolve to a path and read from disk.
    path = file if isinstance(file, str) else file.name
    if path.endswith(".txt"):
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    return "Unsupported file type. Please upload a .txt file."

# πŸ§ͺ Main logic
def grade_essay(essay, file):
    if not essay and not file:
        return "Please provide either text or a file."

    if file:
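        # An uploaded file takes precedence over any pasted text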
        essay = extract_text(file)
        if "Unsupported" in essay:
            return essay

    prompt = build_prompt(essay)
    result = generator(prompt, max_new_tokens=128)[0]["generated_text"]

    # πŸ” Extract scores using regex even if not formatted as JSON
    categories = [
        "Grammar & Mechanics",
        "Coherence & Flow",
        "Clarity & Style",
        "Argument Strength & Evidence",
        "Structure & Organization",
        "Teacher-Specific Style",
        "Total Grade"
    ]
    output = {}
    for cat in categories:
        pattern = f"{cat}\\s*[:=]\\s*(\\d+(\\.\\d+)?)"
        match = re.search(pattern, result, re.IGNORECASE)
        if match:
            output[cat] = float(match.group(1))
        else:
            output[cat] = "Not found"

    return output

# 🎨 Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# ✏️ MimicMark – AI Essay Evaluator")
    gr.Markdown("Paste your essay or upload a `.txt` file. The AI will score it in 6 categories and return a total grade.")

    with gr.Row():
        essay_input = gr.Textbox(label="Paste Your Essay", lines=12)
        file_input = gr.File(label="Or Upload a .txt File", file_types=[".txt"])

    output = gr.JSON(label="Evaluation Results")

    submit = gr.Button("Evaluate Essay")
    submit.click(fn=grade_essay, inputs=[essay_input, file_input], outputs=output)

demo.launch()
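
# Illustrative smoke test (sketch, not part of the app): grade_essay can be
# called directly from a REPL or a separate script, since demo.launch() blocks.
#   scores = grade_essay("Sample essay text.", None)
#   # -> dict mapping each category and "Total Grade" to a float, or "Not found"
#   #    when the model's reply does not match the expected format.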