import os
import json

import torch
import gradio as gr
import spaces
from transformers import pipeline, AutoTokenizer
from huggingface_hub import upload_file
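
# HF_TOKEN is assumed to be provided as a Space secret; it needs write access to DATASET_REPO
# so that upload_file() below can push the generated JSONL back to the dataset repository.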
HF_TOKEN = os.environ.get("HF_TOKEN")
DATASET_REPO = "Pisethan/khmer-lesson-dataset-generated"
LOCAL_JSONL = "generated_lessons.jsonl"

grade_options = ["1", "2", "3", "4", "5", "6"]
topic_options = ["Addition", "Subtraction", "Counting", "Number Recognition", "Multiplication", "Division"]
level_options = ["Beginner", "Intermediate", "Advanced"]
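
# Note: the tokenizer is loaded from the original "Pisethan/khmer-lesson-model" repo, while text
# generation below uses "Pisethan/khmer-lesson-model-v2"; this assumes both checkpoints share the
# same tokenizer vocabulary.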
tokenizer = AutoTokenizer.from_pretrained("Pisethan/khmer-lesson-model", token=HF_TOKEN)


def save_to_jsonl(record):
    """Append one generated lesson to the local JSONL file and sync it to the dataset repo."""
    with open(LOCAL_JSONL, "a", encoding="utf-8") as f:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")

    upload_file(
        path_or_fileobj=LOCAL_JSONL,
        path_in_repo="generated_lessons.jsonl",
        repo_id=DATASET_REPO,
        repo_type="dataset",
        token=HF_TOKEN,
    )
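
# Illustrative example of one line appended to generated_lessons.jsonl (values are hypothetical):
# {"grade": "1", "topic": "Addition", "level": "Beginner", "prompt": "...", "completion": "..."}
# The accumulated file can later be loaded with datasets.load_dataset(DATASET_REPO,
# data_files="generated_lessons.jsonl"), assuming the caller has read access to the repo.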


@spaces.GPU
def generate_lesson(grade, topic, level):
    # Use the Spaces-allocated GPU when available, otherwise fall back to CPU
    device = 0 if torch.cuda.is_available() else -1
    pipe = pipeline(
        "text-generation",
        model="Pisethan/khmer-lesson-model-v2",
        tokenizer=tokenizer,
        device=device,
        token=HF_TOKEN,
    )

    prompt = f"""
You are a lesson planning assistant. Return only one structured Khmer math lesson plan with these fields:

Lesson Title:
Objective:
Activity:
Instruction (Khmer):
Materials:

Please follow the structure exactly.

Grade: {grade}
Topic: {topic}
TaRL Level: {level}
"""

    output = pipe(prompt, max_new_tokens=300, temperature=0.7, do_sample=True, eos_token_id=tokenizer.eos_token_id)
    result = output[0]['generated_text']

    record = {
        "grade": grade,
        "topic": topic,
        "level": level,
        "prompt": prompt.strip(),
        "completion": result.strip(),
    }
    save_to_jsonl(record)
    return result
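
# Note: by default the text-generation pipeline returns the prompt plus the continuation in
# "generated_text"; passing return_full_text=False to pipe(...) would keep only the newly
# generated text, if the prompt should not be echoed in the UI or saved in the dataset.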


@spaces.GPU
def generate_all_lessons():
    """Generate a lesson plan for every grade/topic/level combination (6 x 6 x 3 = 108 prompts)."""
    device = 0 if torch.cuda.is_available() else -1
    pipe = pipeline(
        "text-generation",
        model="Pisethan/khmer-lesson-model-v2",
        tokenizer=tokenizer,
        device=device,
        token=HF_TOKEN,
    )

    results = ""
    for grade in grade_options:
        for topic in topic_options:
            for level in level_options:
                prompt = f"""Generate a Khmer math lesson plan.

Grade: {grade}
Topic: {topic}
TaRL Level: {level}"""
                output = pipe(prompt, max_new_tokens=200, temperature=0.7, do_sample=True)
                result = output[0]['generated_text']

                record = {
                    "grade": grade,
                    "topic": topic,
                    "level": level,
                    "prompt": prompt.strip(),
                    "completion": result.strip(),
                }
                save_to_jsonl(record)

                results += f"🔹 Grade {grade} | {topic} | {level}\n{result}\n\n{'-'*50}\n\n"
    return results
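
# Note: all 108 combinations are generated sequentially and the JSONL file is re-uploaded after
# each record, so a full run can take a long time and may exceed a single GPU allocation window.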


with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Math Lesson Generation Assistant")
    gr.Markdown("Choose a grade, topic, and TaRL level, then click Generate Lesson, or use Generate All Lessons to create a plan for every combination.")

    with gr.Row():
        grade = gr.Dropdown(choices=grade_options, label="Grade", value="1")
        topic = gr.Dropdown(choices=topic_options, label="Topic", value="Addition")
        level = gr.Dropdown(choices=level_options, label="TaRL Level", value="Beginner")

    output_box = gr.Textbox(
        label="📘 Khmer Lesson Plan",
        lines=20,
        max_lines=200,
        show_copy_button=True,
        autoscroll=True,
    )

    with gr.Row():
        gen_btn = gr.Button("✅ Generate Lesson")
        gen_all_btn = gr.Button("🧠 Generate All Lessons")
        clear_btn = gr.Button("🧹 Clear")

    gen_btn.click(fn=generate_lesson, inputs=[grade, topic, level], outputs=output_box)
    gen_all_btn.click(fn=generate_all_lessons, outputs=output_box)
    clear_btn.click(fn=lambda: "", outputs=output_box)


# Enable the request queue so long-running generation calls are handled reliably
demo.queue()
demo.launch()