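"""Gradio app that generates practice questions with a fine-tuned Llama-2 model.

Loads prajjwal888/Llama-2-7b-chat-question-generation, builds a text-generation
pipeline, prompts it in the Llama-2 [INST] chat format, parses the
"Question: ... / Hint: ..." output, and serves everything through a Gradio UI.
"""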
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import torch
import gradio as gr
import re

model_path = "prajjwal888/Llama-2-7b-chat-question-generation"

# Load the weights in float16 and let device_map="auto" place them on the
# available GPU(s)/CPU (this requires the `accelerate` package).
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map="auto"
)

tokenizer = AutoTokenizer.from_pretrained(model_path)

# Build the generation pipeline once at startup; rebuilding it on every
# request would add avoidable latency to each call.
pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=200,
    temperature=0.7,
    top_p=0.9,
    do_sample=True
)

def parse_generated_text(text: str) -> dict:
    """Split raw model output into a question and an optional hint."""
    # Drop Llama-2 [INST]/[/INST] tags and collapse a doubled "Question:" prefix.
    clean_text = re.sub(r"\[/?INST\]", "", text)
    clean_text = re.sub(r"Question:\s*Question:", "Question:", clean_text)
    clean_text = clean_text.strip()

    # Expect "Question: ... Hint: ..."; anything else falls through below.
    match = re.search(r"Question:\s*(.*?)(?:\nHint:|Hint:)(.*)", clean_text, re.DOTALL)

    if match:
        question = match.group(1).strip().strip('"').replace("Question:", "").strip()
        hint = match.group(2).strip().strip('"')
    else:
        question = clean_text.strip()
        hint = "No hint available"

    return {
        "question": question,
        "hint": hint
    }
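# A quick sanity check of the parser on a hypothetical model completion:
#   parse_generated_text('Question: "What is inertia?"\nHint: Think of Newton.')
#   -> {'question': 'What is inertia?', 'hint': 'Think of Newton.'}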

def generate_questions(topic, difficulty, types, count):
    print("Received input:", topic, difficulty, types, count)

    # Guard against an empty selection, which would otherwise return an empty page.
    if not types:
        return "Please select at least one question type."

    try:

        questions = []

        for _ in range(count):
            for q_type in types:
                prompt = (
                    f"Generate a {difficulty} difficulty {q_type} question about {topic}.\n"
                    "Format strictly as follows:\n"
                    "Question: <your question here>\n"
                    "Hint: <your hint here or 'No hint available'>"
                )

                # Llama-2 chat format: wrap the instruction in [INST] tags.
                formatted_prompt = f"<s>[INST] {prompt} [/INST]"
                print("Prompt:", formatted_prompt)

                result = pipe(formatted_prompt)
                print("Raw Output:", result)

                # The pipeline echoes the prompt, so strip it to keep only the completion.
                generated_text = result[0]['generated_text'].replace(formatted_prompt, "").strip()
                parsed = parse_generated_text(generated_text)

                print("Parsed Output:", parsed)

                # Safe fallback
                if not parsed['question']:
                    parsed['question'] = "⚠️ Could not parse question."
                if not parsed['hint']:
                    parsed['hint'] = "No hint available"

                formatted = f"**Type**: {q_type}\n\n**Question**: {parsed['question']}\n\n**Hint**: {parsed['hint']}\n\n---"
                questions.append(formatted)

        final_output = "\n\n".join(questions)
        print("Final Output:", final_output)
        return final_output

    except Exception as e:
        print("Error:", e)
        return f"❌ Something went wrong: {e}"

iface = gr.Interface(
    fn=generate_questions,
    inputs=[
        gr.Textbox(label="Topic"),
        gr.Dropdown(choices=["easy", "medium", "hard"], label="Difficulty", value="medium"),
        gr.CheckboxGroup(choices=["Conceptual", "Numerical", "Application"], label="Question Types"),
        gr.Slider(minimum=1, maximum=5, step=1, value=2, label="Number of Questions per Type")
    ],
    outputs=gr.Markdown(label="Generated Questions"),
    title="AI Question Generator",
    description="Enter a topic, then choose a difficulty and one or more question types to generate AI-powered questions."
)

if __name__ == "__main__":
    iface.queue().launch()
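# Run locally with `python app.py`; Gradio serves at http://127.0.0.1:7860 by default.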