AneriThakkar committed
Commit 2a0d6cc
1 parent: 2661a89

Upload main.py

Files changed (1)
  1. main.py +58 -0
main.py ADDED
@@ -0,0 +1,58 @@
+ import streamlit as st
+ from transformers import T5ForConditionalGeneration, T5Tokenizer
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ def load_model(model_name):
+     # Map the UI selection to a Hugging Face checkpoint and return (model, tokenizer).
+     if model_name == "T5":
+         model = T5ForConditionalGeneration.from_pretrained('google/flan-t5-base')
+         tokenizer = T5Tokenizer.from_pretrained('google/flan-t5-base')
+         return model, tokenizer
+     elif model_name == "Llama3":
+         model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
+         tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
+         return model, tokenizer
+     elif model_name == "Llama3-Instruct":
+         tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
+         model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
+         return model, tokenizer
+     else:
+         st.error(f"Model {model_name} not available.")
+         return None, None
+
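+ # Possible refinement (hypothetical helper, not part of this file): Streamlit
+ # re-runs the whole script on every widget interaction, so an 8B checkpoint
+ # would be reloaded on each click. st.cache_resource keeps heavyweight objects
+ # in memory across re-runs. A minimal sketch, delegating to load_model above:
+ @st.cache_resource
+ def load_model_cached(model_name):
+     # Streamlit caches the returned (model, tokenizer) pair per model_name.
+     return load_model(model_name)
+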
+ def generate_question(model, tokenizer, context):
+     # Prompt the model and decode its output, skipping special tokens
+     # instead of slicing the first and last token off by position.
+     input_text = 'Generate a question from this: ' + context
+     input_ids = tokenizer(input_text, return_tensors='pt').input_ids
+     outputs = model.generate(input_ids, max_length=512)
+     output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return output_text
+
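+ # Hypothetical variant (assumption, not in the original logic): decoder-only
+ # models such as the Llama 3 checkpoints echo the prompt in generate() output,
+ # so a causal-LM path would slice the prompt tokens off before decoding.
+ def generate_question_causal(model, tokenizer, context):
+     input_text = 'Generate a question from this: ' + context
+     input_ids = tokenizer(input_text, return_tensors='pt').input_ids
+     outputs = model.generate(input_ids, max_new_tokens=64)
+     # Keep only the tokens generated after the prompt.
+     new_tokens = outputs[0][input_ids.shape[1]:]
+     return tokenizer.decode(new_tokens, skip_special_tokens=True)
+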
+ def main():
+     st.title("Question Generation From Given Text")
+     context = st.text_area("Enter text", "Laughter is the best medicine.")
+     st.write("Select a model and provide the text to generate questions.")
+     model_choice = st.selectbox("Select a model", ["T5", "Llama3", "Llama3-Instruct"])
+
+     if st.button("Generate Questions"):
+         model, tokenizer = load_model(model_choice)
+         if model and tokenizer:
+             questions = generate_question(model, tokenizer, context)
+             st.write("Generated Question:")
+             st.write(questions)
+         else:
+             st.error("Model loading failed.")
+ # Alternative seq2seq checkpoint for question generation:
+ # ramsrigouthamg/t5_squad_v1 (loaded via AutoTokenizer / AutoModelForSeq2SeqLM).
+
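+ # Note (assumption, not stated in this file): the meta-llama checkpoints are
+ # gated on the Hugging Face Hub, so loading them typically requires accepting
+ # the model license and authenticating first, e.g.
+ #     from huggingface_hub import login
+ #     login(token="hf_...")   # or set HF_TOKEN in the environment
+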
+ if __name__ == '__main__':
+     main()
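+
+ # To try the app locally, the standard Streamlit CLI entry point applies:
+ #     streamlit run main.py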