AneriThakkar committed on
Commit
d1a3b91
1 Parent(s): 661bcbb

Update app.py

Files changed (1)
  1. app.py +65 -57
app.py CHANGED
@@ -1,58 +1,66 @@
- # import torch
- import streamlit as st
- # import numpy as np
- from transformers import T5ForConditionalGeneration, T5Tokenizer
- # from transformers import pipeline
- from transformers import AutoTokenizer, AutoModelForCausalLM
-
- def load_model(model_name):
-     if model_name == "T5":
-         model = T5ForConditionalGeneration.from_pretrained('google/flan-t5-base')
-         tokenizer = T5Tokenizer.from_pretrained('google/flan-t5-base')
-         return model, tokenizer
-     if model_name == "Llama3":
-         model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
-         tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
-         return model, tokenizer
-     if model_name == "Llama3-Instruct":
-         tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
-         model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
-         return model, tokenizer
-     else:
-         st.error(f"Model {model_name} not available.")
-         return None, None
-
- def generate_question(model,tokenizer,context):
-     input_text = 'Generate a question from this: ' + context
-     input_ids = tokenizer(input_text, return_tensors='pt').input_ids
-     outputs = model.generate(input_ids,max_length=512)
-     output_text = tokenizer.decode(outputs[0][1:len(outputs[0])-1])
-     return output_text
-
- def main():
-     st.title("Question Generation From Given Text")
-     context = st.text_area("Enter text","Laughter is the best medicine.")
-     st.write("Select a model and provide the text to generate questions.")
-     model_choice = st.selectbox("Select a model", ["T5", "Llama3", "Llama3-Instruct"])
-
-     if st.button("Generate Questions"):
-         model, tokenizer = load_model(model_choice)
-         if model and tokenizer:
-             questions = generate_question(model, tokenizer, context)
-             st.write("Generated Question:")
-             st.write(questions)
-         else:
-             st.error("Model loading failed.")
-     # tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
-     # model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
-     # tokenizer = AutoTokenizer.from_pretrained("ramsrigouthamg/t5_squad_v1")
-     # model = AutoModelForSeq2SeqLM.from_pretrained("ramsrigouthamg/t5_squad_v1")
-     # input_text = 'Generate a question from this: ' + context
-     # input_ids = tokenizer(input_text, return_tensors='pt').input_ids
-     # outputs = model.generate(input_ids)
-     # output_text = tokenizer.decode(outputs[0][1:len(outputs[0])-1])
-     # st.write("Generated question:")
-     # st.write(output_text)
-
- if __name__ == '__main__':
+ # import torch
+ import streamlit as st
+ # import numpy as np
+ from transformers import T5ForConditionalGeneration, T5Tokenizer
+ # from transformers import pipeline
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ def load_model(model_name):
+     if model_name == "T5":
+         model = T5ForConditionalGeneration.from_pretrained('google/flan-t5-base')
+         tokenizer = T5Tokenizer.from_pretrained('google/flan-t5-base')
+         return model, tokenizer
+     if model_name == "Llama3":
+         model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
+         tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
+         return model, tokenizer
+     if model_name == "Llama3-Instruct":
+         tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
+         model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
+         return model, tokenizer
+     if model_name == "Phi3":
+         tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
+         model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
+         return model, tokenizer
+     if model_name == "Gemma":
+         tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
+         model = AutoModelForCausalLM.from_pretrained("google/gemma-7b")
+         return model, tokenizer
+     else:
+         st.error(f"Model {model_name} not available.")
+         return None, None
+
+ def generate_question(model,tokenizer,context):
+     input_text = 'Generate a question from this: ' + context
+     input_ids = tokenizer(input_text, return_tensors='pt').input_ids
+     outputs = model.generate(input_ids,max_length=512)
+     output_text = tokenizer.decode(outputs[0][1:len(outputs[0])-1])
+     return output_text
+
+ def main():
+     st.title("Question Generation From Given Text")
+     context = st.text_area("Enter text","Laughter is the best medicine.")
+     st.write("Select a model and provide the text to generate questions.")
+     model_choice = st.selectbox("Select a model", ["T5", "Llama3", "Llama3-Instruct","Phi3","Gemma"])
+
+     if st.button("Generate Questions"):
+         model, tokenizer = load_model(model_choice)
+         if model and tokenizer:
+             questions = generate_question(model, tokenizer, context)
+             st.write("Generated Question:")
+             st.write(questions)
+         else:
+             st.error("Model loading failed.")
+     # tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
+     # model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
+     # tokenizer = AutoTokenizer.from_pretrained("ramsrigouthamg/t5_squad_v1")
+     # model = AutoModelForSeq2SeqLM.from_pretrained("ramsrigouthamg/t5_squad_v1")
+     # input_text = 'Generate a question from this: ' + context
+     # input_ids = tokenizer(input_text, return_tensors='pt').input_ids
+     # outputs = model.generate(input_ids)
+     # output_text = tokenizer.decode(outputs[0][1:len(outputs[0])-1])
+     # st.write("Generated question:")
+     # st.write(output_text)
+
+ if __name__ == '__main__':
      main()
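
For reference, a minimal sketch (an assumption, not part of this commit) of exercising the FLAN-T5 branch of the flow above outside Streamlit; the checkpoint name and prompt mirror app.py, and skip_special_tokens=True is used here as a tidier alternative to the manual outputs[0][1:len(outputs[0])-1] slicing:

from transformers import T5ForConditionalGeneration, T5Tokenizer

# Same checkpoint as the "T5" branch of load_model() in app.py
model = T5ForConditionalGeneration.from_pretrained('google/flan-t5-base')
tokenizer = T5Tokenizer.from_pretrained('google/flan-t5-base')

# Same prompt format as generate_question()
context = "Laughter is the best medicine."
input_ids = tokenizer('Generate a question from this: ' + context, return_tensors='pt').input_ids
outputs = model.generate(input_ids, max_length=512)

# skip_special_tokens drops the leading pad and trailing eos tokens instead of slicing positions by hand
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

In the app itself, load_model is called on every button press; wrapping it with Streamlit's st.cache_resource decorator would keep the selected model in memory across reruns instead of reloading it each time.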