BOUSLIMI committed
Commit 37214be
1 Parent(s): 74b65cd

Update app.py

Files changed (1)
  1. app.py +10 -3
app.py CHANGED
@@ -1,21 +1,28 @@
-import transformers
 import streamlit as st
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 import tempfile
+
 # Corrected model class name
 model_name = "potsawee/t5-large-generation-squad-QuestionAnswer"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+
 uploaded_file = st.file_uploader("Upload Document or Paragraph")
+
 if uploaded_file is not None:
     with tempfile.NamedTemporaryFile(delete=False) as temp_file:
         temp_file.write(uploaded_file.read())
-        document_text = temp_file.read().decode('utf-8')
+        # Close the file before reading its contents
+        temp_file.close()
+    with open(temp_file.name, 'r', encoding='utf-8') as file:
+        document_text = file.read()
     st.success("Document uploaded successfully!")
 else:
     document_text = st.text_area("Enter Text (Optional)", height=200)
+
 question = st.text_input("Ask a Question")
 bouton_ok = st.button("Answer")
+
 if bouton_ok:
     # Improved prompt for better context
     context = document_text if document_text else "Empty document."
@@ -23,4 +30,4 @@ if bouton_ok:
     outputs = model.generate(inputs, max_length=150, min_length=80, length_penalty=5, num_beams=2)
     summary = tokenizer.decode(outputs[0])
     st.text("Answer:")
-    st.text(summary)
+    st.text(summary)
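
The change above works around the original bug (reading back from a NamedTemporaryFile that had only been written) by closing the file and reopening it by name; note that delete=False leaves the temporary file on disk afterwards. Streamlit's file uploader already returns an in-memory buffer, so a temp file is arguably not needed at all. The snippet below is a minimal sketch of that simpler route, not code from this commit, and the errors="replace" choice is an assumption for handling non-UTF-8 uploads.

# Sketch only: read the upload straight from Streamlit's in-memory buffer
# instead of routing it through a NamedTemporaryFile on disk.
import streamlit as st

uploaded_file = st.file_uploader("Upload Document or Paragraph")
if uploaded_file is not None:
    # UploadedFile behaves like io.BytesIO, so its bytes are already in memory.
    # Assumption: the document is UTF-8 text; errors="replace" keeps bad bytes
    # from raising UnicodeDecodeError and crashing the app.
    document_text = uploaded_file.getvalue().decode("utf-8", errors="replace")
    st.success("Document uploaded successfully!")
else:
    document_text = st.text_area("Enter Text (Optional)", height=200)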
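
The diff never shows the line that builds inputs (old line 22 / new line 29 falls between the two hunks), so the exact tokenizer call in app.py is unknown. For a checkpoint loaded with AutoModelForSeq2SeqLM, that step is normally a tokenizer call returning PyTorch tensors followed by generate(); the sketch below illustrates one plausible version using the same generation arguments as the commit. The question-plus-context prompt format and the skip_special_tokens=True flag are assumptions for illustration, not taken from the repository.

# Sketch only: one plausible shape of the tokenize-and-generate step
# that the diff elides.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "potsawee/t5-large-generation-squad-QuestionAnswer"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

question = "What is the capital of France?"                    # illustrative input
context = "Paris is the capital and largest city of France."  # illustrative input

# Assumed prompt: question and context concatenated into one string.
# return_tensors="pt" gives the PyTorch tensor that generate() expects.
inputs = tokenizer(question + " " + context, return_tensors="pt").input_ids

# Same generation arguments as in app.py.
outputs = model.generate(inputs, max_length=150, min_length=80,
                         length_penalty=5, num_beams=2)

# skip_special_tokens=True drops <pad> and </s> markers from the decoded text.
answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(answer)

If the checkpoint expects a different input layout (its name suggests it was trained to generate question-answer pairs from a SQuAD-style context), the prompt would need to follow the model card instead.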