Supiri committed on
Commit 413eb14
1 Parent(s): 0937020

Update app.py

Files changed (1): app.py (+2, -4)
app.py CHANGED
@@ -13,12 +13,10 @@ num_beams = st.slider('Number of beams', min_value=1, max_value=10, value=6)
 num_beam_groups = st.slider('Number of beam groups', min_value=1, max_value=10, value=2)
 diversity_penalty = st.slider('Diversity penalty', min_value=0.1, max_value=5.0, value=2.5)
 
-context = st.text_input('Personality', value="Hinata was soft-spoken and polite, always addressing people with proper honorifics. She is kind, always thinking of others more than for herself, caring for their feelings and well-being. She doesn't like being confrontational for any reason. This led to her being meek or timid to others, as her overwhelming kindness can render her unable to respond or act for fear of offending somebody.")
+context = st.text_area('Personality', value="Hinata was soft-spoken and polite, always addressing people with proper honorifics. She is kind, always thinking of others more than for herself, caring for their feelings and well-being. She doesn't like being confrontational for any reason. This led to her being meek or timid to others, as her overwhelming kindness can render her unable to respond or act for fear of offending somebody.")
 query = st.text_input('Question', value="What's your name?")
 
 input_ids = tokenizer(f"personality: {context}", f"inquiry: {query}", return_tensors='pt').input_ids
 outputs = model.generate(input_ids, num_beams=num_beams, diversity_penalty=diversity_penalty, num_beam_groups=num_beam_groups)
 
-st.write("Bio:\t",context, "\n")
-st.write("Prompt:\t", query)
-st.write("Answer:\t", tokenizer.decode(outputs[0], skip_special_tokens=True), "\n")
+st.write(f"{context.split(' ')[0]}:\t", tokenizer.decode(outputs[0], skip_special_tokens=True))
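For context outside this hunk: the changed lines rely on st, tokenizer, and model being defined earlier in app.py, which this diff does not show. Below is a minimal sketch of that setup, assuming a Hugging Face seq2seq checkpoint; the "t5-base" name is a placeholder, not the checkpoint the Space actually loads.

# Sketch of the setup this hunk assumes (not part of the commit).
# "t5-base" is a placeholder; the real app.py loads its own checkpoint.
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")

num_beams = st.slider('Number of beams', min_value=1, max_value=10, value=6)

Note that diversity_penalty only takes effect in transformers when num_beam_groups > 1 (group beam search) and num_beams is divisible by num_beam_groups, which the slider defaults in the hunk (6 beams, 2 groups) satisfy.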