Jipski committed on
Commit
867c22d
1 Parent(s): 30d5105

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -21,7 +21,7 @@ def infer(input_ids, max_length, temperature, top_k, top_p, num_return_sequences
21
  num_return_sequences=num_return_sequences,
22
  )
23
  return output_sequences
24
- old = "Test"
25
  default_value = "Jetzt tippen!"
26
  #prompts
27
  st.title("Flos gpt-2")
@@ -40,6 +40,7 @@ else:
40
  output_sequences = infer(input_ids, max_length, temperature, top_k, top_p, num_return_sequences)
41
 
42
  for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
 
43
  print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")
44
  generated_sequences = generated_sequence.tolist()
45
  # Decode text
@@ -48,11 +49,11 @@ for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
48
  #text = text[: text.find(args.stop_token) if args.stop_token else None]
49
  # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
50
  total_sequence = (
51
- old + sent + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :]
52
  )
53
  generated_sequences.append(total_sequence)
54
  print(total_sequence)
 
55
 
56
 
57
  st.write(generated_sequences[-1])
58
- old = old + (generated_sequences[-1])
 
21
  num_return_sequences=num_return_sequences,
22
  )
23
  return output_sequences
24
+ old
25
  default_value = "Jetzt tippen!"
26
  #prompts
27
  st.title("Flos gpt-2")
 
40
  output_sequences = infer(input_ids, max_length, temperature, top_k, top_p, num_return_sequences)
41
 
42
  for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
43
+ st.write(old)
44
  print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")
45
  generated_sequences = generated_sequence.tolist()
46
  # Decode text
 
49
  #text = text[: text.find(args.stop_token) if args.stop_token else None]
50
  # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
51
  total_sequence = (
52
+ sent + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :]
53
  )
54
  generated_sequences.append(total_sequence)
55
  print(total_sequence)
56
+ old = generated_sequences[-1]
57
 
58
 
59
  st.write(generated_sequences[-1])