randeom committed on
Commit
1d33274
1 Parent(s): b23b2e1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -13
app.py CHANGED
@@ -1,6 +1,5 @@
1
  import streamlit as st
2
  from huggingface_hub import InferenceClient
3
- import os
4
 
5
  client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.1")
6
 
@@ -14,14 +13,6 @@ def format_prompt(message, history, system_prompt=""):
14
  prompt += f"[INST] {message} [/INST]"
15
  return prompt
16
 
17
- def extract_filename_and_code(bot_response):
18
- try:
19
- with open("default.py", 'w') as file:
20
- file.write(bot_response)
21
- st.write("Successfully wrote to default.py!")
22
- except Exception as e:
23
- st.write(f"Error writing to file: {e}")
24
-
25
  def generate(prompt, history, system_prompt="", temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
26
  temperature = float(temperature)
27
  if temperature < 1e-2:
@@ -43,8 +34,7 @@ def generate(prompt, history, system_prompt="", temperature=0.9, max_new_tokens=
43
 
44
  for response in stream:
45
  output += response.token.text
46
- extract_filename_and_code(output)
47
- yield output
48
 
49
  return output
50
 
@@ -64,12 +54,16 @@ max_new_tokens = st.slider("Max new tokens", 0, 8192, 512, step=64)
64
  top_p = st.slider("Top-p (nucleus sampling)", 0.0, 1.0, 0.95, step=0.05)
65
  repetition_penalty = st.slider("Repetition penalty", 1.0, 2.0, 1.0, step=0.05)
66
 
 
 
 
 
67
  # Generate button
68
  if st.button("Generate Waifu"):
69
  history = []
70
  prompt = f"Create a waifu character named {name} with {hair_color} hair, a {personality} personality, and wearing a {outfit_style}."
71
- output = generate(prompt, history, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
72
 
73
  # Display the generated character
74
  st.subheader("Generated Waifu Character")
75
- st.write(next(output))
 
1
  import streamlit as st
2
  from huggingface_hub import InferenceClient
 
3
 
4
  client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.1")
5
 
 
13
  prompt += f"[INST] {message} [/INST]"
14
  return prompt
15
 
 
 
 
 
 
 
 
 
16
  def generate(prompt, history, system_prompt="", temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
17
  temperature = float(temperature)
18
  if temperature < 1e-2:
 
34
 
35
  for response in stream:
36
  output += response.token.text
37
+ st.session_state.generated_text = output
 
38
 
39
  return output
40
 
 
54
  top_p = st.slider("Top-p (nucleus sampling)", 0.0, 1.0, 0.95, step=0.05)
55
  repetition_penalty = st.slider("Repetition penalty", 1.0, 2.0, 1.0, step=0.05)
56
 
57
+ # Initialize session state for generated text
58
+ if "generated_text" not in st.session_state:
59
+ st.session_state.generated_text = ""
60
+
61
  # Generate button
62
  if st.button("Generate Waifu"):
63
  history = []
64
  prompt = f"Create a waifu character named {name} with {hair_color} hair, a {personality} personality, and wearing a {outfit_style}."
65
+ generate(prompt, history, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
66
 
67
  # Display the generated character
68
  st.subheader("Generated Waifu Character")
69
+ st.write(st.session_state.generated_text)