Mattral committed on
Commit
31dc28b
1 Parent(s): aeb7f98

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -8
app.py CHANGED
@@ -35,7 +35,7 @@ def initialize_history(chunks):
35
  # Initialize history with initial chunks
36
  history = initialize_history(all_chunks[:2]) # Starting with the first two chunks for example
37
 
38
- def format_prompt_mixtral(message, history, chunks):
39
  prompt = "<s>"
40
  prompt += f"{system_prompt_text}\n\n" # Add the system prompt
41
 
@@ -47,7 +47,7 @@ def format_prompt_mixtral(message, history, chunks):
47
  prompt += f"[INST] {message} [/INST]"
48
  return prompt
49
 
50
- def chat_inf(message, history, chunks, seed, temp, tokens, top_p, rep_p):
51
  generate_kwargs = dict(
52
  temperature=temp,
53
  max_new_tokens=tokens,
@@ -57,7 +57,7 @@ def chat_inf(message, history, chunks, seed, temp, tokens, top_p, rep_p):
57
  seed=seed,
58
  )
59
 
60
- formatted_prompt = format_prompt_mixtral(message, history, chunks)
61
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
62
  output = ""
63
  for response in stream:
@@ -73,11 +73,11 @@ def clear_fn():
73
 
74
  rand_val = random.randint(1, 1111111111111111)
75
 
76
- def check_rand(inp, val):
77
- if inp:
78
- return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1, 1111111111111111))
79
  else:
80
- return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
81
 
82
  with gr.Blocks() as app:
83
  gr.HTML("""<center><h1 style='font-size:xx-large;'>PTT Chatbot</h1><br><h3>running on Huggingface Inference </h3><br><h7>EXPERIMENTAL</center>""")
@@ -105,7 +105,13 @@ with gr.Blocks() as app:
105
 
106
  hid1 = gr.Number(value=1, visible=False)
107
 
108
- go = btn.click(check_rand, [rand, seed], seed).then(chat_inf, [inp, chat, all_chunks, seed, temp, tokens, top_p, rep_p], chat)
 
 
 
 
 
 
109
 
110
  stop_btn.click(None, None, None, cancels=[go])
111
  clear_btn.click(clear_fn, None, [inp, chat])
 
35
  # Initialize history with initial chunks
36
  history = initialize_history(all_chunks[:2]) # Starting with the first two chunks for example
37
 
38
+ def format_prompt_mixtral(message, history):
39
  prompt = "<s>"
40
  prompt += f"{system_prompt_text}\n\n" # Add the system prompt
41
 
 
47
  prompt += f"[INST] {message} [/INST]"
48
  return prompt
49
 
50
+ def chat_inf(message, history, seed, temp, tokens, top_p, rep_p):
51
  generate_kwargs = dict(
52
  temperature=temp,
53
  max_new_tokens=tokens,
 
57
  seed=seed,
58
  )
59
 
60
+ formatted_prompt = format_prompt_mixtral(message, history)
61
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
62
  output = ""
63
  for response in stream:
 
73
 
74
  rand_val = random.randint(1, 1111111111111111)
75
 
76
def check_rand(rand, val):
    """Pick the seed to use for the next generation.

    When *rand* is truthy, draw a fresh random seed in the slider's range;
    otherwise honour the user-supplied *val*, coerced to int.
    """
    if not rand:
        return int(val)
    return random.randint(1, 1111111111111111)
81
 
82
  with gr.Blocks() as app:
83
  gr.HTML("""<center><h1 style='font-size:xx-large;'>PTT Chatbot</h1><br><h3>running on Huggingface Inference </h3><br><h7>EXPERIMENTAL</center>""")
 
105
 
106
  hid1 = gr.Number(value=1, visible=False)
107
 
108
def on_chat(message, chat, seed, temp, tokens, top_p, rep_p):
    """Click handler: stream a model reply into the chat history.

    Rebuilds the retrieval history from the first two chunks, runs the
    user *message* through ``chat_inf`` (a generator), and yields the
    growing ``chat`` list after every streamed response so the component
    bound as this handler's output updates incrementally.

    The original body returned ``None``, so the event's declared output
    (``chat`` in ``btn.click(on_chat, [...], chat)``) never received the
    appended messages; yielding fixes that while keeping the signature
    unchanged.
    """
    chat.clear()
    history = initialize_history(all_chunks[:2])
    for response in chat_inf(message, history, seed, temp, tokens, top_p, rep_p):
        # NOTE(review): list.append takes exactly one argument, so this
        # unpacking only works if chat_inf yields one-element tuples —
        # confirm chat_inf's yield shape; chat.append(response) may be
        # what was intended.
        chat.append(*response)
        yield chat
114
+ go = btn.click(on_chat, [inp, chat, seed, temp, tokens, top_p, rep_p], chat)
115
 
116
  stop_btn.click(None, None, None, cancels=[go])
117
  clear_btn.click(clear_fn, None, [inp, chat])