Update app.py
app.py CHANGED
@@ -58,11 +58,12 @@ def generate(prompt, history,max_new_tokens,health,temperature=temperature,top_p
     )
     cnt=0
     history1=history
-
+    '''
     stats="*******************\n"
     for eac in health:
         stats+=f'{eac}\n'
     stats+="*******************\n"
+    '''
     for ea in history:
         print (ea)
         for l in ea:
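The stats pre-formatting block is disabled above with a bare triple-quoted string rather than deleted, so `stats` is no longer assigned at this point; the streaming yields in the next hunk still reference `stats` and would raise a NameError unless it is set earlier in the function (not visible in this diff). A minimal sketch of the disabled block as a standalone helper, in case it is restored later, assuming only that `health` is an iterable of printable entries:

def format_stats(health):
    # Illustrative reconstruction of the block commented out above.
    stats = "*******************\n"
    for entry in health:
        stats += f"{entry}\n"  # one health entry per line
    stats += "*******************\n"
    return stats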
@@ -71,16 +72,16 @@ def generate(prompt, history,max_new_tokens,health,temperature=temperature,top_p
     print(f'cnt:: {cnt}')
     if cnt > MAX_HISTORY:
         history1 = compress_history(str(history), temperature, top_p, repetition_penalty)
-    formatted_prompt = format_prompt(f"{GAME_MASTER.format(history=history1,stats=
+    formatted_prompt = format_prompt(f"{GAME_MASTER.format(history=history1,stats=health,dice=random.randint(1,100))}, {prompt}", history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
 
     for response in stream:
         output += response.token.text
         if history:
-            yield "",[(prompt,output)],stats
+            yield "",[(prompt,output)],stats,None
         else:
-            yield "",[(prompt,output)],stats
+            yield "",[(prompt,output)],stats,None
     generate_kwargs2 = dict(
         temperature=temperature,
         max_new_tokens=128,
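The rewritten `formatted_prompt` line now feeds `stats=health` and a fresh `dice` roll straight into the GAME_MASTER template, and each yield gains a fourth value (None while streaming) to match the extra output component wired up in the last hunk. A minimal sketch of the token-streaming pattern used here, assuming `client` is a `huggingface_hub.InferenceClient`; the model id below is a placeholder, not taken from app.py:

from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")  # placeholder model id

def stream_reply(formatted_prompt, max_new_tokens=256):
    # Accumulate one token per stream event and yield the partial text,
    # so a Gradio generator handler can update the chat live.
    output = ""
    stream = client.text_generation(
        formatted_prompt,
        max_new_tokens=max_new_tokens,
        stream=True,
        details=True,
        return_full_text=False,
    )
    for response in stream:
        output += response.token.text
        yield output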
@@ -99,24 +100,29 @@ def generate(prompt, history,max_new_tokens,health,temperature=temperature,top_p
 
     lines = output.strip().strip("\n").split("\n")
     skills=[]
+    new_stats="*******************"
     for i,line in enumerate(lines):
         if ": " in line:
             try:
                 lab_1 = line.split(": ")[0]
+
                 skill_1 = line.split(": ")[1].split(" ")[0]
                 skill_1=int(skill_1)
                 skill ={lab_1:skill_1}
                 skills.append(skill)
+
+                new_stat += f'{lab_1}: {skill_1}'
+
                 print(skills)
             except Exception as e:
                 print (f'--Error :: {e}')
                 print(f'Line:: {line}')
-    stats=
+    stats=new_stat
     if history:
         history.append((prompt,output))
-        yield "",history,stats
+        yield "",history,stats,skills
     else:
-        yield "",[(prompt,output)],stats
+        yield "",[(prompt,output)],stats,skills
 
 def clear_fn():
     return None,None
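One naming mismatch in the added lines: the accumulator is initialized as `new_stats` but written as `new_stat` and read back as `stats=new_stat`, so the first successfully parsed line would raise a NameError. A sketch of the same parsing step with the names unified; the `Label: number` line shape is inferred from the split logic above, and a trailing newline keeps entries from running together:

def parse_skills(output: str):
    # Turn "Label: 12 ..." lines into [{label: value}, ...] plus a display string.
    skills = []
    new_stats = "*******************\n"
    for line in output.strip().split("\n"):
        if ": " not in line:
            continue
        try:
            label = line.split(": ")[0]
            value = int(line.split(": ")[1].split(" ")[0])
            skills.append({label: value})
            new_stats += f"{label}: {value}\n"
        except Exception as e:
            print(f"--Error :: {e}")  # same diagnostics as the original loop
            print(f"Line:: {line}")
    return skills, new_stats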
@@ -144,12 +150,12 @@ with gr.Blocks() as app:
     with gr.Row():
         tokens = gr.Slider(label="Max new tokens",value=2096,minimum=0,maximum=1048*10,step=64,interactive=True,info="The maximum numbers of new tokens")
         json_out=gr.JSON(value=base_stats)
-
+        char_stats=gr.Textbox()
     #text=gr.JSON()
     #inp_query.change(search_models,inp_query,models_dd)
     #test_b=test_btn.click(itt,url,e_box)
     clear_btn.click(clear_fn,None,[prompt,chatbot])
-    go=button.click(generate,[prompt,chatbot,tokens,
+    go=button.click(generate,[prompt,chatbot,tokens,char_stats],[prompt,chatbot,char_stats,json_out])
     stop_button.click(None,None,None,cancels=[go])
 app.launch(show_api=False)
 
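With `char_stats` added as both an input and an output, the click wiring now routes four values per yield: the cleared prompt, the chat history, the stats text, and the parsed skills for the JSON panel. A stripped-down, runnable sketch of that wiring; `fake_generate` stands in for app.py's generate, and any component or label not shown in the diff is an assumption:

import gradio as gr

def fake_generate(prompt, history, tokens, char_stats):
    # Yield the same four-value shape the real generate produces.
    history = (history or []) + [(prompt, f"echo: {prompt}")]
    yield "", history, "*** stats ***", [{"Strength": 12}]

with gr.Blocks() as app:
    chatbot = gr.Chatbot()
    prompt = gr.Textbox(label="Prompt")
    with gr.Row():
        button = gr.Button("Go")
        stop_button = gr.Button("Stop")
        clear_btn = gr.Button("Clear")
        tokens = gr.Slider(label="Max new tokens", value=2096, minimum=0, maximum=1048 * 10, step=64)
        json_out = gr.JSON()
        char_stats = gr.Textbox()
    clear_btn.click(lambda: (None, None), None, [prompt, chatbot])
    # Four outputs receive the four values of each yield; keeping the event
    # handle lets the stop button cancel the in-flight stream.
    go = button.click(fake_generate, [prompt, chatbot, tokens, char_stats],
                      [prompt, chatbot, char_stats, json_out])
    stop_button.click(None, None, None, cancels=[go])

app.launch(show_api=False)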