Spaces:
Paused
Paused
love2poppy
committed on
Commit
•
3fa0f4e
1
Parent(s):
6ebf55c
update chatbot state
Browse files
app.py
CHANGED
@@ -45,7 +45,8 @@ def get_model_class(model_type,
|
|
45 |
model_base = model_class.model.from_pretrained(model_name_or_path,
|
46 |
load_in_8bit=LOAD_8BIT,
|
47 |
torch_dtype=torch.float16,
|
48 |
-
device_map="auto"
|
|
|
49 |
tokenizer = model_class.tokenizer.from_pretrained(model_name_or_path) # default add_eos_token=False
|
50 |
|
51 |
model = PeftModel.from_pretrained(
|
@@ -102,7 +103,7 @@ def predict(
|
|
102 |
|
103 |
history.append((instruction, bot_response))
|
104 |
|
105 |
-
return "", history
|
106 |
|
107 |
|
108 |
def predict_test(message, top_p, temperature, history):
|
@@ -116,7 +117,7 @@ def predict_test(message, top_p, temperature, history):
|
|
116 |
return history, history
|
117 |
|
118 |
def clear_session():
|
119 |
-
return '', None
|
120 |
|
121 |
parser = argparse.ArgumentParser(description='Process some integers.')
|
122 |
parser.add_argument('--size', default=7, type=int, help='the size of llama model')
|
@@ -143,7 +144,7 @@ with block as demo:
|
|
143 |
message = gr.Textbox()
|
144 |
state = gr.State()
|
145 |
|
146 |
-
message.submit(predict, inputs=[message, top_p, temperature, state], outputs=[chatbot, state], queue=False)
|
147 |
|
148 |
with gr.Row():
|
149 |
clear_history = gr.Button("🗑️ 清除历史对话 | Clear History")
|
@@ -152,9 +153,9 @@ with block as demo:
|
|
152 |
regenerate = gr.Button("🔁 重新生成 | regenerate")
|
153 |
|
154 |
# regenerate.click(regenerate, inputs=[message], outputs=[chatbot])
|
155 |
-
regenerate.click(fn=clear_session , inputs=[], outputs=[chatbot, state], queue=False)
|
156 |
-
send.click(predict, inputs=[message, top_p, temperature, state], outputs=[chatbot, state])
|
157 |
clear.click(lambda: None, None, message, queue=False)
|
158 |
-
clear_history.click(fn=clear_session , inputs=[], outputs=[chatbot, state], queue=False)
|
159 |
|
160 |
demo.queue(max_size=20, concurrency_count=20).launch()
|
|
|
45 |
model_base = model_class.model.from_pretrained(model_name_or_path,
|
46 |
load_in_8bit=LOAD_8BIT,
|
47 |
torch_dtype=torch.float16,
|
48 |
+
device_map="auto",
|
49 |
+
)
|
50 |
tokenizer = model_class.tokenizer.from_pretrained(model_name_or_path) # default add_eos_token=False
|
51 |
|
52 |
model = PeftModel.from_pretrained(
|
|
|
103 |
|
104 |
history.append((instruction, bot_response))
|
105 |
|
106 |
+
return "", history, history
|
107 |
|
108 |
|
109 |
def predict_test(message, top_p, temperature, history):
|
|
|
117 |
return history, history
|
118 |
|
119 |
def clear_session():
|
120 |
+
return '', '', None
|
121 |
|
122 |
parser = argparse.ArgumentParser(description='Process some integers.')
|
123 |
parser.add_argument('--size', default=7, type=int, help='the size of llama model')
|
|
|
144 |
message = gr.Textbox()
|
145 |
state = gr.State()
|
146 |
|
147 |
+
message.submit(predict, inputs=[message, top_p, temperature, state], outputs=[message, chatbot, state], queue=False)
|
148 |
|
149 |
with gr.Row():
|
150 |
clear_history = gr.Button("🗑️ 清除历史对话 | Clear History")
|
|
|
153 |
regenerate = gr.Button("🔁 重新生成 | regenerate")
|
154 |
|
155 |
# regenerate.click(regenerate, inputs=[message], outputs=[chatbot])
|
156 |
+
regenerate.click(fn=clear_session , inputs=[], outputs=[message, chatbot, state], queue=False)
|
157 |
+
send.click(predict, inputs=[message, top_p, temperature, state], outputs=[message, chatbot, state])
|
158 |
clear.click(lambda: None, None, message, queue=False)
|
159 |
+
clear_history.click(fn=clear_session , inputs=[], outputs=[message, chatbot, state], queue=False)
|
160 |
|
161 |
demo.queue(max_size=20, concurrency_count=20).launch()
|