ffreemt committed on
Commit
53c5ff4
1 Parent(s): 7eec20e
Files changed (2) hide show
  1. app.py +12 -10
  2. requirements.txt +2 -1
app.py CHANGED
@@ -61,16 +61,18 @@ if model_cache_path:
61
  model_size_gb = Path(model_cache_path[0]).stat().st_size / 2**30
62
  logger.info(f"{model_name=} {model_size_gb=:.2f} GB")
63
 
64
- # with gr.Blocks() as demo:
65
- # chatbot = gr.Chatbot()
66
- # msg = gr.Textbox()
67
- # clear = gr.ClearButton([msg, chatbot])
68
 
69
- # def respond(message, chat_history):
70
- # response, chat_history = model.chat(tokenizer, message, history=chat_history, temperature=0.7, repetition_penalty=1.2, max_length=128)
71
- # chat_history.append((message, response))
72
- # return "", chat_history
73
 
74
- # msg.submit(respond, [msg, chatbot], [msg, chatbot])
75
 
76
- # demo.launch()
 
 
 
 
 
 
 
 
61
  model_size_gb = Path(model_cache_path[0]).stat().st_size / 2**30
62
  logger.info(f"{model_name=} {model_size_gb=:.2f} GB")
63
 
 
 
 
 
64
 
65
+ def respond(message, chat_history):
66
+ response, chat_history = model.chat(tokenizer, message, history=chat_history, temperature=0.7, repetition_penalty=1.2, max_length=128)
67
+ chat_history.append((message, response))
68
+ return "", chat_history
69
 
 
70
 
71
+ with gr.Blocks() as block:
72
+ chatbot = gr.Chatbot()
73
+ msg = gr.Textbox()
74
+
75
+ clear = gr.ClearButton([msg, chatbot])
76
+ msg.submit(respond, [msg, chatbot], [msg, chatbot])
77
+
78
+ block.queue().launch()
requirements.txt CHANGED
@@ -2,6 +2,7 @@ torch
2
  transformers
3
  sentencepiece
4
  cpm_kernels
5
- accelerate
 
6
 
7
  loguru
 
2
  transformers
3
  sentencepiece
4
  cpm_kernels
5
+ # accelerate
6
+ # bitsandbytes
7
 
8
  loguru