ECUiVADE committed on
Commit
e7b6215
1 Parent(s): 1b31456

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -49
app.py CHANGED
@@ -1,73 +1,77 @@
1
  import gradio as gr
2
  import os
3
- from pathlib import Path
4
- import argparse
5
  from huggingface_hub import snapshot_download
6
-
 
 
 
 
 
 
 
7
 
8
  repo_name = "TheBloke/Mistral-7B-v0.1-GGUF"
9
  model_file = "mistral-7b-v0.1.Q4_K_M.gguf"
10
 
11
-
12
- #repo_name = 'ECUiVADE/Mistral'
13
- #model_file = "openhermes-2.5-mistral-7b.Q4_K_M.gguf"
14
 
15
 
16
- print('Fetching model:', repo_name, model_file)
17
- snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_file)
18
- print('Done fetching model:')
 
 
 
19
 
20
- DEFAULT_MODEL_PATH = model_file
21
 
22
- from llama_cpp import Llama
23
- llm = Llama(model_path=model_file, model_type="mistral")
24
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
def predict(input, chatbot, max_length, top_p, temperature, history):
    """Stream a llama-cpp completion for *input*, yielding UI state as tokens arrive.

    Yields (chatbot, history) after every generated piece so Gradio can
    render the reply incrementally; the final yield records the finished
    response in *history*.
    """
    # Open a fresh chat bubble for this prompt and log the user turn.
    chatbot.append((input, ""))
    history.append(input)

    response = ""
    stream = llm(input, stream=True, temperature=temperature, top_p=top_p, max_tokens=max_length)
    for chunk in stream:
        response += chunk['choices'][0]['text']
        # Rewrite the last bubble with the text accumulated so far.
        chatbot[-1] = (chatbot[-1][0], response)
        yield chatbot, history

    # Generation done — persist the assistant turn and emit the final state.
    history.append(response)
    yield chatbot, history
40
-
41
-
42
def reset_user_input():
    """Clear the input textbox after a message is submitted."""
    cleared = gr.update(value="")
    return cleared
44
 
 
 
 
45
 
46
def reset_state():
    """Return fresh, empty chatbot and history containers for 'Clear History'."""
    return list(), list()
48
 
 
 
49
 
50
# Gradio front-end: chat window plus sampling controls, wired to predict().
with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">So Rude</h1>""")

    chatbot = gr.Chatbot()
    with gr.Row():
        # Left column: prompt entry and submit.
        with gr.Column(scale=4):
            user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=8, elem_id="user_input")
            submitBtn = gr.Button("Submit", variant="primary", elem_id="submit_btn")
        # Right column: sampling hyper-parameters and history reset.
        with gr.Column(scale=1):
            max_length = gr.Slider(0, 256, value=64, step=1.0, label="Maximum Length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 2.0, value=0.95, step=0.01, label="Temperature", interactive=True)
            emptyBtn = gr.Button("Clear History")

    # Conversation transcript kept server-side between events.
    history = gr.State([])

    # Submit streams the model reply, then clears the textbox.
    submitBtn.click(
        predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], show_progress=True
    )
    submitBtn.click(reset_user_input, [], [user_input])

    emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)

demo.queue().launch(share=False, inbrowser=True)
 
1
  import gradio as gr
2
  import os
 
 
3
  from huggingface_hub import snapshot_download
4
+ from llama_cpp import Llama
5
+ import sys
6
+ from io import StringIO
7
+ from transformers import pipeline
8
+ from datasets import load_dataset
9
+ import torch
10
+ import json
11
+ from pathlib import Path
12
 
13
# Model source on the Hugging Face Hub and the quantized GGUF weights file.
repo_name = "TheBloke/Mistral-7B-v0.1-GGUF"
model_file = "mistral-7b-v0.1.Q4_K_M.gguf"

# BUG FIX: the download step was dropped in this revision even though
# snapshot_download is still imported. Llama(model_path=...) needs the file
# on local disk, so fetch it before constructing the model.
print('Fetching model:', repo_name, model_file)
snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_file)
print('Done fetching model:')

# n_gpu_layers=100 requests full GPU offload when a GPU build is available;
# n_ctx=2048 bounds the prompt+completion context window.
llm = Llama(model_path=model_file, n_gpu_layers=100, n_ctx=2048)

# Module-level conversation state shared with AIPatient().
isFirstRun = True  # first call seeds the persona prompt into `context`
context = ""       # running ChatML-style transcript fed to the model
history = []       # (nurse_message, barry_reply) pairs shown in the UI
initContext = """<|im_start|>You are playing the role of an aggressive patient called Barry Lawson. Barry has been waiting for hours at the hospital with severe pain in his chest and hasn't been seen yet. You are incredibly frustrated and angry that things are taking so long, you will only calm down if your concerns are addressed. Waiting will only make you more angry and aggressive towards the staff. At any moment you could break into a RAGE! Make sure you don't censor swear words. Make sure to explain your emotional state and thinking before responding, for example, Barry: (tired of waiting and very angry) What the fuck do I have to do around here to get some treatment!"""
# NOTE(review): hard-coded Colab path — confirm /content exists on this host.
feedback_file = Path("/content/datalog.json")
25
 
26
def AIPatient(message):
    """Generate Barry's reply to one nurse *message* and return the chat history.

    Mutates module-level state: `context` (running prompt transcript),
    `history` (list of (nurse, barry) pairs rendered by the Gradio chatbot)
    and `isFirstRun` (seeds the persona prompt on the first call). Each
    exchange is also appended to `feedback_file` as JSON.
    """
    global isFirstRun, history, context

    # Seed the system/persona prompt exactly once per process.
    if isFirstRun:
        context = initContext
        isFirstRun = False

    # Append the nurse turn and an open "Barry:" cue for the model to complete.
    context += """
<|im_start|>nurse
Nurse: """ + message + """
<|im_start|>barry
Barry:
"""

    # BUG FIX: the original `while len(response) < 1` loop had no bound and
    # could spin forever if the model kept returning empty text; retry a few
    # times, then give up with whatever we have.
    response = ""
    for _ in range(5):
        output = llm(context, max_tokens=400, stop=["Nurse:"], echo=False)
        response = output["choices"][0]["text"].strip()
        if response:
            break

    # Append-only JSON log of every exchange for later review.
    with feedback_file.open("a") as f:
        f.write(json.dumps({"Nurse": message, "Barry": response}, indent=4))
        f.write("\n")

    context += response
    print(context)  # debug: dump the full transcript after each turn

    history.append((message, response))
    return history
60
 
61
# Gradio front-end: a single chatbot tab wired to AIPatient().
with gr.Blocks() as demo:
    gr.Markdown("# AI Patient Chatbot")
    with gr.Group():
        with gr.Tab("Patient Chatbot"):
            chatbot = gr.Chatbot()
            message = gr.Textbox(label="Enter your message to Barry", placeholder="Type here...", lines=2)
            send_message = gr.Button("Submit")
            send_message.click(AIPatient, inputs=[message], outputs=[chatbot])
            # Placeholder button — its handler is not wired up yet.
            save_chatlog = gr.Button("Save Chatlog")
            #send_message.click(SaveChatlog, inputs=[message], outputs=[chatbot])

    #message.submit(AIPatient, inputs=[message], outputs=[chatbot])

demo.launch(debug=True)
 
 
 
76
 
 
77