ECUiVADE committed on
Commit
c49c1a9
1 Parent(s): 586c116

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -42
app.py CHANGED
@@ -1,49 +1,73 @@
1
- import streamlit as st
2
- from langchain.prompts import PromptTemplate
3
- from langchain.llms import CTransformers
4
  import gradio as gr
5
## Function to get a response from the local LLaMA-2 GGUF model
def getLLamaresponse(message):
    """Generate a short blog-style reply with a local GGUF model.

    Args:
        message: Text typed by the user in the chat UI. It is used as the
            blog topic; when empty/falsy, falls back to the previous
            hard-coded topic "home decoration".

    Returns:
        The raw text completion produced by the model.
    """
    # Bug fix: the original ignored `message` entirely and always generated
    # a blog about the hard-coded topic. Use the caller's message, keeping
    # the old constant as a backward-compatible fallback.
    input_text = message or "home decoration"
    no_words = "100"
    blog_style = "lifestyle"

    ### LLama2 model (quantized GGUF served through ctransformers)
    llm = CTransformers(model='TheBloke/OpenHermes-2.5-Mistral-7B-GGUF',
                        model_type='llama',
                        config={'max_new_tokens': 256,
                                'temperature': 0.01})

    ## Prompt Template
    template = """
    Write a blog for {blog_style} job profile for a topic {input_text}
    within {no_words} words.
    """

    prompt = PromptTemplate(input_variables=["blog_style", "input_text", "no_words"],
                            template=template)

    ## Generate the response from the LLama 2 model
    response = llm(prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
    print(response)
    return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
 
35
# Old Gradio UI: a single-tab patient chatbot wired to getLLamaresponse.
with gr.Blocks() as demo:
    # Page title.
    gr.Markdown("# AI Patient Chatbot")
    with gr.Group():
        with gr.Tab("Patient Chatbot"):
            # Chat transcript display.
            chatbot = gr.Chatbot()
            # User input box.
            message = gr.Textbox(label="Enter your message to Barry", placeholder="Type here...", lines=2)
            send_message = gr.Button("Submit")
            # NOTE(review): getLLamaresponse returns a plain string, but a
            # gr.Chatbot output expects a list of (user, bot) pairs —
            # confirm this handler actually renders correctly.
            send_message.click(getLLamaresponse, inputs=[message], outputs=[chatbot])
            # Button exists but has no handler attached (see commented line).
            save_chatlog = gr.Button("Save Chatlog")
            #send_message.click(SaveChatlog, inputs=[message], outputs=[chatbot])

    #message.submit(AIPatient, inputs=[message], outputs=[chatbot])

demo.launch(debug=True)
 
 
 
 
1
  import gradio as gr
2
+ import os
3
+ from pathlib import Path
4
+ import argparse
5
+ from huggingface_hub import snapshot_download
6
+
7
+
8
# Alternative base model, kept for reference:
# repo_name = "TheBloke/Mistral-7B-v0.1-GGUF"
# model_file = "mistral-7b-v0.1.Q6_K.gguf"

# Fine-tuned "comedy" checkpoint, quantized to Q6_K GGUF.
repo_name = 'HumanityFTW/so_rude'
model_file = "mistral-comedy-2.0-ckpt-600.Q6_K.gguf"

# Download only the single GGUF file into the current working directory.
print('Fetching model:', repo_name, model_file)
snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_file)
print('Done fetching model:')

DEFAULT_MODEL_PATH = model_file

from llama_cpp import Llama
# NOTE(review): `model_type` is a ctransformers parameter, not a documented
# llama_cpp.Llama argument — confirm this kwarg is accepted (or silently
# ignored) by the installed llama-cpp-python version.
llm = Llama(model_path=model_file, model_type="mistral")
25
+
26
def predict(input, chatbot, max_length, top_p, temperature, history):
    """Stream a model completion for *input* into the chat transcript.

    Appends the user turn to both the visible chat and the flat history
    list, then yields ``(chatbot, history)`` after every streamed token so
    Gradio can render partial output, plus one final yield with the
    completed reply recorded in history.
    """
    history.append(input)
    chatbot.append((input, ""))

    reply = ""
    token_stream = llm(input, stream=True, temperature=temperature,
                       top_p=top_p, max_tokens=max_length)
    for event in token_stream:
        reply += event['choices'][0]['text']
        # Replace the bot half of the newest chat pair with the text so far.
        chatbot[-1] = (chatbot[-1][0], reply)
        yield chatbot, history

    history.append(reply)
    yield chatbot, history
40
+
41
+
42
def reset_user_input():
    """Clear the user's textbox after a message has been submitted."""
    cleared = gr.update(value="")
    return cleared
44
+
45
+
46
def reset_state():
    """Reset the conversation: fresh empty chat display and history."""
    return list(), list()
48
 
49
 
50
# New Gradio UI: streaming chatbot with generation controls.
with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">So Rude</h1>""")

    # Chat transcript display.
    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=8, elem_id="user_input")
            submitBtn = gr.Button("Submit", variant="primary", elem_id="submit_btn")
        with gr.Column(scale=1):
            # Sampling controls passed into predict() on every submit.
            max_length = gr.Slider(0, 256, value=64, step=1.0, label="Maximum Length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 2.0, value=0.95, step=0.01, label="Temperature", interactive=True)
            emptyBtn = gr.Button("Clear History")

    # Per-session flat list of alternating user/assistant strings.
    history = gr.State([])

    # predict is a generator, so the chat pane updates as tokens stream in.
    submitBtn.click(
        predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], show_progress=True
    )
    # Second click handler clears the textbox after submission.
    submitBtn.click(reset_user_input, [], [user_input])

    emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)

# queue() is required for generator (streaming) handlers; share=False keeps
# the app local to this host.
demo.queue().launch(share=False, inbrowser=True)