freddyaboulton HF staff committed on
Commit
6951aff
1 Parent(s): dece49c
Files changed (2) hide show
  1. app.py +75 -0
  2. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Gradio front-end that turns a Llama-2-13b-chat Inference Endpoint into a Discord bot."""
import gradio as gr
import os
import requests
from text_generation import Client

# Secrets injected into the Space environment: HF auth token and the
# URL of the user's own deployed Inference Endpoint.
HF_TOKEN = os.environ.get('HF_TOKEN')
ENDPOINT = os.environ.get('INFERENCE_ENDPOINT')


# Llama-2 chat system prompt (kept verbatim — it is sent to the model).
system_message = "\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."

# Only build the text-generation client when an endpoint is configured,
# so the Space still boots (and shows setup instructions) without one.
if ENDPOINT:
    client = Client(ENDPOINT, headers={"Authorization": f"Bearer {HF_TOKEN}"})
14
+
15
+
16
def predict(message, chatbot):
    """Answer *message* given the prior *chatbot* history.

    Builds a Llama-2 ``[INST]``/``<<SYS>>`` formatted prompt from the
    (user, assistant) pairs in *chatbot*, sends it to the configured
    text-generation endpoint, and returns the generated text.
    """
    # System preamble in the Llama-2 chat format.
    prompt_parts = [f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "]

    # Replay every prior exchange so the model sees the full conversation.
    for exchange in chatbot:
        prompt_parts.append(f"{str(exchange[0])} [/INST] {str(exchange[1])} </s><s> [INST] ")

    # Close with the new user message awaiting its reply.
    prompt_parts.append(f"{str(message)} [/INST] ")

    return client.generate("".join(prompt_parts), max_new_tokens=256).generated_text
27
+
28
# Chat backend; rendered hidden below so the gradio_client-driven
# Discord bot can call it while the visible page shows instructions.
chat_interface = gr.ChatInterface(predict).queue(concurrency_count=75)

with gr.Blocks() as demo:
    # Landing page: one-liner deployment instructions for the bot.
    gr.Markdown(
        """
    # Llama-2-13b-chat-hf Discord Bot Powered by Gradio and Hugging Face Endpoints

    Make sure you read the 'Inference Endpoints' section below first! 🦙

    ### First install the `gradio_client`

    ```bash
    pip install gradio_client
    ```

    ### Then deploy to discord in one line! ⚡️

    ```python
    secrets = {"HF_TOKEN": "<your-key-here>", "INFERENCE_ENDPOINT": "<endpoint-url>"}
    client = grc.Client.duplicate("gradio-discord-bots/Llama-2-13b-chat-hf", private=False, secrets=secrets)
    client.deploy_discord(api_names=["chat"])
    ```

    """
    )
    # Collapsed walkthrough for provisioning the user's own endpoint.
    with gr.Accordion(label="Inference Endpoints", open=False):
        gr.Markdown(
            """
        ## Setting Up Inference Endpoints 💪
        To deploy this space as a discord bot, you will need to deploy your own Llama model to Hugging Face Endpoints.
        Don't worry it's super easy!

        1. Go to the [model page](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) 🦙
        2. Click Deploy > Inference Endpoints
        <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/modelpage_llama.png" alt="drawing" width="800" height=400/>
        3. Select your desired cloud provider and region ☁️
        <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/falcon_instruct.png" alt="drawing" width="800" height=400/>
        4. Optional: Set Automatic Scale to Zero. This will pause your endpoint after 15 minutes of inactivity to prevent unwanted billing. 💰
        <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/autoscale.png" alt="drawing" width="800" height=400/>
        5. Create the endpoint! Copy the endpoint URL after it's complete.
        <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/running_model.png" alt="drawing" width="800" height=400/>
        """
        )
        # License notice required by the upstream model card.
        gr.Markdown(
            """
        Note: As a derivate work of [Llama-2-70b-chat](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) by Meta, this demo is governed by the original [license](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI/blob/main/USE_POLICY.md)
        """
        )
    # Keep the chat API reachable but invisible on the page.
    with gr.Row(visible=False):
        chat_interface.render()


demo.queue(concurrency_count=70).launch()
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ text-generation