freddyaboulton (HF staff) committed
Commit 7d80f05
0 Parent(s)

Duplicate from gradio-discord-bots/Llama-2-13b-chat-hf

Files changed (4)
  1. .gitattributes +35 -0
  2. README.md +14 -0
  3. app.py +75 -0
  4. requirements.txt +1 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Llama 2 13b Chat Hf
+ emoji: 👀
+ colorFrom: purple
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.38.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: gradio-discord-bots/Llama-2-13b-chat-hf
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,75 @@
+ import gradio as gr
+ import os
+ import requests
+ from text_generation import Client
+
+ HF_TOKEN = os.getenv('HF_TOKEN')
+ ENDPOINT = os.getenv('INFERENCE_ENDPOINT')
+
+
+ system_message = "\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."
+
+ if ENDPOINT:
+     client = Client(ENDPOINT, headers={"Authorization": f"Bearer {HF_TOKEN}"})
+
+
+ def predict(message, chatbot):
+
+     input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
+     for interaction in chatbot:
+         input_prompt = input_prompt + str(interaction[0]) + " [/INST] " + str(interaction[1]) + " </s><s> [INST] "
+
+     input_prompt = input_prompt + str(message) + " [/INST] "
+
+     response = client.generate(input_prompt, max_new_tokens=256).generated_text
+
+     return response
+
+ interface = gr.ChatInterface(predict).queue(concurrency_count=75)
+
+ with gr.Blocks() as demo:
+     gr.Markdown("""
+     # Llama-2-13b-chat-hf Discord Bot Powered by Gradio and Hugging Face Endpoints
+
+     Make sure you read the 'Inference Endpoints' section below first! 🦙
+
+     ### First install the `gradio_client`
+
+     ```bash
+     pip install gradio_client
+     ```
+
+     ### Then deploy to Discord in one line! ⚡️
+
+     ```python
+     import gradio_client as grc
+     secrets = {"HF_TOKEN": "<your-key-here>", "INFERENCE_ENDPOINT": "<endpoint-url>"}
+     client = grc.Client.duplicate("gradio-discord-bots/Llama-2-13b-chat-hf", private=False, secrets=secrets)
+     client.deploy_discord(api_names=["chat"])
+     ```
+
+     """)
+     with gr.Accordion(label="Inference Endpoints", open=False):
+         gr.Markdown("""
+         ## Setting Up Inference Endpoints 💪
+         To deploy this Space as a Discord bot, you will need to deploy your own Llama model to Hugging Face Inference Endpoints.
+         Don't worry, it's super easy!
+
+         1. Go to the [model page](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) 🦙
+         2. Click Deploy > Inference Endpoints
+         <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/modelpage_llama.png" alt="drawing" width="800" height="400"/>
+         3. Select your desired cloud provider and region ☁️
+         <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/falcon_instruct.png" alt="drawing" width="800" height="400"/>
+         4. Optional: Set Automatic Scale to Zero. This will pause your endpoint after 15 minutes of inactivity to prevent unwanted billing. 💰
+         <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/autoscale.png" alt="drawing" width="800" height="400"/>
+         5. Create the endpoint! Copy the endpoint URL after it's complete.
+         <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/running_model.png" alt="drawing" width="800" height="400"/>
+         """
+         )
+     gr.Markdown("""
+     Note: As a derivative work of [Llama-2-70b-chat](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) by Meta, this demo is governed by the original [license](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI/blob/main/USE_POLICY.md).
+     """)
+     with gr.Row(visible=False):
+         interface.render()
+
+
+ demo.queue(concurrency_count=70).launch()
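
`predict` builds the standard Llama 2 chat prompt by hand rather than through a tokenizer chat template. For reference, a minimal sketch of the string the Inference Endpoint actually receives for one prior exchange plus a new message; the system prompt and turns below are shortened, illustrative stand-ins, not values from the Space:

```python
# Illustrative only: mirrors the string-building inside predict() above.
system_message = "You are a helpful assistant."        # shortened stand-in for the full system prompt
chatbot = [("Hi there!", "Hello! How can I help?")]    # hypothetical (user, assistant) history
message = "Tell me about Llama 2."

input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
for user_turn, bot_turn in chatbot:
    input_prompt += str(user_turn) + " [/INST] " + str(bot_turn) + " </s><s> [INST] "
input_prompt += str(message) + " [/INST] "

print(input_prompt)
# [INST] <<SYS>>
# You are a helpful assistant.
# <</SYS>>
#
#  Hi there! [/INST] Hello! How can I help? </s><s> [INST] Tell me about Llama 2. [/INST]
```

The `</s><s>` marker closes the previous turn and opens the next one, which is why it appears only between completed exchanges.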
requirements.txt ADDED
@@ -0,0 +1 @@
+ text-generation
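
`text-generation` is the only pinned dependency; `gradio` itself is provided by the Space runtime selected by the `sdk` fields in README.md. Once a duplicate of this Space is running with your secrets, you can smoke-test it from Python before calling `deploy_discord`. A hedged sketch with `gradio_client`; the Space name below is this repo's and the `/chat` endpoint name is inferred from the `deploy_discord(api_names=["chat"])` snippet above, so verify both against your own duplicate:

```python
from gradio_client import Client  # pip install gradio_client

# Hypothetical: point this at your own duplicate of the Space instead.
client = Client("gradio-discord-bots/Llama-2-13b-chat-hf")

# The ChatInterface's chat function is assumed to be exposed under the
# "chat" api_name (as the deploy snippet above relies on); it takes a
# single message string and returns the model's reply as a string.
reply = client.predict("What is the capital of France?", api_name="/chat")
print(reply)
```

The Discord bot created by `deploy_discord` calls this same api_name, so a successful reply here means the Space and its Inference Endpoint are wired up correctly.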