import gradio as gr
import os
from text_generation import Client

# Credentials and the inference endpoint URL are read from the Space's secrets.
HF_TOKEN = os.getenv('HF_TOKEN')
ENDPOINT = os.getenv('INFERENCE_ENDPOINT')


system_message = "\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe.  Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."

# Only create the client if an endpoint has been configured.
if ENDPOINT:
    client = Client(ENDPOINT, headers={"Authorization": f"Bearer {HF_TOKEN}"})
else:
    client = None


def predict(message, chatbot):
    if client is None:
        raise gr.Error("INFERENCE_ENDPOINT is not set. See the 'Inference Endpoints' section below.")

    # Assemble the conversation in the Llama-2 chat prompt format: the system
    # message sits inside <<SYS>> tags of the first [INST] block, and each
    # prior (user, assistant) turn is closed with </s><s>.
    input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
    for user_turn, assistant_turn in chatbot:
        input_prompt += f"{user_turn} [/INST] {assistant_turn} </s><s> [INST] "

    input_prompt += f"{message} [/INST] "

    response = client.generate(input_prompt, max_new_tokens=256).generated_text

    return response
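
# Illustrative example of the assembled prompt (not executed): with one prior
# exchange ("Hi" -> "Hello!") and a new message "Tell me a joke", predict builds:
#   [INST] <<SYS>>
#   ...system message...
#   <</SYS>>
#
#    Hi [/INST] Hello! </s><s> [INST] Tell me a joke [/INST]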

# Wrap predict in a ChatInterface; queue() enables request queuing for the chat API.
interface = gr.ChatInterface(predict).queue()

with gr.Blocks() as demo:
    gr.Markdown("""
    # Llama-2-13b-chat-hf Discord Bot Powered by Gradio and Hugging Face Endpoints
    
    Make sure you read the 'Inference Endpoints' section below first! 🦙
                
    ### First install the `gradio_client`
        
    ```bash
    pip install gradio_client
    ```
    
    ### Then deploy to discord in one line! ⚡️
    
    ```python
    import gradio_client as grc
    secrets = {"HF_TOKEN": "<your-key-here>", "INFERENCE_ENDPOINT": "<endpoint-url>"}
    client = grc.Client.duplicate("gradio-discord-bots/Llama-2-13b-chat-hf", private=False, secrets=secrets, sleep_timeout=2880)
    client.deploy_discord(api_names=["chat"])
    ```
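
    Note: `sleep_timeout` is given in minutes, so 2880 keeps the duplicated Space awake through 48 hours of inactivity before it pauses.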

    """)
    with gr.Accordion(label="Inference Endpoints", open=False):
        gr.Markdown("""
    ## Setting Up Inference Endpoints 💪
    To deploy this space as a discord bot, you will need to deploy your own Llama model to Hugging Face Endpoints.
    Don't worry, it's super easy!
                
    1. Go to the [model page](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) 🦙
    2. Click Deploy > Inference Endpoints
    <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/modelpage_llama.png" alt="drawing" width="800" height=400/>
    3. Select your desired cloud provider and region ☁️
    <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/falcon_instruct.png" alt="drawing" width="800" height=400/>
    4. Optional: Set Automatic Scale to Zero. This will pause your endpoint after 15 minutes of inactivity to prevent unwanted billing. 💰
    <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/autoscale.png" alt="drawing" width="800" height=400/>
    5. Create the endpoint! Copy the endpoint URL after it's complete.
    <img src="https://gradio-builds.s3.amazonaws.com/demo-files/discordbots/inference_endpoints/running_model.png" alt="drawing" width="800" height=400/>
    """
    )
    gr.Markdown("""
    Note: As a derivative work of [Llama-2-13b-chat-hf](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) by Meta, this demo is governed by the original [license](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI/blob/main/USE_POLICY.md).
    """)
    # Render the chat interface hidden so its API routes exist without showing the UI.
    with gr.Row(visible=False):
        interface.render()


demo.queue().launch()