import json
import os
import requests
import gradio as gr
from loguru import logger

# Backend URL, model name, and API key are read from environment variables
BACKEND_URL = os.getenv('BACKEND_URL','')
MODEL_NAME = os.getenv('MODEL_NAME')
API_KEY = os.getenv('API_KEY')
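
# Example configuration (hypothetical values; the real endpoint and model
# name depend on your deployment):
#   export BACKEND_URL='https://api.example.com/v1/chat/completions'
#   export MODEL_NAME='OrionStar-Yi-34B-Chat'
#   export API_KEY='<your-orionstar-api-key>'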

# Custom headers for the API request
HEADERS = {
    'orionstar-api-key': API_KEY,
    'Content-Type': 'application/json'
}
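
# The backend is assumed to speak the OpenAI-style streaming protocol:
# server-sent events with one JSON object per `data: ` line, closed by
# `data: [DONE]`, e.g.
#   data: {"choices": [{"delta": {"content": "Hello"}}]}
#   data: [DONE]
# chat_stream_generator below parses exactly this shape.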

def clear_session():
    """Clears the chat session."""
    return '', None

def chat_stream_generator(url, payload):
    """Streams the backend response, yielding the accumulated answer so far."""
    answer = ''
    # timeout bounds the connect/read wait; raise_for_status surfaces HTTP errors early.
    with requests.post(url, json=payload, headers=HEADERS, stream=True, timeout=60) as response:
        response.raise_for_status()
        if response.encoding is None:
            response.encoding = 'utf-8'
        for line in response.iter_lines(decode_unicode=True):
            if not line:
                continue
            # Strip the SSE prefix; a literal '[DONE]' marks the end of the stream.
            if line.startswith('data: '):
                line = line[len('data: '):]
            if line == '[DONE]':
                break
            data = json.loads(line)
            if data.get('choices'):
                delta = data['choices'][0].get('delta', {})
                if delta.get('content'):
                    answer += delta['content']
                    yield answer

def generate_chat(input_text: str, history=None):
    """Generates chat responses and updates the chat history."""
    if input_text is None:
        input_text = ''
    if history is None:
        history = []
    # Keep only the last 5 turns. Note: history is used for display only;
    # the payload below sends just the current message to the backend.
    history = history[-5:]
    url = BACKEND_URL
    payload = {
        "model": MODEL_NAME,
        "stream": True,
        "messages": [
            {"role": "user", "content": input_text}
        ]
    }

    # Re-append the growing partial answer on every chunk so Gradio redraws
    # the chatbot incrementally, then pop it so partials do not stack up.
    response = ''
    for response in chat_stream_generator(url, payload):
        history.append((input_text, response))
        yield None, history
        history.pop()
    # Emit the final state once the stream ends (response stays '' if the
    # stream yielded nothing, so this never references an unbound variable).
    history.append((input_text, response))
    yield None, history
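
# Quick manual check (hypothetical snippet; needs a reachable backend):
#   for _, hist in generate_chat('Hello'):
#       print(hist[-1][1])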

logger.info('Starting the OrionStar-Yi-34B Chat...')

# Gradio interface
block = gr.Blocks()

with block as demo:
    gr.Markdown("<center><h1>OrionStar-Yi-34B-Chat Demo</h1></center>")
    gr.Markdown("""
* The Yi series models are large language models open-sourced by the 01.AI team, achieving commendable results on authoritative Chinese, English, and general-domain benchmarks.
* [Orionstar](https://www.orionstar.com/) has further tapped the potential of the Yi-34B base model to build Orionstar-Yi-34B-Chat. Through deep training on a large corpus of high-quality fine-tuning data, we are dedicated to making it an outstanding open-source alternative in the ChatGPT field.
* Orionstar-Yi-34B-Chat performs impressively on mainstream evaluation sets such as C-Eval, MMLU, and CMMLU, significantly outperforming other open-source conversational models worldwide (as of November 2023). For a detailed comparison with other open-source models, see [here](https://github.com/OrionStarAI/OrionStar-Yi-34B-Chat).
* Please click Star to support us on [GitHub](https://github.com/OrionStarAI/OrionStar-Yi-34B-Chat).""")

    chatbot = gr.Chatbot(label='OrionStar-Yi-34B-Chat', elem_classes="control-height")
    message = gr.Textbox(label='Input')

    with gr.Row():
        submit = gr.Button("🚀 Submit")
        clear_history = gr.Button("🧹 Clear History")

    submit.click(
        fn=generate_chat,
        inputs=[message, chatbot],
        outputs=[message, chatbot]
    )
    clear_history.click(
        fn=clear_session,
        inputs=[],
        outputs=[message, chatbot],
        queue=False
    )

# Gradio needs the queue enabled to stream partial results from generator
# handlers; api_open=False and show_api=False keep the HTTP API private.
demo.queue(api_open=False).launch(height=800, share=False, show_api=False)