import gradio as gr
from gradio_client import Client

title = """# Welcome To Tonic's Easy YI-6B-200K"""
description = """This is a [connector that looks like OpenAI](https://huggingface.co/spaces/Tonic1/TonicsYI-6B-200k) on the inputs; it then ignores most of them and connects to my [YI-6B-200K endpoint](https://huggingface.co/01-ai/Yi-6B-200K). This helps you use [Yi for AGI here](https://huggingface.co/spaces/Tonic/AGYIntelligence)."""

# Client for the hosted YI-6B-200K Space.
client = Client("https://tonic1-tonicsyi-6b-200k.hf.space/--replicas/cvjqf/")


def chat_api_interface(messages, model, frequency_penalty=0, logit_bias=None,
                       max_tokens=None, n=1, presence_penalty=0, response_format="text",
                       seed=None, stop=None, stream=False, temperature=0.9, top_p=1,
                       tools=None, tool_choice="none", user=None):
    # Accepts the full OpenAI-style parameter set, but only forwards the fields
    # that the YI-6B-200K endpoint actually understands.
    result = client.predict(
        your_message=messages,
        system_prompt=(
            "I am YiTonic, an AI language model created by Tonic-AI. I am a cautious "
            "assistant. I carefully follow instructions. I am helpful and harmless and "
            "I follow ethical guidelines and promote positive behavior."
        ),
        max_new_tokens=16000,
        temperature=temperature,
        top_p=top_p,
        top_k=40,  # assumed fixed default; the UI does not expose top_k
        disable_for_faster_inference=False,
        api_name="/predict",
    )
    return result


with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Row():
        with gr.Column():
            messages = gr.Textbox(
                label="Messages (JSON format)",
                placeholder="Enter messages as a JSON array",
                value="[{\"role\": \"user\", \"content\": \"Hello, how can I help you today?\"}, {\"role\": \"assistant\", \"content\": \"I'm looking for information on renewable energy sources.\"}]",
            )
            model = gr.Textbox(label="Model ID", placeholder="Enter the model ID", value="text-davinci-003")
            frequency_penalty = gr.Number(label="Frequency Penalty", value=0.5)
            logit_bias = gr.Textbox(
                label="Logit Bias (JSON format)",
                placeholder="Enter logit bias as JSON",
                value="{\"50256\": -100, \"50257\": 100}",
            )
            max_tokens = gr.Number(label="Max Tokens", value=150)
            n = gr.Number(label="N", value=3)
            presence_penalty = gr.Number(label="Presence Penalty", value=0.6)
            response_format = gr.Radio(label="Response Format", choices=["text", "json_object"], value="text")
            seed = gr.Number(label="Seed", value=42)
            stop = gr.Textbox(label="Stop Sequence(s)", placeholder="Enter stop sequences", value="[\"\\n\", \" end\"]")
            stream = gr.Checkbox(label="Stream", value=False)
            temperature = gr.Slider(label="Temperature", minimum=0, maximum=2, value=0.7)
            top_p = gr.Slider(label="Top P", minimum=0, maximum=1, value=0.9)
            tools = gr.Textbox(
                label="Tools (JSON format)",
                placeholder="Enter tools as JSON",
                value="{\"spellcheck\": true, \"grammar_check\": false}",
            )
            tool_choice = gr.Textbox(label="Tool Choice", value="spellcheck", placeholder="Enter tool choice")
            user = gr.Textbox(label="User", placeholder="Enter user identifier", value="user123")
        with gr.Column():
            submit_btn = gr.Button("Submit")
            output = gr.Textbox(label="API Response", interactive=False)

    submit_btn.click(
        chat_api_interface,
        inputs=[messages, model, frequency_penalty, logit_bias, max_tokens, n,
                presence_penalty, response_format, seed, stop, stream, temperature,
                top_p, tools, tool_choice, user],
        outputs=output,
    )

demo.launch()