import os

import gradio as gr
import requests

# from huggingface_hub import InferenceClient  # used by the commented-out chat-interface variant below

# Hosted model on the Hugging Face Inference API.
api_url = "https://api-inference.huggingface.co/models/sf06583/gpt2-model-network-automation"
# The API token is read from the HF_HOME environment variable.
access_token = os.getenv("HF_HOME")
def chat(input_text):
    """Send a network intent to the Inference API and extract the generated commands."""
    payload = {"inputs": input_text}
    headers = {"Authorization": f"Bearer {access_token}"}
    response = requests.post(api_url, json=payload, headers=headers)
    response_data = response.json()  # parse the response body as JSON

    # On failure (e.g. while the model is still loading) the API returns a
    # dict with an "error" key instead of a list of generations.
    if isinstance(response_data, dict) and "error" in response_data:
        return f"Error: {response_data['error']}"

    # Earlier draft: re-post the growing text until "</s>" appears.
    # while True:
    #     response = requests.post(api_url, json=payload, headers=headers)
    #     response_text += response.json()[0].get("generated_text")
    #     if "</s>" in response_text:
    #         break
    #     payload = {"inputs": response_text}

    # Concatenate the "generated_text" field of every returned item.
    response_text = ""
    for item in response_data:
        response_text += item.get("generated_text", "")
    print("Response Text:", response_text)  # inspect the raw model output

    # The generated commands sit between the </inst> and </s> tags.
    index_of_inst = response_text.find("</inst>")
    index_of_end = response_text.find("</s>")
    print("Index of </inst>:", index_of_inst)
    print("Index of </s>:", index_of_end)

    if index_of_inst == -1:
        return "Error: Tags not found in response"
    # If the closing </s> tag is missing, keep everything after </inst>
    # instead of silently dropping the last character.
    end = index_of_end if index_of_end != -1 else len(response_text)
    text_between_tags = response_text[index_of_inst + len("</inst>"):end].strip()

    # The model emits literal "\n" sequences; convert them to real line breaks.
    return text_between_tags.replace("\\n", "\n")
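
# When the hosted model is cold, the Inference API keeps returning a JSON
# error body until loading finishes. The wrapper below is a minimal sketch of
# how to retry in that case; `chat_with_retry` and its parameters are
# illustrative and not part of the original app.
import time

def chat_with_retry(input_text, retries=3, wait_seconds=20):
    """Retry `chat` while the hosted model is still loading (hypothetical helper)."""
    result = chat(input_text)
    for _ in range(retries):
        if not result.startswith("Error:"):
            return result
        time.sleep(wait_seconds)  # give the Inference API time to load the model
        result = chat(input_text)
    return result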
iface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(
        label="Intent",
        info="Type your network intent here",
        lines=1,
        placeholder="I want to perform basic network configuration on my topology",
    ),
    outputs=gr.Textbox(
        label="Intent-based Network Commands",
        info="Your network commands will generate here",
        show_copy_button=True,
    ),
    title="CybHermes Network Automation Tool",
    theme="shivi/calm_seafoam",
)
iface.launch(share=True)
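
# For a quick end-to-end check from another process, the `gradio_client`
# package can drive the running app (a sketch; the Space id below is inferred
# from the repository name and may differ):
#
#     from gradio_client import Client
#     client = Client("sf06583/IBN-FYP-Kaavish")
#     print(client.predict("Configure OSPF between two routers"))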
# Earlier chat-interface variant, kept commented out for reference.
# client = InferenceClient("sf06583/gpt2-model-network-automation")
# First draft of the prompt formatter:
# def format_prompt(message, history, system_prompt=None):
#     prompt = "<s>"
#     for user_prompt, bot_response in history:
#         prompt += f"<INST> {user_prompt} </INST>"
#         prompt += f" {bot_response}</s> "
#     if system_prompt:
#         prompt += f"[SYS] {system_prompt} [/SYS]"
#     prompt += f"[INST] {message} [/INST]"
#     return prompt
# Revised formatter that also understands <inst>-tagged history entries:
# def format_prompt(message, history, system_prompt=None):
#     prompt = "<s>"
#     for user_prompt, bot_response in history:
#         inst_start_tag = "<inst>"
#         inst_end_tag = "</inst>"
#         inst_start_pos = user_prompt.find(inst_start_tag)
#         inst_end_pos = user_prompt.find(inst_end_tag)
#         if inst_start_pos != -1 and inst_end_pos != -1:
#             # Extract the intent and command between the <inst> tags
#             intent = user_prompt[inst_start_pos + len(inst_start_tag):inst_end_pos]
#             command = user_prompt[inst_end_pos + len(inst_end_tag):]
#             # Format the intent and command into the prompt
#             prompt += f"[INST] {intent} [/INST]"
#             prompt += f" {command}"
#         else:
#             prompt += f"[INST] {user_prompt} [/INST]"
#             prompt += f" {bot_response}"
#         prompt += "</s>"
#     if system_prompt:
#         prompt += f"[SYS] {system_prompt} [/SYS]"
#     prompt += f"[INST] {message} [/INST]"
#     return prompt
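# For example, with history = [("show interfaces", "show ip interface brief")]
# and message = "configure a vlan", the formatter above would produce
# (assuming the per-turn "</s>" sits inside the loop, as in the first draft):
#
#     <s>[INST] show interfaces [/INST] show ip interface brief</s>[INST] configure a vlan [/INST]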
# Earlier local-generation draft (requires a locally loaded `model` and `tokenizer`):
# def generate(
#     prompt, history, system_prompt=None, temperature=0.2, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,
# ):
#     try:
#         temperature = float(temperature)
#         if temperature < 1e-2:
#             temperature = 1e-2
#         top_p = float(top_p)
#         generate_kwargs = dict(
#             temperature=temperature,
#             max_length=max_new_tokens,
#             top_p=top_p,
#             repetition_penalty=repetition_penalty,
#             do_sample=True,
#             pad_token_id=model.config.eos_token_id,
#         )
#         formatted_prompt = format_prompt(prompt, history, system_prompt)
#         ids = tokenizer.encode(formatted_prompt, return_tensors='pt')
#         outputs = model.generate(ids, **generate_kwargs)
#         generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
#         return generated_text
#     except Exception as e:
#         print("An error occurred during text generation:", e)
#         return "An error occurred during text generation. Please try again later."
# def generate_text(prompt):
#     try:
#         # Stream tokens from the Inference API via the InferenceClient
#         stream = client.text_generation(prompt, stream=True, details=True, return_full_text=False)
#         output = ""
#         for response in stream:
#             output += response.token.text
#         return output
#     except Exception as e:
#         print("An error occurred during text generation:", e)
#         return "An error occurred during text generation. Please try again later."
# mychatbot = gr.Chatbot(
#     avatar_images=["./user.png", "./botm.png"],
#     bubble_full_width=False,
#     show_label=False,
#     show_copy_button=True,
#     likeable=True,
# )
# demo = gr.ChatInterface(
#     fn=generate,  # note: points at the earlier draft; generate_text is the streaming helper
#     chatbot=mychatbot,
#     title="Adaab! I am CybHermes, what do you want to do on your network?",
#     css="body { background-color: inherit; overflow-x:hidden;}"
#         ":root {--color-accent: transparent !important; --color-accent-soft:transparent !important; --code-background-fill:black !important; --body-text-color:white !important;}"
#         "#component-2 {background:#ffffff1a; display:contents;}"
#         "div#component-0 { height: auto !important;}"
#         ".gradio-container.gradio-container-4-8-0.svelte-1kyws56.app {max-width: 100% !important;}"
#         "gradio-app {background: linear-gradient(134deg,#00425e 0%,#001a3f 43%,#421438 77%) !important; background-attachment: fixed !important; background-position: top;}"
#         ".panel.svelte-vt1mxs {background: transparent; padding:0;}"
#         ".block.svelte-90oupt { background: transparent; border-color: transparent;}"
#         ".bot.svelte-12dsd9j.svelte-12dsd9j.svelte-12dsd9j { background: #ffffff1a; border-color: transparent; color: black !important;}"
#         ".user.svelte-12dsd9j.svelte-12dsd9j.svelte-12dsd9j { background: #ffffff1a; border-color: transparent; color: white; padding: 10px 18px;}"
#         "div.svelte-iyf88w{ background: #cc98d445; border-color: transparent; border-radius: 25px;}"
#         "textarea.scroll-hide.svelte-1f354aw { background: transparent; color: black !important;}"
#         ".primary.svelte-cmf5ev { background: transparent; color: white;}"
#         ".primary.svelte-cmf5ev:hover { background: transparent; color: white;}"
#         "button#component-8 { display: none; position: absolute; margin-top: 60px; border-radius: 25px;}"
#         "div#component-9 { max-width: fit-content; margin-left: auto; margin-right: auto;}"
#         "button#component-10, button#component-11, button#component-12 { flex: none; background: #ffffff1a; border: none; color: white; margin-right: auto; margin-left: auto; border-radius: 9px; min-width: fit-content;}"
#         ".share-button.svelte-12dsd9j { display: none;}"
#         "footer.svelte-mpyp5e { display: none !important;}"
#         ".message-buttons-bubble.svelte-12dsd9j.svelte-12dsd9j.svelte-12dsd9j { border-color: #31546E; background: #31546E;}"
#         ".bubble-wrap.svelte-12dsd9j.svelte-12dsd9j.svelte-12dsd9j {padding: 0;}"
#         ".prose h1 { color: white !important; font-size: 16px !important; font-weight: normal !important; background: #ffffff1a; padding: 20px; border-radius: 20px; width: 90%; margin-left: auto !important; margin-right: auto !important;}"
#         ".toast-wrap.svelte-pu0yf1 { display:none !important;}"
#         ".scroll-hide { scrollbar-width: auto !important;}"
#         ".main svelte-1kyws56 {max-width: 800px; align-self: center;}"
#         "div#component-4 {max-width: 650px; margin-left: auto; margin-right: auto;}"
#         "body::-webkit-scrollbar { display: none;}"
# )
# demo.queue().launch(show_api=False)