import json
import os
import random
import time
from datetime import datetime

import requests
import gradio as gr
from huggingface_hub import InferenceApi

# Hugging Face Inference API client for the BLOOM model
API_TOKEN = os.getenv("API_TOKEN")
inference = InferenceApi("bigscience/bloom", token=API_TOKEN)
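# Note: InferenceApi is the legacy client in huggingface_hub; newer releases
# recommend InferenceClient instead, but the call pattern below targets InferenceApi.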

DECODEM_TOKEN = os.getenv("DECODEM_TOKEN")

headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url_decodemprompts = 'https://us-central1-createinsightsproject.cloudfunctions.net/getdecodemprompts'

# Fetch the few-shot prompt template for this use case from the Decodem endpoint.
# Fall back to an empty template if the request fails, so the app still starts.
data = {"prompt_type": 'devils_advocate', "decodem_token": DECODEM_TOKEN}
prompt = ''
try:
    r = requests.post(url_decodemprompts, data=json.dumps(data), headers=headers, timeout=30)
    prompt = str(r.content, 'UTF-8')
except requests.exceptions.RequestException as e:
    print(e)


def infer(prompt,
          max_length=250,
          top_k=0,
          num_beams=0,
          no_repeat_ngram_size=2,
          top_p=0.9,
          seed=42,
          temperature=0.7,
          greedy_decoding=False,
          return_full_text=False):
    print(seed)
    # Normalise the generation parameters: 0 means "not set" for top_k and
    # num_beams, and beam search, sampling and greedy decoding are mutually
    # exclusive, so disable whichever options do not apply.
    top_k = None if top_k == 0 else top_k
    do_sample = False if num_beams > 0 else not greedy_decoding
    num_beams = None if (greedy_decoding or num_beams == 0) else num_beams
    no_repeat_ngram_size = None if num_beams is None else no_repeat_ngram_size
    top_p = None if num_beams else top_p
    early_stopping = None if num_beams is None else num_beams > 0

    params = {
        "max_new_tokens": max_length,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "do_sample": do_sample,
        "seed": seed,
        "early_stopping": early_stopping,
        "no_repeat_ngram_size": no_repeat_ngram_size,
        "num_beams": num_beams,
        "return_full_text": return_full_text
    }

    s = time.time()
    response = inference(prompt, params=params)
    proc_time = time.time() - s
    print(f"Inference took {proc_time:.2f}s")

    return response
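
# A minimal usage sketch (assuming a valid API_TOKEN is set):
#   infer("Input: smart tee shirt\nOutput:", max_length=100, seed=7)
# On success the Inference API returns a list of {'generated_text': ...} dicts,
# which is what the caller below unpacks.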


def getdevilsadvocate(text_inp):
    print(text_inp)
    print(datetime.today().strftime("%d-%m-%Y"))
    # Build the few-shot prompt: template, then the user's idea as a new Input
    text = prompt + "\nInput:" + text_inp + "\nOutput:"
    resp = infer(text, seed=random.randint(0, 100))

    # Strip the echoed prompt and keep only the first three lines of the answer
    generated_text = resp[0]['generated_text']
    result = generated_text.replace(text, '').strip()
    result = result.replace("Output:", "")
    parts = result.split("###")
    topic = parts[0].strip()
    topic = "\n".join(topic.split('\n')[:3])
    print(topic)
    return topic
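
# The parsing above assumes the Decodem template uses Input:/Output: pairs
# separated by "###" markers, so the first "###"-delimited chunk is the answer.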


with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Devil's Advocate</center></h1>")
    gr.Markdown(
        """ChatGPT-based insights from <a href="https://www.decodem.ai">Decodem.ai</a> for businesses.\nWhile ChatGPT has many use cases, we have evolved specific use cases/templates for businesses.\n\nThis template provides a devil's advocate view of your idea. Enter a crisp idea (2-3 words) and get the results. Use the examples below as a guide. We use an equally powerful AI model, bigscience/bloom."""
    )
    textbox = gr.Textbox(placeholder="Enter the crisp idea here...", lines=1, label='The Idea')
    btn = gr.Button("Generate")
    output1 = gr.Textbox(lines=2, label="Devil's Advocate")

    btn.click(getdevilsadvocate, inputs=[textbox], outputs=[output1])
    examples = gr.Examples(examples=['paneer donuts', 'smart tee shirt', 'blockchain for EV chargers', 'autonomous cars'],
                           inputs=[textbox])

demo.launch()
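# To expose a temporary public URL when running locally, Gradio supports
# demo.launch(share=True) in place of the plain launch above.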