import json
import requests
import gradio as gr
import random
import time
import os
from datetime import datetime


API_TOKEN = os.getenv("API_TOKEN")
DECODEM_TOKEN = os.getenv("DECODEM_TOKEN")


# Client for the bigscience/bloom model on the Hugging Face Inference API
from huggingface_hub import InferenceApi
inference = InferenceApi("bigscience/bloom", token=API_TOKEN)

headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url_decodemprompts = 'https://us-central1-createinsightsproject.cloudfunctions.net/getdecodemprompts'

data = {"prompt_type": 'business_tech_ideas', "decodem_token": DECODEM_TOKEN}

# Fetch the prompt template; fall back to an empty prompt if the request fails
prompt = ""
try:
    r = requests.post(url_decodemprompts, data=json.dumps(data), headers=headers, timeout=30)
    #print(r.content)
    prompt = str(r.content, 'UTF-8')
except requests.exceptions.RequestException as e:
    print(e)


def infer(prompt,
          max_length = 250,
          top_k = 0,
          num_beams = 0,
          no_repeat_ngram_size = 2,
          top_p = 0.9,
          seed=42,
          temperature=0.7,
          greedy_decoding = False,
          return_full_text = False):
    
    print(seed)
    # Normalise sentinel values (0 / False) to None so they are omitted from the request;
    # sampling is disabled whenever beam search or greedy decoding is requested
    top_k = None if top_k == 0 else top_k
    do_sample = False if num_beams > 0 else not greedy_decoding
    num_beams = None if (greedy_decoding or num_beams == 0) else num_beams
    no_repeat_ngram_size = None if num_beams is None else no_repeat_ngram_size
    top_p = None if num_beams else top_p
    early_stopping = None if num_beams is None else num_beams > 0

    params = {
        "max_new_tokens": max_length,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "do_sample": do_sample,
        "seed": seed,
        "early_stopping": early_stopping,
        "no_repeat_ngram_size": no_repeat_ngram_size,
        "num_beams": num_beams,
        "return_full_text": return_full_text
    }

    s = time.time()
    response = inference(prompt, params=params)
    #print(response)
    proc_time = time.time() - s
    #print(f"Processing time was {proc_time} seconds")
    return response
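
# Illustrative usage sketch (the prompt text and parameter values here are hypothetical,
# not from the original app): infer() returns the raw Inference API payload, which for a
# successful text-generation call is a list of dicts carrying a 'generated_text' key.
#
#   sample = infer("Input: book shop\nOutput:", max_length=100, seed=7)
#   print(sample[0]['generated_text'])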

def getideas(text_inp):
    print(text_inp)
    print(datetime.today().strftime("%d-%m-%Y"))

    # Append the user's business area to the fetched prompt template
    text = prompt + "\nInput:" + text_inp + "\nOutput:"
    resp = infer(text, seed=random.randint(0, 100))

    # Strip the prompt from the completion and keep only the first generated block
    generated_text = resp[0]['generated_text']
    result = generated_text.replace(text, '').strip()
    result = result.replace("Output:", "")
    parts = result.split("###")
    topic = parts[0].strip()
    topic = "\n".join(topic.split('\n')[:3])
    print(topic)
    return topic
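
# Note: the parsing above assumes the remotely fetched template is a few-shot prompt with
# "Input:" / "Output:" pairs separated by "###". The example below is illustrative only,
# not the actual Decodem template:
#
#   Input: ice cream parlor
#   Output: 1. An AI-driven flavour recommendation kiosk ...
#   ###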

with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Tech Ideas for Your Business</center></h1>")
    gr.Markdown(
        """ChatGPT-style insights from <a href="https://www.decodem.ai">Decodem.ai</a> for businesses.\nWhile ChatGPT has multiple use cases, we have evolved specific use cases/templates for businesses.\n\nThis template provides ideas on what a business would look like in the future. Enter a business area and get the results. We use an equally powerful AI model, bigscience/bloom."""
        )
    textbox = gr.Textbox(placeholder="Enter business type here...", lines=1, label='Your business area')
    btn = gr.Button("Generate")
    output1 = gr.Textbox(lines=2, label='The future')

    btn.click(getideas, inputs=[textbox], outputs=[output1])
    examples = gr.Examples(examples=['icecream parlor', 'space travel', 'book shop', 'ecommerce', 'grocery delivery'],
                           inputs=[textbox])
    

demo.launch()