decodemai commited on
Commit
32cd2af
1 Parent(s): d8770e4

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +119 -0
app.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os
import random
import time
from datetime import datetime

import requests
import gradio as gr
from huggingface_hub import InferenceApi

print('for update')

# Secrets are injected via environment variables (HF Space settings).
API_TOKEN = os.getenv("API_TOKEN")
DECODEM_TOKEN = os.getenv("DECODEM_TOKEN")

# Client for the hosted bigscience/bloom text-generation endpoint.
inference = InferenceApi("bigscience/bloom", token=API_TOKEN)

headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url_decodemprompts = 'https://us-central1-createinsightsproject.cloudfunctions.net/getdecodemprompts'

# Fetch the base prompt template for this app from the decodem service.
data = {"prompt_type": 'ad_text_prompt', "decodem_token": DECODEM_TOKEN}
# BUG FIX: the original read `r.content` unconditionally after the
# try/except, so a ReadTimeout left `r` unbound and the next line raised
# NameError. Read the body inside the try and default to an empty prompt.
prompt_text = ''
try:
    r = requests.post(url_decodemprompts, data=json.dumps(data), headers=headers)
    prompt_text = str(r.content, 'UTF-8')
except requests.exceptions.ReadTimeout as e:
    print(e)
print(prompt_text)
def infer(prompt,
          max_length=250,
          top_k=0,
          num_beams=0,
          no_repeat_ngram_size=2,
          top_p=0.9,
          seed=42,
          temperature=0.7,
          greedy_decoding=False,
          return_full_text=False):
    """Call the hosted BLOOM model with normalized generation parameters.

    Args:
        prompt: full text prompt sent to the model.
        max_length: maximum number of new tokens to generate.
        top_k / top_p / temperature / seed: sampling controls; a top_k of 0
            means "disabled" and is sent as None.
        num_beams: beam-search width; 0 means "no beam search".
        no_repeat_ngram_size: only sent when beam search is active.
        greedy_decoding: disables sampling entirely when True.
        return_full_text: whether the API should echo the prompt back.

    Returns:
        The raw InferenceApi response (a list of dicts with
        'generated_text' on success).
    """
    print(seed)
    # Normalize the knobs: 0 acts as "unset" for top_k and num_beams, and
    # beam search is mutually exclusive with nucleus sampling (top_p).
    top_k = None if top_k == 0 else top_k
    do_sample = False if num_beams > 0 else not greedy_decoding
    num_beams = None if (greedy_decoding or num_beams == 0) else num_beams
    no_repeat_ngram_size = None if num_beams is None else no_repeat_ngram_size
    top_p = None if num_beams else top_p
    early_stopping = None if num_beams is None else num_beams > 0

    params = {
        "max_new_tokens": max_length,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "do_sample": do_sample,
        "seed": seed,
        "early_stopping": early_stopping,
        "no_repeat_ngram_size": no_repeat_ngram_size,
        "num_beams": num_beams,
        "return_full_text": return_full_text,
    }

    # NOTE: removed dead timing code (start-time capture and an unused
    # proc_time local whose only consumer was a commented-out print).
    return inference(prompt, params=params)
def getadline(text_inp):
    """Generate ad-line suggestions for *text_inp* via BLOOM, then screen
    the result against a public profanity word list.

    Args:
        text_inp: the user's business/product description.

    Returns:
        Up to the first three generated lines, or a fixed warning string
        when unsafe content is detected in the input or the output.
    """
    print(text_inp)
    print(datetime.today().strftime("%d-%m-%Y"))

    # BUG FIX: the original used the undefined name `prompt`; the template
    # fetched at module load is `prompt_text`.
    text = prompt_text + "\nInput:" + text_inp + "\nOutput:"
    resp = infer(text, seed=random.randint(0, 100))

    # Strip the echoed prompt and keep only the model's completion.
    generated_text = resp[0]['generated_text']
    result = generated_text.replace(text, '').strip()
    result = result.replace("Output:", "")
    parts = result.split("###")
    topic = parts[0].strip()
    # Keep at most the first three lines of the completion.
    topic = "\n".join(topic.split('\n')[:3])

    # Profanity screen: fetch a public word list and check both the model
    # output and the user input.
    response_nsfw = requests.get('https://github.com/coffee-and-fun/google-profanity-words/raw/main/data/list.txt')
    data_nsfw = response_nsfw.text
    nsfwlist = data_nsfw.split('\n')
    nsfwlowerlist = []
    for each in nsfwlist:
        if each != '':
            nsfwlowerlist.append(each.lower())
    nsfwlowerlist.extend(['bra', 'gay', 'lesbian', ])
    print(topic)
    foundnsfw = 0
    for each_word in nsfwlowerlist:
        # BUG FIX: lower-case the user input as well — the word list is
        # lower-cased, so mixed-case input previously bypassed the filter.
        if each_word in topic.lower() or each_word in text_inp.lower():
            foundnsfw = 1
            break
    if foundnsfw == 1:
        topic = "Unsafe content found. Please try again with different prompts."
        print(topic)
    return (topic)
# Gradio UI: one input textbox, a Generate button, and a text output wired
# to the getadline handler defined above.
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Market Sizing Framework for Your Business</center></h1>")
    gr.Markdown(
        """ChatGPT based Insights from <a href="https://www.decodem.ai">Decodem.ai</a> for businesses.\nWhile ChatGPT has multiple use cases we have evolved specific use cases/ templates for businesses \n\n This template provides ideas on how a business can size a market they are entering. Enter a business area to size and get the results. Use examples as a guide. We use a equally powerful AI model bigscience/bloom."""
    )
    textbox = gr.Textbox(placeholder="Enter market size focus for business here...", lines=1, label='Your business area')
    btn = gr.Button("Generate")
    # BUG FIX: the original wired an undefined name `getideas` to an Image
    # output, which raised NameError at startup; the handler defined in this
    # file is `getadline`, and it returns a string, so the (previously
    # commented-out) Textbox output is the correct component.
    output1 = gr.Textbox(lines=2, label='Market Sizing Framework')

    btn.click(getadline, inputs=[textbox], outputs=[output1])
    examples = gr.Examples(examples=['ice cream parlor in London', 'HR saas for fintech', 'book shops in NYC', 'Starbucks cafe in Bangalore', 'organic vegetables via ecommerce', 'grocery delivery'],
                           inputs=[textbox])

demo.launch()