playgrdstar committed on
Commit
54d55c0
1 Parent(s): f5a0148

Add app.py and requirements.txt

Files changed (2)
  1. app.py +228 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,228 @@
+ import os, requests
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
+
+ # Inference API token; assumed here to be supplied via the environment
+ # (e.g. as a Space secret named HF_READ_API_KEY)
+ HF_READ_API_KEY = os.environ.get("HF_READ_API_KEY")
+
+ ### This code loads the models and undertakes inference locally ###
+
+ # Examples of loading specific model classes directly:
+ # from transformers import GPTNeoForCausalLM, GPT2Tokenizer, T5Tokenizer, T5ForConditionalGeneration
+ # model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
+ # tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
+ # tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
+ # model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-small")
+
+ model_list = ['google/flan-t5-small', 'google/flan-t5-base', 'google/flan-t5-large', 'google/flan-t5-xl', 'google/flan-t5-xxl',
+               'gpt2-medium', 'gpt2-large', 'gpt2-xl',
+               'EleutherAI/gpt-neo-1.3B', 'EleutherAI/gpt-neo-2.7B', 'EleutherAI/gpt-j-6b', 'EleutherAI/gpt-neox-20b',
+               'bigscience/bloom-1b7', 'bigscience/bloom-3b', 'bigscience/bloom-7b1']
+
+ def load_model(model_name):
+     # Flan-T5 checkpoints are encoder-decoder models; everything else in
+     # model_list (GPT-2, GPT-Neo/NeoX, GPT-J, BLOOM) is a decoder-only causal LM
+     if 'flan-t5' in model_name:
+         model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+     else:
+         model = AutoModelForCausalLM.from_pretrained(model_name)
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     tokenizer.pad_token = tokenizer.eos_token
+     # tokenizer.padding_side = "left"
+     return model, tokenizer
+
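+ # Quick local check (commented out); assumes the smallest checkpoint is enough
+ # to verify the wiring:
+ # model, tokenizer = load_model("google/flan-t5-small")
+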
+ def maybe_is_truncated(s):
+     # Heuristic: text that does not end with sentence-final punctuation is
+     # probably cut off mid-sentence
+     punct = [".", "!", "?", '"']
+     if s and s[-1] in punct:
+         return False
+     return True
+
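+ # e.g. maybe_is_truncated("The sky is blue.") -> False
+ #      maybe_is_truncated("The sky is")       -> True
+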
+ def load_and_generate(model_name, prompt):
+     model, tokenizer = load_model(model_name)
+
+     temperature = 0.25
+     tokens = tokenizer(prompt, return_tensors="pt")
+     # budget: roughly the prompt length plus a handful of new tokens
+     max_length = len(tokens.input_ids[0]) + 5
+     input_ids = tokens.input_ids
+     attention_mask = tokens.attention_mask
+     # see huggingface.co/docs/transformers/main_classes/text_generation
+     gen_tokens = model.generate(
+         input_ids=input_ids,
+         attention_mask=attention_mask,
+         pad_token_id=tokenizer.eos_token_id,
+         do_sample=True,
+         temperature=temperature,
+         # max_length=max_length,
+         max_new_tokens=max_length,
+         # use_cache=False,
+         # penalty_alpha=0.1,
+         # top_k=100,
+         # early_stopping=False
+     )
+     gen_text = tokenizer.batch_decode(gen_tokens)[0]
+
+     # If the output looks cut off, keep extending it a few tokens at a time,
+     # for at most 20 rounds
+     max_times = 20
+     while maybe_is_truncated(gen_text) and max_times > 0:
+         tokens = tokenizer(gen_text, return_tensors="pt")
+         max_length = len(tokens.input_ids[0]) + 5
+         input_ids = tokens.input_ids
+         attention_mask = tokens.attention_mask
+
+         gen_tokens = model.generate(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             pad_token_id=tokenizer.eos_token_id,
+             do_sample=True,
+             temperature=temperature,
+             max_length=max_length,
+             # max_new_tokens=100,
+             # use_cache=True,
+             # penalty_alpha=0.1,
+             # top_k=100,
+             # early_stopping=False
+         )
+
+         gen_text = tokenizer.batch_decode(gen_tokens)[0]
+
+         max_times -= 1
+
+     # strip the special tokens that seq2seq models emit
+     return gen_text.replace("<pad>", "").replace("</s>", "")
+
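+ # Local sanity check (commented out); a sketch only, since it downloads the
+ # checkpoint on first run:
+ # print(load_and_generate("google/flan-t5-small", "Complete the following sentence: The sky is "))
+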
+ ### This code calls the Hugging Face Inference API ###
+
+ def generate_from_api(query, model_name, temperature, max_tokens):
+     headers = {"Authorization": f"Bearer {HF_READ_API_KEY}"}
+
+     model_api_url = f"https://api-inference.huggingface.co/models/{model_name}"
+
+     # generation settings belong in the JSON payload, not the request headers
+     payload = {
+         "inputs": query,
+         "parameters": {"temperature": temperature,
+                        "max_new_tokens": max_tokens,
+                        "max_time": 120},
+         "options": {"wait_for_model": True},
+     }
+
+     response = requests.post(model_api_url, headers=headers, json=payload)
+     # retry a few times (e.g. while the model is still loading) instead of forever
+     retries = 5
+     while response.status_code != 200 and retries > 0:
+         response = requests.post(model_api_url, headers=headers, json=payload)
+         retries -= 1
+     return response.json()[0]['generated_text']
+
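+ # Minimal sanity check (commented out); assumes HF_READ_API_KEY is set and the
+ # model is available on the Inference API:
+ # print(generate_from_api("The sky is ", "google/flan-t5-small", 0.7, 30))
+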
+ with gr.Blocks(css='style.css') as demo:
+     gr.HTML("""
+     <div style="text-align: center; max-width: 1240px; margin: 0 auto;">
+         <h1 style="font-weight: 200; font-size: 20px; margin-bottom:8px; margin-top:0px;">
+             Different Strokes (Prompts) for Different Folks (LLMs)
+         </h1>
+         <h4 style="font-weight: 50; font-size: 14px; margin-bottom:0px; margin-top:0px;">
+             After reading the <a href="https://github.com/dair-ai/Prompt-Engineering-Guide">Prompt Engineering Guide</a>, an excellent guide on prompts for large language models (LLMs), specifically OpenAI's, I was interested in seeing the results for other LLMs. So I put together a simple demonstration of different prompts across popular language models of different sizes. The prompt examples are taken from the <a href="https://github.com/dair-ai/Prompt-Engineering-Guide">Prompt Engineering Guide</a>, and the LLMs that you can select below are all available on Hugging Face. If you would like to compare the results with those from OpenAI's models, refer to the <a href="https://github.com/dair-ai/Prompt-Engineering-Guide">Prompt Engineering Guide</a> itself.
+         </h4>
+     </div>
+     """)
+
+     with gr.Column(elem_id="col-container"):
+         with gr.Row(variant="compact"):
+             model_name = gr.Dropdown(
+                 model_list,
+                 label="Select model",
+                 value=model_list[0],
+             ).style(
+                 container=False,
+             )
+
+             temperature = gr.Slider(
+                 0.1, 100.0, value=1.0, label="Temperature",
+             ).style(
+                 container=False,
+             )
+
+             max_tokens = gr.Slider(
+                 10, 250, step=1, value=100, label="Max. Tokens (in Output)",
+             ).style(
+                 container=False,
+             )
+
+         with gr.Row(variant="compact"):
+             prompt = gr.Textbox(
+                 label="Enter your prompt",
+                 show_label=False,
+                 # max_lines=2,
+                 placeholder="Select your prompt below",
+             ).style(
+                 container=False,
+             )
+             process = gr.Button("Generate").style(full_width=False)
+
+         with gr.Row():
+             output = gr.Textbox(
+                 label="LLM Output",
+                 show_label=True)
+
+     gr.HTML("""
+     <div>
+         <h4 style="font-weight: 50; font-size: 14px; margin-bottom:0px; margin-top:0px;">
+             Prompt examples. Select the prompt you would like to test, and it will appear (properly formatted) in the input box above.
+         </h4>
+     </div>
+     """)
+     with gr.Tab("Intro."):
+         example_set_1 = gr.Examples(label='Simple Prompt vs. Instruct then Prompt.',
+                                     examples=["The sky is ", "Complete the following sentence: The sky is ",],
+                                     inputs=[prompt])
+         example_set_2 = gr.Examples(label='Few Shot Prompt.',
+                                     examples=["This is awesome! // Positive\nThis is bad! // Negative\nWow that movie was rad! // Positive\nWhat a horrible show! //",],
+                                     inputs=[prompt])
+         example_set_3 = gr.Examples(label='Explicitly Specify the Instruction',
+                                     examples=["### Instruction ###\nTranslate the text below to Spanish:\nText: 'hello!'",],
+                                     inputs=[prompt])
+         example_set_4 = gr.Examples(label='Be Very Specific',
+                                     examples=["Extract the name of places in the following text.\nDesired format:\nPlace: <comma_separated_list_of_company_names>\nInput: 'Although these developments are encouraging to researchers, much is still a mystery. “We often have a black box between the brain and the effect we see in the periphery,” says Henrique Veiga-Fernandes, a neuroimmunologist at the Champalimaud Centre for the Unknown in Lisbon. “If we want to use it in the therapeutic context, we actually need to understand the mechanism.'",],
+                                     inputs=[prompt])
+         example_set_5 = gr.Examples(label='Precision',
+                                     examples=["Explain the concept of deep learning. Keep the explanation short, only a few sentences, and don't be too descriptive.", "Use 2-3 sentences to explain the concept of deep learning to a high school student."],
+                                     inputs=[prompt])
+         example_set_6 = gr.Examples(label='Focus on What the LLM Should Do',
+                                     examples=["The following is an agent that recommends movies to a customer. The agent is responsible to recommend a movie from the top global trending movies. It should refrain from asking users for their preferences and avoid asking for personal information. If the agent doesn't have a movie to recommend, it should respond 'Sorry, couldn't find a movie to recommend today.'.\nCustomer: Please recommend a movie based on my interests.\nAgent:"],
+                                     inputs=[prompt])
+
+     with gr.Tab("Basics"):
+         example_set_7 = gr.Examples(label='Explain vs. Summarize',
+                                     examples=["Explain antibiotics.\nA:", "Antibiotics are a type of medication used to treat bacterial infections. They work by either killing the bacteria or preventing them from reproducing, allowing the body’s immune system to fight off the infection. Antibiotics are usually taken orally in the form of pills, capsules, or liquid solutions, or sometimes administered intravenously. They are not effective against viral infections, and using them inappropriately can lead to antibiotic resistance.\nExplain the above in one sentence:",],
+                                     inputs=[prompt])
+         example_set_8 = gr.Examples(label='Information Extraction',
+                                     examples=["Author-contribution statements and acknowledgements in research papers should state clearly and specifically whether, and to what extent, the authors used AI technologies such as ChatGPT in the preparation of their manuscript and analysis. They should also indicate which LLMs were used. This will alert editors and reviewers to scrutinize manuscripts more carefully for potential biases, inaccuracies and improper source crediting. Likewise, scientific journals should be transparent about their use of LLMs, for example when selecting submitted manuscripts.\nMention the large language model based product mentioned in the paragraph above:",],
+                                     inputs=[prompt])
+         example_set_9 = gr.Examples(label='Question and Answer',
+                                     examples=["Answer the question based on the context below. Keep the answer short and concise. Respond 'Unsure about answer' if not sure about the answer.\nContext: Teplizumab traces its roots to a New Jersey drug company called Ortho Pharmaceutical. There, scientists generated an early version of the antibody, dubbed OKT3. Originally sourced from mice, the molecule was able to bind to the surface of T cells and limit their cell-killing potential. In 1986, it was approved to help prevent organ rejection after kidney transplants, making it the first therapeutic antibody allowed for human use.\nQuestion: What was OKT3 originally sourced from?\nAnswer:",],
+                                     inputs=[prompt])
+         example_set_10 = gr.Examples(label='Text Classification',
+                                      examples=["Classify the text into neutral, negative or positive.\nText: I think the food was okay.\nSentiment:", "Classify the text into neutral, negative or positive.\nText: I think the vacation is okay.\nSentiment: neutral\nText: I think the food was okay.\nSentiment:"],
+                                      inputs=[prompt])
+         example_set_11 = gr.Examples(label='Conversation',
+                                      examples=["The following is a conversation with an AI research assistant. The assistant tone is technical and scientific.\nHuman: Hello, who are you?\nAI: Greeting! I am an AI research assistant. How can I help you today?\nHuman: Can you tell me about the creation of blackholes?\nAI:", "The following is a conversation with an AI research assistant. The assistant answers should be easy to understand even by primary school students.\nHuman: Hello, who are you?\nAI: Greeting! I am an AI research assistant. How can I help you today?\nHuman: Can you tell me about the creation of black holes?\nAI: "],
+                                      inputs=[prompt])
+         example_set_12 = gr.Examples(label='Reasoning',
+                                      examples=["The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nA: ", "The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nSolve by breaking the problem into steps. First, identify the odd numbers, add them, and indicate whether the result is odd or even."],
+                                      inputs=[prompt])
+
+     with gr.Tab("Advanced"):
+         example_set_13 = gr.Examples(label='Zero Shot',
+                                      examples=["Classify the text into neutral, negative or positive.\nText: I think the vacation is okay.\nSentiment:",],
+                                      inputs=[prompt])
+         example_set_14 = gr.Examples(label='Few Shot',
+                                      examples=["The odd numbers in this group add up to an even number: 4, 8, 9, 15, 12, 2, 1.\nA: The answer is False.\n\nThe odd numbers in this group add up to an even number: 17, 10, 19, 4, 8, 12, 24.\nA: The answer is True.\n\nThe odd numbers in this group add up to an even number: 16, 11, 14, 4, 8, 13, 24.\nA: The answer is True.\n\nThe odd numbers in this group add up to an even number: 17, 9, 10, 12, 13, 4, 2.\nA: The answer is False.\n\nThe odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nA: ",],
+                                      inputs=[prompt])
+         example_set_15 = gr.Examples(label='Chain of Thought',
+                                      examples=["The odd numbers in this group add up to an even number: 4, 8, 9, 15, 12, 2, 1.\nA: Adding all the odd numbers (9, 15, 1) gives 25. The answer is False.\n\nThe odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nA:",],
+                                      inputs=[prompt])
+         example_set_16 = gr.Examples(label='Zero Shot Chain of Thought',
+                                      examples=["I went to the market and bought 10 apples. I gave 2 apples to the neighbor and 2 to the repairman. I then went and bought 5 more apples and ate 1. How many apples did I remain with?\nLet's think step by step.",],
+                                      inputs=[prompt])
+         example_set_17 = gr.Examples(label='Self Consistency',
+                                      examples=["Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\nA: We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted.\nSo, they must have planted 21 - 15 = 6 trees. The answer is 6.\n\nQ: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\nA: There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.\n\nQ: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\nA: She bought 5 bagels for $3 each. This means she spent 5 * 3 = $15. She has $23 - $15 = $8 left. The answer is 8.\n\nQ: When I was 6 my sister was half my age. Now I’m 70 how old is my sister?\nA:",],
+                                      inputs=[prompt])
+         example_set_18 = gr.Examples(label='Generating Knowledge',
+                                      examples=["Input: Greece is larger than mexico.\nKnowledge: Greece is approximately 131,957 sq km, while Mexico is approximately 1,964,375 sq km, making Mexico 1,389% larger than Greece.\n\nInput: Glasses always fog up.\nKnowledge: Condensation occurs on eyeglass lenses when water vapor from your sweat, breath, and ambient humidity lands on a cold surface, cools, and then changes into tiny drops of liquid, forming a film that you see as fog. Your lenses will be relatively cool compared to your breath, especially when the outside air is cold.\n\nInput: A fish is capable of thinking.\nKnowledge: Fish are more intelligent than they appear. In many areas, such as memory, their cognitive powers match or exceed those of ’higher’ vertebrates including non-human primates. Fish’s long-term memories help them keep track of complex social relationships.\n\nInput: A common effect of smoking lots of cigarettes in one’s lifetime is a higher than normal chance of getting lung cancer.\nKnowledge: Those who consistently averaged less than one cigarette per day over their lifetime had nine times the risk of dying from lung cancer than never smokers. Among people who smoked between one and 10 cigarettes per day, the risk of dying from lung cancer was nearly 12 times higher than that of never smokers.\n\nInput: Part of golf is trying to get a higher point total than others.\nKnowledge:",],
+                                      inputs=[prompt])
+
+     # process.click(load_and_generate, inputs=[model_name, prompt], outputs=[output])
+     process.click(generate_from_api, inputs=[prompt, model_name, temperature, max_tokens], outputs=[output])
+
+ demo.launch(server_port=8080)
+ # demo.launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ transformers
+ accelerate