treadon committed on
Commit
eb0afed
1 Parent(s): e128669
Files changed (1) hide show
  1. app.py +34 -3
app.py CHANGED
@@ -1,8 +1,39 @@
1
 
2
  import gradio as gr
 
 
3
 
4
def greet(name):
    """Build the greeting string shown for *name* in the Gradio text output."""
    pieces = ("Hello ", name, "!!")
    return "".join(pieces)
 
 
 
 
 
 
 
 
 
6
 
7
# Wire the greeter into a minimal Gradio UI: one text box in, one text box out.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# Start the Gradio web server (blocks until shut down).
iface.launch()
 
1
 
2
  import gradio as gr
3
+ #import peft
4
+ import transformers
5
 
6
# Inference device; CPU only — no GPU is assumed on the hosting environment.
device = "cpu"
# Flag for loading the model through a PEFT adapter. Currently disabled;
# the corresponding branch below is commented out along with the peft import.
is_peft = False
# Hub id of the fine-tuned prompt-generation model (sic: "promt" is part of the id).
model_id = "treadon/promt-fungineer-355M"
# if is_peft:
#     config = peft.PeftConfig.from_pretrained(model_id)
#     model = transformers.AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, low_cpu_mem_usage=True)
#     tokenizer = transformers.AutoTokenizer.from_pretrained(config.base_model_name_or_path)
#     model = peft.PeftModel.from_pretrained(model, model_id)
# else:
# Load the causal-LM weights; low_cpu_mem_usage reduces peak RAM during load.
model = transformers.AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True)
# NOTE(review): tokenizer is taken from the base "gpt2" checkpoint, not from
# model_id — presumably the fine-tune kept the stock GPT-2 vocabulary; confirm.
tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
17
 
18
def generate_text(prompt):
    """Generate four text samples from *prompt* using the module-level model.

    The model expects inputs prefixed with "BRF:", so the prefix is added
    when missing.

    Returns:
        list[str]: exactly four decoded samples — one per declared Gradio
        output. On generation failure the error message fills all four
        slots instead of returning an empty list (which would break the
        four-output Interface).
    """
    # Ensure the training-time prefix is present.
    if not prompt.startswith("BRF:"):
        prompt = "BRF: " + prompt

    model.eval()  # disable dropout so sampling is driven only by the decoder settings
    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    try:
        # SOFT SAMPLE: one generate() call producing four sequences.
        # (The original wrapped this in `for i in range(1)`, a no-op loop — removed.)
        outputs = model.generate(
            **inputs,
            max_length=256,
            do_sample=True,
            top_k=100,
            top_p=0.95,
            temperature=0.85,
            num_return_sequences=4,
            pad_token_id=tokenizer.eos_token_id,
        )
        samples = [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
    except Exception as e:  # best-effort: surface the failure in the UI rather than crashing
        print(e)
        samples = [f"Generation failed: {e}"] * 4

    # Defensive: the Interface declares four text outputs; always supply four.
    while len(samples) < 4:
        samples.append("")
    return samples[:4]
37
+
38
# One text box in, four text boxes out (one per sample returned by generate_text).
iface = gr.Interface(fn=generate_text, inputs="text", outputs=("text","text","text","text") )
# Start the Gradio web server (blocks until shut down).
iface.launch()