daedalus314 committed on
Commit
1d32da9
1 Parent(s): 4355f91

Add app.py

Browse files

App.py includes the main code to run the PEFT model fine-tuned using
Quantum LoRA.

Files changed (1) hide show
  1. app.py +79 -0
app.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import gradio as gr
4
+ import torch
5
+ from googleapiclient import discovery
6
+ from peft import PeftModel, PeftConfig
7
+ from transformers import AutoTokenizer, AutoModelForCausalLM
8
+
9
+ peft_model_id = "daedalus314/quantum-lora-gpt-neo-125M"
10
+ config = PeftConfig.from_pretrained(peft_model_id)
11
+ model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
12
+ model = PeftModel.from_pretrained(model, peft_model_id)
13
+ tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
14
+
15
+ API_KEY = os.environ["perspectiveapi"]
16
+
17
+ client = discovery.build(
18
+ "commentanalyzer",
19
+ "v1alpha1",
20
+ developerKey=API_KEY,
21
+ discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
22
+ static_discovery=False,
23
+ )
24
+
25
def analyze_request(text):
    """Build a Perspective API request body asking for a TOXICITY score on *text*.

    The request opts out of storage (``doNotStore``) since generated quotes
    are transient.
    """
    request_body = {
        'comment': { 'text': text },
        'requestedAttributes': {'TOXICITY': {}},
        'doNotStore': True
    }
    return request_body
31
+
32
def generate(cond_text, temperature, top_p, num_return_sequences):
    """Sample quote completions for *cond_text* and screen them for toxicity.

    Returns the concatenated generated quotes as one string, or a fixed
    warning message when Perspective API scores the batch above a 0.6
    toxicity threshold.
    """
    # Prefix an opening curly quote so the model continues a quotation.
    cond_text = f"“{cond_text}"
    encoded = tokenizer(cond_text, return_tensors="pt")
    generations = model.generate(
        **encoded,
        max_new_tokens=100,
        do_sample=True,
        top_p=float(top_p),
        temperature=float(temperature),
        repetition_penalty=1.2,
        # Stop generating once the model emits a closing curly quote.
        eos_token_id=tokenizer.encode("”")[0],
        pad_token_id=tokenizer.encode("�")[0],
        num_return_sequences=int(num_return_sequences)
    )
    pieces = []
    for sequence in generations:
        text = tokenizer.decode(sequence, skip_special_tokens=True)
        text = text.replace("�", "")
        # Keep everything from the opening quote onwards.
        # NOTE(review): the appended character is an opening quote “;
        # presumably a closing ” was intended — confirm with the author.
        pieces.append(f"{text[text.find('“'):].strip()}“\n")
    result = "".join(pieces)
    # Score the whole batch at once; reject it all if Perspective flags toxicity.
    perspective_eval = client.comments().analyze(body=analyze_request(result)).execute()
    if perspective_eval["attributeScores"]["TOXICITY"]["spanScores"][0]["score"]["value"] > 0.6:
        return "Unethical result generated, please try again."
    return result
55
+
56
# Gradio UI: one text box (the quote's opening words) plus three sampling
# controls, all passed positionally to generate().
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(value="", max_lines=1, placeholder="Conditioning text"),
        # Sampling temperature, restricted to [0.6, 1.0].
        gr.Slider(0.6, 1.0, step=0.05, value=0.8),
        # Nucleus (top-p) threshold, restricted to [0.6, 1.0].
        gr.Slider(0.6, 1.0, step=0.05, value=0.8),
        # Number of quote candidates generated per request.
        gr.Slider(1, 10, step=1, value=10)
    ],
    examples=[
        ["When I look at the universe", 0.8, 0.8, 10],
        ["It is in our darkest moments", 0.8, 0.8, 10],
    ],
    outputs="text",
    allow_flagging="never",
    title="Quantum LoRA quote generator",
    description="This model is a fine-tuned version of GPT-Neo-125M over `Abirate/english_quotes`. "
    "The fine-tuning has been done using Quantum LoRA: https://github.com/Dedalo314/peft. "
    "The text `cond_text` is used as the start of the quote. All quotes are validated with "
    "Perspective API to ensure they are not toxic. The generation can take up to a few minutes as "
    "the model is running on a CPU.",
    article="**Disclaimer:** this model is not meant for unethical purposes. The outputs should always be manually checked."
)

# Blocks and serves the app; everything above runs once at startup.
demo.launch()