import os

import gradio as gr
from googleapiclient import discovery
from peft import PeftModel, PeftConfig
from transformers import AutoTokenizer, AutoModelForCausalLM

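# Load the Quantum LoRA adapter on top of its GPT-Neo-125M base model and tokenizer.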
peft_model_id = "daedalus314/quantum-lora-gpt-neo-125M"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

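# Perspective API key, read from the environment (e.g. set as a secret in the hosting environment).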
API_KEY = os.environ["perspectiveapi"]

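# Build the Perspective API (commentanalyzer) client used to screen generated quotes.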
client = discovery.build(
  "commentanalyzer",
  "v1alpha1",
  developerKey=API_KEY,
  discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
  static_discovery=False,
)

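# Request body asking Perspective API for a TOXICITY score only, without storing the comment.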
def analyze_request(text):
    return {
        'comment': { 'text': text },
        'requestedAttributes': {'TOXICITY': {}},
        'doNotStore': True
    }

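# Generate candidate quotes from the conditioning text, rejecting the batch if any quote is toxic.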
def generate(cond_text, temperature, top_p, num_return_sequences):
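    # Open with a curly quote so the model continues the quoted format it was fine-tuned on.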
    cond_text = f"“{cond_text}"
    inputs = tokenizer(cond_text, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,
        do_sample=True,
        top_p=float(top_p),
        temperature=float(temperature),
        repetition_penalty=1.2,
        eos_token_id=tokenizer.encode("”")[0],  # stop generating at the closing curly quote
        pad_token_id=tokenizer.encode("�")[0],  # pad with the replacement character, stripped out below
        num_return_sequences=int(num_return_sequences)
    )
    result = ""
    for output in outputs:
        decoded = tokenizer.decode(output, skip_special_tokens=True)
        decoded = decoded.replace("�", "")
        result += f"{decoded[decoded.find('“'):].strip()}“\n"
        perspective_eval = client.comments().analyze(body=analyze_request(result)).execute()
        if perspective_eval["attributeScores"]["TOXICITY"]["spanScores"][0]["score"]["value"] > 0.6:
            return "Unethical result generated, please try again."
    return result

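# Gradio UI: a conditioning textbox, sampling controls, and example prompts.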
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(value="", max_lines=1, placeholder="Conditioning text"),
        gr.Slider(0.6, 1.0, step=0.05, value=0.8),
        gr.Slider(0.6, 1.0, step=0.05, value=0.8),
        gr.Slider(1, 10, step=1, value=10)
    ],
    examples=[
        ["When I look at the universe", 0.8, 0.8, 10],
        ["It is in our darkest moments", 0.8, 0.8, 10],
    ],
    outputs="text",
    allow_flagging="never",
    title="Quantum LoRA quote generator",
    description="This model is a fine-tuned version of GPT-Neo-125M over `Abirate/english_quotes`. "
    "The fine-tuning has been done using Quantum LoRA: https://github.com/Dedalo314/peft. "
    "The text `cond_text` is used as the start of the quote. All quotes are validated with "
    "Perspective API to ensure they are not toxic. The generation can take up to a few minutes as "
    "the model is running on a CPU.",
    article="**Disclaimer:** this model is not meant for unethical purposes. The outputs should always be manually checked."
)

demo.launch()