import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForMaskedLM, AutoModelForCausalLM

BERTTokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
BERTModel = AutoModelForMaskedLM.from_pretrained("cl-tohoku/bert-base-japanese")

mBERTTokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
mBERTModel = AutoModelForMaskedLM.from_pretrained("bert-base-multilingual-cased")

GPT2Tokenizer = AutoTokenizer.from_pretrained("rinna/japanese-gpt2-medium")
GPT2Model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt2-medium")

votes=[]  # running tally of votes; greet() only looks at the last three entries
def MELCHIOR(sue):#BERT
    # Token ids for 承認 (approve) and 否定 (deny); index 1 skips the leading [CLS].
    allow=BERTTokenizer("承認").input_ids[1]
    deny=BERTTokenizer("否定").input_ids[1]
    # Prompt (Japanese): "MELCHIOR has the personality of a scientist. A dialogue between a human
    # and MELCHIOR. Human: '<sue>. Please answer with either 承認 or 否定.' MELCHIOR: '[MASK]'"
    output=BERTModel(**BERTTokenizer('MELCHIORは科学者としての人格を持っています。人間とMELCHIORの対話です。人間「'+sue+'。承認 か 否定 のどちらかで答えてください。」'+"MELCHIOR 「[MASK]」",return_tensors="pt")).logits
    # Logits at the [MASK] position (third token from the end: [MASK], 」, [SEP]).
    mask=output[0,-3,:]
    votes.append(1 if mask[allow]>mask[deny] else -1)
    return "承認" if mask[allow]>mask[deny] else "否定"

def BALTHASAR(sue):#mBERT
    # Token ids for "Yes" and "No"; index 1 skips the leading [CLS].
    allow=mBERTTokenizer("Yes").input_ids[1]
    deny=mBERTTokenizer("No").input_ids[1]
    # Prompt (Japanese): "BALTHASAR has the personality of a mother. A dialogue between a human
    # and BALTHASAR. Human: '<sue>. Yes or No?' BALTHASAR: '[MASK]'"
    output=mBERTModel(**mBERTTokenizer('BALTHASARは母としての人格を持っています。人間とBALTHASARの対話です。人間「'+sue+'。YesかNoか。」'+"BALTHASAR 「[MASK]」",return_tensors="pt")).logits
    # Logits at the [MASK] position (third token from the end: [MASK], 」, [SEP]).
    mask=output[0,-3,:]
    votes.append(1 if mask[allow]>mask[deny] else -1)
    return "承認" if mask[allow]>mask[deny] else "否定"


def CASPER(sue):#GPT2
    # Token ids for 承認 (approve) and 否定 (deny).
    allow=GPT2Tokenizer("承認").input_ids[1]
    deny=GPT2Tokenizer("否定").input_ids[1]
    # Prompt (Japanese): "A dialogue between a human and Casper, an AI with the personality of a
    # woman. Human: '<sue>. Approve or deny this?' Casper: 'I,'"
    inpt=GPT2Tokenizer('女としての人格を持ったAI・カスパーと人間の対話です。人間「'+sue+'。これに承認か否定か。」'+"カスパー「私は,",return_tensors="pt")
    # Drop the last input token so the final position predicts Casper's next word.
    probs=GPT2Model(input_ids=inpt.input_ids[:,:-1],attention_mask=inpt.attention_mask[:,:-1]).logits[0]
    # Compare the next-token logits for 承認 vs 否定 at that final position.
    votes.append(1 if probs[-1][allow]>probs[-1][deny] else -1)
    return "承認" if probs[-1][allow]>probs[-1][deny] else "否定"


def greet(sue):
    # Ask all three voters, then report 可決 (passed) or 否決 (rejected) by majority of their last three votes.
    text1="BERT-1"+MELCHIOR(sue)
    text2="GPT-2"+CASPER(sue)
    text3="mBERT-3"+BALTHASAR(sue)
    return text1+" "+text2+" "+text3+"\n___\n\n"+("|可決|" if sum(votes[-3:])>0 else "| 否決 |")+"\n___"
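# Quick offline check (a sketch; left commented out so the script only launches the UI below):
#   print(greet("明日の会議を中止する"))   # "cancel tomorrow's meeting" -> e.g. "BERT-1承認 GPT-2否定 mBERT-3承認 ... |可決|"
#   print(votes[-3:])                      # the three raw votes (1 = approve, -1 = deny) behind the verdict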


css="@import url('https://fonts.googleapis.com/css2?family=Shippori+Mincho:wght@800&display=swap');  .gradio-container {background-color: black} .gr-button {background-color: blue;color:black; weight:200%;font-family:'Shippori Mincho', serif;}"
css+=".block{color:orange;} ::placeholder {font-size:35%} .gr-box {text-align: center;font-size: 125%;border-color:orange;background-color: #000000;weight:200%;font-family:'Shippori Mincho', serif;}:disabled {color: orange;opacity:1.0;}"
with gr.Blocks(css=css) as demo:
    sue = gr.Textbox(label="NAGI System",placeholder="決議を入力(多数決)")
    greet_btn = gr.Button("提訴")
    output = gr.Textbox(label="決議",placeholder="本システムは事前学習モデルのpromptにより行われています.決議結果に対して当サービス開発者は一切の責任を負いません.")
    greet_btn.click(fn=greet, inputs=sue, outputs=output)
demo.launch()