# NOTE: web-viewer residue (file-size banner, commit hashes, and gutter line
# numbers) from the page this file was scraped from was removed here; it was
# not part of the program source.
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForMaskedLM, AutoModelForSeq2SeqLM, AutoModelForCausalLM

# Backends for the three "MAGI" voter personalities (Evangelion homage).
# Japanese BERT masked-LM: used by MELCHIOR to score 承認 vs 否定 at a [MASK] slot.
BERTTokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
BERTModel = AutoModelForMaskedLM.from_pretrained("cl-tohoku/bert-base-japanese")

# Multilingual BERT masked-LM: used by the mBERT variant of BALTHASAR.
mBERTTokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
mBERTModel = AutoModelForMaskedLM.from_pretrained("bert-base-multilingual-cased")

# Japanese GPT-2: loaded but not referenced by any function in this file —
# presumably intended for the CASPER vote (greet() calls CASPER, which is
# never defined here). TODO confirm intended use.
GPT2Tokenizer = AutoTokenizer.from_pretrained("rinna/japanese-gpt2-medium")
GPT2Model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt2-medium")

# Global vote ledger: each personality appends +1 (承認) or -1 (否定);
# greet() tallies the last three entries. Grows without bound across requests.
votes=[]
def MELCHIOR(sue):#BERT
    """MELCHIOR (scientist personality): vote 承認/否定 on *sue* with Japanese BERT.

    Builds a roleplay prompt ending in 「[MASK]」 and compares the masked-LM
    logits for the tokens 承認 (approve) and 否定 (deny) at the [MASK]
    position. Side effect: appends +1/-1 to the module-level `votes` list.

    Returns the winning answer string, "承認" or "否定".
    """
    # input_ids[1] is the first content token after the leading [CLS].
    allow=BERTTokenizer("承認").input_ids[1]
    deny=BERTTokenizer("否定").input_ids[1]
    output=BERTModel(**BERTTokenizer('MELCHIORは科学者としての人格を持っています。人間とMELCHIORの対話です。人間「'+sue+'。承認 か 否定 のどちらかで答えてください。」'+"MELCHIOR 「[MASK]」",return_tensors="pt")).logits
    # Logits at the [MASK] slot: index -3 counts back past the trailing 」 and [SEP].
    # (A leftover debug call that decoded and discarded the argmax sequence was removed.)
    mask=output[0,-3,:]
    votes.append(1 if mask[allow]>mask[deny] else -1)
    return "承認"  if mask[allow]>mask[deny] else "否定"

def BALTHASAR(sue):#mT5
    """Dead code: mT5-based BALTHASAR vote (承認/否定) via one decoder step.

    NOTE(review): this definition is shadowed by the second `def BALTHASAR`
    below, so it is unreachable under the name BALTHASAR. It also references
    `mT5Tokenizer` and `mT5Model`, which are not defined anywhere in this
    file — calling it would raise NameError. Either delete it or load the
    mT5 model and rename one of the two definitions.
    """
    allow=mT5Tokenizer("承認").input_ids[1]
    deny=mT5Tokenizer("否定").input_ids[1]
    encoder_output=mT5Model.encoder(**mT5Tokenizer('BALTHASARは母としての人格としての人格を持っています。人間とBALTHASARの対話です。人間「'+sue+'。承認 か 否定 のどちらかで答えてください。」'+"BALTHASAR 「<X>」",return_tensors="pt"))
    id=None
    p_answer=None
    probs=None
    # Single greedy decode step from the "<pad><X>" prefix; the loop counter
    # `i` is incremented once and never used to iterate further.
    i=0
    txt="<pad><X>"
    i=i+1
    probs=mT5Model(inputs_embeds=encoder_output.last_hidden_state,decoder_input_ids=mT5Tokenizer(txt,return_tensors="pt").input_ids[:,:-1]).logits[0]
    id=torch.argmax(probs[-1])
    txt=txt+mT5Tokenizer.decode(id)
    # Compare logits of 承認 vs 否定 at the final decoder position.
    votes.append(1 if probs[-1][allow]>probs[-1][deny] else -1)
    return "承認"  if probs[-1][allow]>probs[-1][deny] else "否定"

def BALTHASAR(sue):#mBERT
    """BALTHASAR vote via multilingual BERT: compare [MASK] logits for "Yes" vs "No".

    Side effect: appends +1/-1 to the module-level `votes` list.
    Returns "承認" (if "Yes" outscores "No") or "否定".

    NOTE(review): the prompt text names MELCHIOR, not BALTHASAR — looks like a
    copy-paste from MELCHIOR(); confirm intent before changing the runtime string.
    """
    allow=mBERTTokenizer("Yes").input_ids[1]
    deny=mBERTTokenizer("No").input_ids[1]
    output=mBERTModel(**mBERTTokenizer('MELCHIORは科学者としての人格を持っています。人間とMELCHIORの対話です。人間「'+sue+'。賛成か反対か。」'+"MELCHIOR 「[MASK]」",return_tensors="pt")).logits
    # Logits at the [MASK] slot (index -3: back past the trailing 」 and [SEP]).
    # (A leftover per-request debug print of the argmax token was removed.)
    mask=output[0,-3,:]
    votes.append(1 if mask[allow]>mask[deny] else -1)
    return "承認"  if mask[allow]>mask[deny] else "否定"


def CASPER(sue):#GPT-2
    """CASPER vote via Japanese GPT-2: compare next-token logits for 承認 vs 否定.

    BUG FIX: greet() called CASPER, which was not defined anywhere in the file,
    so every request failed with NameError — while the GPT-2 model loaded at
    module level was never used. This implements CASPER in the same pattern as
    MELCHIOR/BALTHASAR: prompt the model, then compare the logits of the two
    candidate answer tokens. Side effect: appends +1/-1 to `votes`.
    """
    # NOTE(review): for this SentencePiece tokenizer input_ids[0] is assumed to
    # be the content token of the word (no leading special token) — TODO confirm.
    allow=GPT2Tokenizer("承認").input_ids[0]
    deny=GPT2Tokenizer("否定").input_ids[0]
    prompt='CASPERは女としての人格を持っています。人間とCASPERの対話です。人間「'+sue+'。承認 か 否定 のどちらかで答えてください。」'+'CASPER 「'
    # Logits for the token that would follow the open quote.
    logits=GPT2Model(**GPT2Tokenizer(prompt,return_tensors="pt")).logits[0,-1,:]
    votes.append(1 if logits[allow]>logits[deny] else -1)
    return "承認" if logits[allow]>logits[deny] else "否定"


def greet(sue):
    """Run all three personality votes on *sue* and format the majority verdict."""
    text1="BERT-1"+MELCHIOR(sue)
    text2="GPT-2"+CASPER(sue)
    text3="mBERT-3"+BALTHASAR(sue)
    # Majority over the three votes just appended; a tie is impossible with
    # three +1/-1 voters, and sum>0 means approval (可決).
    return text1+" "+text2+" "+text3+"\n___\n\n"+("|可決|" if sum(votes[-3:])>0 else "| 否決 |")+"\n___"


# Custom dark theme: black background, orange text, blue button, Shippori
# Mincho webfont; keeps disabled output text readable (opacity 1.0).
css="@import url('https://fonts.googleapis.com/css2?family=Shippori+Mincho:wght@800&display=swap');  .gradio-container {background-color: black} .gr-button {background-color: blue;color:black; weight:200%;font-family:'Shippori Mincho', serif;}"
css+=".block{color:orange;} ::placeholder {font-size:35%} .gr-box {text-align: center;font-size: 125%;border-color:orange;background-color: #000000;weight:200%;font-family:'Shippori Mincho', serif;}:disabled {color: orange;opacity:1.0;}"
with gr.Blocks(css=css) as demo:
    # Input: the motion to be put to the vote.
    sue = gr.Textbox(label="NAGI System",placeholder="決議内容を入力")
    greet_btn = gr.Button("提訴")
    # Output: per-model votes plus the 可決/否決 verdict from greet().
    output = gr.Textbox(label="決議",placeholder="本システムは事前学習モデルのpromptにより行われています.決議結果に対して当サービス開発者は一切の責任を負いません.")
    greet_btn.click(fn=greet, inputs=sue, outputs=output)
# Blocking call: starts the Gradio server.
demo.launch()