import gradio as gr
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
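# Gradio demo for the Russian instruction-following model AlexWortega/instruct_rugptlarge.
# The tokenizer is extended with the special tokens the model was trained with
# (<code>, </code>, <instructionS>, <instructionE>, <next>).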
tokenizer = GPT2TokenizerFast.from_pretrained("AlexWortega/instruct_rugptlarge")
special_tokens_dict = {'additional_special_tokens': ['<code>', '</code>', '<instructionS>', '<instructionE>', '<next>']}
tokenizer.add_special_tokens(special_tokens_dict)

device = 'cpu'  # meh, GPUs are expensive
model = GPT2LMHeadModel.from_pretrained("AlexWortega/instruct_rugptlarge")

# Resize the embedding matrix to account for the newly added special tokens.
model.resize_token_embeddings(len(tokenizer))

def generate_prompt(instruction, input=None):
    # Helper for formatting a prompt; currently unused by the Gradio interface below.
    if input:
        return f"{input}:"
    return f"{instruction}"

def generate_seqs(q, temp, topp, topk, nb, maxtok):
    k = 1  # number of sequences to return
    gen_kwargs = {
        "min_length": 20,
        "max_new_tokens": maxtok,
        "top_k": topk,
        "top_p": topp,
        "do_sample": True,
        "early_stopping": True,
        "no_repeat_ngram_size": 2,
        "temperature": temp,
        "eos_token_id": tokenizer.eos_token_id,
        "pad_token_id": tokenizer.eos_token_id,
        "use_cache": True,
        "repetition_penalty": 1.5,
        "length_penalty": 0.8,
        "num_beams": nb,
        "num_return_sequences": k
    }
    # Append the instruction-start special token to the user question;
    # fall back to a default question if the input is empty.
    if len(q) > 0:
        q = q + '<instructionS>'
    else:
        q = 'Как зарабатывать денег на нейросетях ?' + '<instructionS>'
    t = tokenizer.encode(q, return_tensors='pt').to(device)
    g = model.generate(t, **gen_kwargs)
    generated_sequences = tokenizer.batch_decode(g, skip_special_tokens=False)

    # Optional re-ranking with a reward model (disabled here):
    # sequences = [f"H:{q}</s></s>A:{s.replace(q, '')}" for s in generated_sequences]
    # scores = [reward_model.reward_score(q, s.split('</s></s>A:')[-1]) for s in sequences]
    # results = [(s, score) for score, s in sorted(zip(scores, sequences), reverse=True)[:k]]

    # Strip the special tokens before returning the text to the UI.
    ans = generated_sequences[0].replace('<instructionS>', '\n').replace('<instructionE>', '').replace('<|endoftext|>', '')
    return ans

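# Example of calling the generation function directly, outside the Gradio UI,
# using the same defaults as the sliders below (a quick sketch, not part of the app):
#   print(generate_seqs('Как зарабатывать денег на нейросетях ?',
#                       temp=1.0, topp=0.9, topk=50, nb=4, maxtok=100))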
description_html = '''
<p>Обучена на 2v100, коллективом авторов:</p>
<ul>
    <li><a href="https://t.me/YallenGusev" target="_blank">@YallenGusev</a></li>
    <li><a href="https://t.me/lovedeathtransformers" target="_blank">@lovedeathtransformers</a></li>
    <li><a href="https://t.me/alexkuk" target="_blank">@alexkuk</a></li>
    <li><a href="https://t.me/chckdskeasfsd" target="_blank">@chckdskeasfsd</a></li>
    <li><a href="https://t.me/dno5iq" target="_blank">@dno5iq</a></li>
</ul>
'''

g = gr.Interface(
    fn=generate_seqs,
    inputs=[
        gr.components.Textbox(
            lines=2, label="Впишите сюда задачу, а я попробую решить", placeholder="Как зарабатывать денег на нейросетях?"
        ),
        #gr.components.Textbox(lines=2, label="Вход", placeholder="Нет"),
        gr.components.Slider(minimum=0.1, maximum=2, value=1.0, label="Temperature"),
        gr.components.Slider(minimum=0, maximum=1, value=0.9, label="Top p"),
        gr.components.Slider(minimum=0, maximum=100, value=50, label="Top k"),
        gr.components.Slider(minimum=0, maximum=5, step=1, value=4, label="Beams"),
        gr.components.Slider(
            minimum=1, maximum=256, step=1, value=100, label="Max tokens"
        ),
    ],
    outputs=[
        gr.components.Textbox(
            lines=5,
            label="Output",
        )
    ],
    title="ruInstructlarge",
    description=description_html)


g.queue(concurrency_count=5)
g.launch()
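
# To try it locally (a minimal sketch, assuming a Gradio 3.x environment where
# `queue(concurrency_count=...)` is still supported):
#   pip install "gradio<4" transformers torch
#   python app.py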