import pathlib
from typing import Any, Optional, Union

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

Pathable = Union[str, pathlib.Path]

def load_model(name: str) -> Any:
    return AutoModelForCausalLM.from_pretrained(name)

def load_tokenizer(name: str) -> Any:
    return AutoTokenizer.from_pretrained(name)

def create_generator() -> GenerationConfig:
    return GenerationConfig(
        temperature=1.0,
        top_p=0.75,
        num_beams=4,
    )
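
# Note: with do_sample left at its default (False), transformers ignores
# temperature and top_p, so this config effectively runs plain 4-beam search.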
    
def generate_prompt(instruction: str, input: Optional[str] = None) -> str:
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{input}

### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Response:"""
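
# Illustration (hypothetical call): generate_prompt("Summarize the text.",
# input="Gradio builds ML demos.") fills the Alpaca-style template with both
# an Instruction and an Input section; without input, the shorter template
# is used.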

MODEL_NAMES = [
    "s3nh/pythia-1.4b-deduped-16k-steps-self-instruct-polish",
    "s3nh/pythia-410m-91k-steps-self-instruct-polish",
]

# Load the default checkpoint once at startup; the dropdown below only
# selects between the names listed here.
model = load_model(MODEL_NAMES[0])
tokenizer = load_tokenizer(MODEL_NAMES[0])
generation_config = create_generator()


def evaluate(instruction: str, input: Optional[str] = None) -> str:
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"]
    generation_output = model.generate(
        input_ids=input_ids,
        generation_config=generation_config,
        return_dict_in_generate=True,
        output_scores=True,
        max_new_tokens=256,
    )
    # generate() returns one sequence per num_return_sequences (1 by default);
    # everything after the "### Response:" marker is the model's answer.
    result = []
    for s in generation_output.sequences:
        output = tokenizer.decode(s)
        result.append(output.split("### Response:")[1].strip())
    return " ".join(result)
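
# Example (hypothetical call, in Polish to match these checkpoints):
# evaluate("Co to jest Python?") returns only the decoded text that follows
# the response marker.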

def inference(text: str, input: str) -> str:
    return evaluate(instruction=text, input=input)

def choose_model(name):
    return load_model(name), load_tokenizer(name)

demo = gr.Blocks()

with demo:

    # A Dropdown is a component, not a string, so the selected name cannot be
    # read at build time; the default checkpoint is loaded above instead.
    name = gr.Dropdown(MODEL_NAMES, value=MODEL_NAMES[0], label="Model")
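
    # A minimal sketch for hot-swapping checkpoints (assumes synchronous
    # loading is acceptable): reload the module-level model/tokenizer
    # whenever the dropdown selection changes.
    def switch_model(selected: str) -> None:
        global model, tokenizer
        model, tokenizer = choose_model(selected)

    name.change(switch_model, inputs=name)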
    
    io = gr.Interface(
        inference,
        inputs=[
            gr.Textbox(
                lines=3,
                max_lines=10,
                placeholder="Add question here",
                interactive=True,
                show_label=False,
            ),
            gr.Textbox(
                lines=3,
                max_lines=10,
                placeholder="Add context here",
                interactive=True,
                show_label=False,
            ),
        ],
        outputs=[gr.Textbox(lines=1, label="Response", interactive=False)],
        cache_examples=False,
    )

demo.launch()
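
# Optional tweaks (standard Gradio options): calling demo.queue() before
# launch() queues concurrent requests, and demo.launch(share=True) exposes a
# temporary public URL.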