File size: 1,799 Bytes
2abe29a
488dbc6
2abe29a
6131110
2abe29a
 
 
488dbc6
2abe29a
 
488dbc6
2abe29a
 
488dbc6
2abe29a
 
 
 
 
 
 
 
 
 
488dbc6
2abe29a
 
 
 
 
 
488dbc6
 
2abe29a
 
488dbc6
2abe29a
488dbc6
2abe29a
 
488dbc6
2abe29a
 
 
 
488dbc6
2abe29a
 
 
 
 
6131110
2abe29a
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
from transformers import pipeline
# from transformers import T5Tokenizer, T5ForConditionalGeneration
import gradio as gr

# Lazily-constructed singleton for the HF pipeline: loading a 7B-parameter
# model takes minutes and gigabytes of RAM, so it must happen once per
# process, not once per request (the original rebuilt it on every call).
_MODEL = None


def _get_model():
    """Build the Hugging Face pipeline on first use and cache it.

    NOTE(review): ``mistralai/Mistral-7B-Instruct-v0.3`` is a causal
    text-generation model, while ``task='question-answering'`` expects an
    extractive QA model (e.g. a BERT-style span predictor) — this
    combination will likely fail to load. Confirm the intended task/model
    pairing; a ``text-generation`` pipeline with a rewording prompt is
    probably what was meant.
    """
    global _MODEL
    if _MODEL is None:
        _MODEL = pipeline(
            task='question-answering',
            model="mistralai/Mistral-7B-Instruct-v0.3",
        )
    return _MODEL


def pipe(input_text):
    """Reword *input_text* for clarity via the cached QA pipeline.

    Parameters
    ----------
    input_text : str
        Free-form text supplied by the Gradio textbox.

    Returns
    -------
    str
        The ``"answer"`` field of the pipeline's output dict.
    """
    model = _get_model()
    output = model(
        question="reword for clarity",
        context=input_text,
    )
    return output["answer"]

# Wire the reworder into a minimal Gradio UI. The object is deliberately
# named ``demo`` — Hugging Face Spaces looks for a module-level ``demo``.
text_input = gr.Textbox(lines=7)
demo = gr.Interface(fn=pipe, inputs=text_input, outputs="text")

# Start the local web server (blocks until interrupted).
demo.launch()


# # pip install -q transformers
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# checkpoint = "CohereForAI/aya-101"

# tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# aya_model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

# def generator(input_text):
#     inputs = tokenizer.encode("Translate to English: " + input_text, return_tensors="pt")
#     outputs = aya_model.generate(inputs, max_new_tokens=128)
#     return tokenizer.decode(outputs[0])

# # # Turkish to English translation
# # tur_inputs = tokenizer.encode("Translate to English: Aya cok dilli bir dil modelidir.", return_tensors="pt")
# # tur_outputs = aya_model.generate(tur_inputs, max_new_tokens=128)
# # print(tokenizer.decode(tur_outputs[0]))
# # # Aya is a multi-lingual language model

# demo = gr.Interface(
#     fn=generator,
#     inputs=gr.Textbox(lines=7),
#     outputs="text",
# )
# demo.launch()