# Hugging Face Spaces app (scraped page header removed — not part of the program).
import gradio as gr
from transformers import pipeline

# from transformers import T5Tokenizer, T5ForConditionalGeneration  # earlier prototype
def pipe(input_text):
    """Reword *input_text* for clarity using Mistral-7B-Instruct.

    Args:
        input_text: The text to be reworded (from the Gradio textbox).

    Returns:
        The model's reworded version of the text, as a plain string.
    """
    # Lazy-load the pipeline exactly once and stash it on the function:
    # building a 7B-parameter pipeline is very expensive, and the original
    # code re-created it on every request.
    if getattr(pipe, "_generator", None) is None:
        # NOTE: 'question-answering' (as originally written) is the wrong
        # task for an instruct causal LM — it expects an extractive-QA head.
        # Rewording is a text-generation task.
        pipe._generator = pipeline(
            task="text-generation",
            model="mistralai/Mistral-7B-Instruct-v0.3",
        )
    prompt = f"[INST] Reword the following text for clarity: {input_text} [/INST]"
    # return_full_text=False strips the prompt, leaving only the rewrite.
    outputs = pipe._generator(prompt, max_new_tokens=256, return_full_text=False)
    return outputs[0]["generated_text"]
# Wire the rewording function into a minimal Gradio UI:
# a seven-line textbox in, plain text out.
input_box = gr.Textbox(lines=7)
demo = gr.Interface(fn=pipe, inputs=input_box, outputs="text")

demo.launch()
# NOTE(review): dead alternative implementation (aya-101 translation demo),
# kept for reference; scraping artifacts removed.
# # pip install -q transformers
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# checkpoint = "CohereForAI/aya-101"
# tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# aya_model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
# def generator(input_text):
#     inputs = tokenizer.encode("Translate to English: " + input_text, return_tensors="pt")
#     outputs = aya_model.generate(inputs, max_new_tokens=128)
#     return tokenizer.decode(outputs[0])
# # # Turkish to English translation
# # tur_inputs = tokenizer.encode("Translate to English: Aya cok dilli bir dil modelidir.", return_tensors="pt")
# # tur_outputs = aya_model.generate(tur_inputs, max_new_tokens=128)
# # print(tokenizer.decode(tur_outputs[0]))
# # # Aya is a multi-lingual language model
# demo = gr.Interface(
#     fn=generator,
#     inputs=gr.Textbox(lines=7),
#     outputs="text",
# )
# demo.launch()