import os

import requests
import streamlit as st

from .utils import query

# Hugging Face API token, read from the environment and sent as a Bearer token
# with every Inference API request.
HF_AUTH_TOKEN = os.getenv("HF_AUTH_TOKEN")
headers = {"Authorization": f"Bearer {HF_AUTH_TOKEN}"}


def write():
    st.markdown("# Text Paraphrasing")
    st.sidebar.header("Text Paraphrasing")
    st.write(
        """Here, you can paraphrase your text using the fine-tuned TURNA paraphrasing models. """
    )

    # Sidebar
    # Taken from https://huggingface.co/spaces/flax-community/spanish-gpt2/blob/main/app.py
    st.sidebar.subheader("Configurable parameters")

    model_name = st.sidebar.selectbox(
        "Model Selector",
        options=[
            "turna_paraphrasing_tatoeba",
            "turna_paraphrasing_opensubtitles",
        ],
        index=0,
    )

    max_new_tokens = st.sidebar.number_input(
        "Maximum length",
        min_value=0,
        max_value=20,
        value=20,
        help="The maximum length of the sequence to be generated.",
    )

    # Default example sentence is in Turkish, since TURNA is a Turkish language model.
    input_text = st.text_area(
        label="Enter a text: ",
        height=100,
        value="Kalp krizi geçirenlerin yaklaşık üçte birinin kısa bir süre önce grip atlattığı düşünülüyor. ",
    )

    # Build the Inference API endpoint for the selected model and collect the
    # generation parameters to send along with the request.
    url = "https://api-inference.huggingface.co/models/boun-tabi-LMG/" + model_name.lower()
    params = {"max_new_tokens": max_new_tokens}

    if st.button("Generate"):
        with st.spinner("Generating..."):
            output = query(input_text, url, params)
        st.success(output)
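

# --- Illustrative sketch (assumption): the `query` helper imported from `.utils`
# --- is not shown in this file. The function below is a hypothetical stand-in that
# --- follows the Hugging Face Inference API convention for text generation; the
# --- actual implementation in `.utils` may differ. It is not called by this page.
def _query_sketch(text, url, params):
    """Hypothetical example of a query helper; `.utils.query` is assumed to behave similarly."""
    payload = {
        "inputs": text,
        "parameters": params,  # e.g. {"max_new_tokens": 20}
        "options": {"wait_for_model": True},  # wait instead of erroring while the model loads
    }
    response = requests.post(url, headers=headers, json=payload)
    response.raise_for_status()
    # The Inference API typically returns a list such as [{"generated_text": "..."}].
    return response.json()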