""" Script for streamlit demo
@author: AbinayaM02
"""
# Import necessary libraries
import json

import streamlit as st
from transformers import AutoTokenizer, GPT2LMHeadModel, pipeline
# Read the config
with open("config.json") as f:
    cfg = json.loads(f.read())
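# The config only needs to supply the model checkpoint used below; an illustrative
# config.json (the checkpoint name is taken from the demo description further down):
# {
#     "model_name_or_path": "flax-community/gpt-2-tamil"
# }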
# Set page layout
st.set_page_config(layout="wide")
# Load the model
@st.cache(allow_output_mutation=True)
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(cfg["model_name_or_path"])
    model = GPT2LMHeadModel.from_pretrained(cfg["model_name_or_path"])
    # GPT-2 is a causal language model, so use the "text-generation" pipeline
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return generator, tokenizer
with st.spinner('Loading model...'):
    generator, tokenizer = load_model()
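# Illustrative call on the loaded pipeline (prompt and parameters are examples,
# not values used elsewhere in this demo); the text-generation pipeline returns
# a list of dicts, each with a "generated_text" key:
#   generator("வணக்கம்", max_length=30, num_return_sequences=1)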
# st.image("images/chef-transformer.png", width=400)
st.header("Tamil Language Demos")
st.markdown(
    "This demo uses [GPT-2 trained on the OSCAR dataset](https://huggingface.co/flax-community/gpt-2-tamil) "
    "to show language generation and other downstream tasks."
)
st.sidebar.image("images/tamil_logo.png", width=100)
st.sidebar.title("Select demo:")
sampling_mode = st.sidebar.selectbox("Select a demo", index=0, options=["Text Generation", "Text Classification"])