import streamlit as st


def app():
    # st.title("About")
    st.markdown("<h1>About</h1>", unsafe_allow_html=True)

    st.markdown("""## Introduction""")
    st.markdown(
        """**RoBERTa-Hindi** is one of the many projects from the Flax/JAX community week
organized by HuggingFace in collaboration with Google to make compute-intensive projects
more feasible."""
    )
    st.markdown(
        """It is a monolingual transformers model pretrained on a large corpus of Hindi data
in a self-supervised fashion. This means it was pretrained on raw texts only, with no human
labelling (which is why it can use lots of publicly available data), using an automatic
process to generate inputs and labels from those texts. A minimal usage sketch is given at
the end of this page."""
    )

    st.markdown("""## Datasets used""")
    st.markdown(
        """RoBERTa-Hindi has been pretrained on a large corpus built from multiple datasets.
The full list of datasets used is given below:"""
    )
    st.markdown(
        """
1. OSCAR
2. mC4
3. IndicGLUE
4. Hindi Wikipedia articles (172k)
5. Hindi text short summarization corpus
6. Hindi text short and large summarization corpus
7. Old newspapers (Hindi)
8. Samanantar
"""
    )
    st.markdown(
        """***NOTE: Some of the datasets are readily available on HuggingFace Datasets,
while the team prepared the rest following the documentation.***"""
    )