import ast
import json
import os

import datasets
import streamlit as st
from transformers import AutoTokenizer

# Directory holding locally pre-built copies of splits (used instead of
# re-downloading when a split's folder is already present).
CACHE_DIR = "cache_ds/"

# Maps split name -> list of contributor names; loaded once at startup.
# Use a context manager so the file handle is closed (was json.load(open(...))).
contribution_json = "contributors.json"
with open(contribution_json, "r") as _f:
    contribution_dict = json.load(_f)

# All dataset splits the explorer can display.
splits = ['EuroParliamentProceedings', 'TED2020', 'PileOfLaw', 'StackExchange_ver2',
          'GithubIssues', 'Opensubtitles', 'USPTO', 'S2ORC', 'DevDocs',
          'CodePileReddit2022', 'DMMath', 'Gutenberg', 'USENET', 'GithubDiff',
          'Enwiki', 'GNOME', 'ASFPublicMail', 'PileV2Reddit2020', 'CodePilePosts',
          'Discourse', 'Tanzil', 'arXiv', 'UbuntuIRC', 'PubMed',
          'CodePileReddit2020', 'CodePileReddit2021', 'GlobalVoices',
          'FreeLaw_Options', 'PileV2Posts', 'Bible']

# Splits already materialized on disk under CACHE_DIR.
cached_ds = os.listdir(CACHE_DIR)

# Tokenizer used only to report a token count for the displayed row.
tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')


def load_page(split):
    """Render the explorer page for one dataset split.

    Downloads the split from the HF hub (or loads it from the local cache
    when available), then shows a row selector inside a form; submitting
    the form displays the row's text, its parsed metadata, and a token
    count relative to a 2048-token budget.
    """
    with st.spinner('Downloading and building dataset...'):  # typo fix: was "buidling"
        if split not in cached_ds:
            # NOTE(review): the positional "train" here fills load_dataset's
            # *config name* parameter, not the split — verify this is intended.
            ds = datasets.load_dataset(
                'CarperAI/pile-v2-small-filtered',
                "train",
                data_files="data/" + split + "/data.json",
            )
        else:
            ds = datasets.load_from_disk(CACHE_DIR + split)
    print("Successfully loaded " + split)  # typo fix: was "Sucessfully"

    st.title("Dataset Explorer")
    st.write(f"# {split}")
    st.caption(f"Contributors: {','.join(contribution_dict[split])}")

    with st.form("dataset_form"):
        index = st.slider('Select a row', 0, len(ds['train']) - 1, 0)
        if st.form_submit_button("Load"):
            st.write(f"Row {index}")
            data = ds['train'][index]
            content = data["text"]
            meta = data["meta"]
            with st.expander("Render Content"):
                st.write(content)
            st.write("### Content:")
            st.text(content)
            st.write("### Meta:")
            # meta is stored as a Python-literal string; parse it safely
            # (literal_eval, not eval) before display.
            st.write(ast.literal_eval(meta))
            # Token count plus headroom against a 2048-token context budget.
            tokenized = tokenizer(content, return_length=True)['length'][0]
            token_count_metric = st.metric("Token Count", value=tokenized,
                                           delta=2048 - tokenized)


demo_name = st.sidebar.selectbox("Choose a demo", splits)
load_page(demo_name)