# -*- coding: utf-8 -*-
"""scratchpad

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/notebooks/empty.ipynb
"""

!pip install -q datasets transformers

from datasets import load_dataset

dataset = load_dataset("tau/scrolls", "qmsum")
dataset

!pip install clean-text[gpl] -q

from cleantext import clean

train_df = dataset["train"].to_pandas().convert_dtypes()
val_df = dataset["validation"].to_pandas().convert_dtypes()
test_df = dataset["test"].to_pandas().convert_dtypes()

from tqdm.auto import tqdm

tqdm.pandas()

# Normalize text and strip URLs/emails; pandas forwards the keyword
# arguments to clean-text's clean().
train_df["input"] = train_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
val_df["input"] = val_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
test_df["input"] = test_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)

train_df["output"] = train_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
val_df["output"] = val_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
test_df["output"] = test_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)

import re


def fix_punct_whitespace(text: str) -> str:
    # Fix spaces around apostrophes
    text = re.sub(r"([a-zA-Z])\s?'\s?([a-zA-Z])", r"\1'\2", text)
    # Remove spaces before punctuation marks (except for parentheses)
    text = re.sub(r"\s+([.,;:!?])", r"\1", text)
    # Add a space after punctuation marks (except for parentheses) if missing
    text = re.sub(r"([.,;:!?])(?=[^\s])", r"\1 ", text)
    # Handle spaces around parentheses
    text = re.sub(r"\s?\(\s?", r" (", text)
    text = re.sub(r"\s?\)\s?", r")", text)
    # Add a space after a closing parenthesis if followed by a word or opening parenthesis
    text = re.sub(r"\)(?=[^\s.,;:!?])", r") ", text)
    # Handle spaces around quotation marks
    text = re.sub(r'\s?"', r'"', text)
    text = re.sub(r'"\s?', r'" ', text)
    # Handle spaces around single quotes
    text = re.sub(r"\s?'", r"'", text)
    text = re.sub(r"'\s?", r"' ", text)
    # Handle commas in numbers (e.g. "1, 000" -> "1,000")
    text = re.sub(r"(\d),\s+(\d)", r"\1,\2", text)
    return text.replace("' ", "'")


train_df["input"] = train_df["input"].progress_apply(fix_punct_whitespace)
val_df["input"] = val_df["input"].progress_apply(fix_punct_whitespace)
test_df["input"] = test_df["input"].progress_apply(fix_punct_whitespace)

train_df["output"] = train_df["output"].progress_apply(fix_punct_whitespace)
val_df["output"] = val_df["output"].progress_apply(fix_punct_whitespace)
test_df["output"] = test_df["output"].progress_apply(fix_punct_whitespace)

train_df.head(2)

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("pszemraj/long-t5-tglobal-xl-16384-book-summary-8bit")


def get_token_count(text: str) -> int:
    if len(text) < 1:
        return 0
    return len(tokenizer.encode(text, truncation=False, padding=False))


get_token_count("ayyy waddup my g")

train_df["input_token_count"] = train_df["input"].progress_apply(get_token_count)
val_df["input_token_count"] = val_df["input"].progress_apply(get_token_count)
test_df["input_token_count"] = test_df["input"].progress_apply(get_token_count)

train_df["output_token_count"] = train_df["output"].progress_apply(get_token_count)
val_df["output_token_count"] = val_df["output"].progress_apply(get_token_count)
test_df["output_token_count"] = test_df["output"].progress_apply(get_token_count)

train_df.describe()
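
# Quick demonstration (added; not in the original notebook) of what
# fix_punct_whitespace does to a contrived, badly spaced string:
fix_punct_whitespace("Hello , world ( test ) . It ' s 1 , 000 !")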
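
# Sanity check (a sketch added here, not in the original notebook): the
# "16384" in the checkpoint name suggests a 16,384-token context window,
# so count how many training inputs would be truncated at that length.
MAX_INPUT_TOKENS = 16_384  # assumption inferred from the checkpoint name
n_over = int((train_df["input_token_count"] > MAX_INPUT_TOKENS).sum())
print(f"{n_over} of {len(train_df)} train inputs exceed {MAX_INPUT_TOKENS} tokens")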
"""# New Section"""

# Commented out IPython magic to ensure Python compatibility.
# %%bash
# curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
# apt-get install git-lfs -q
# git lfs install

!pip install -U -q transformers accelerate

from huggingface_hub import notebook_login

notebook_login()

# Commented out IPython magic to ensure Python compatibility.
# %%bash
# # git lfs install
# git clone https://huggingface.co/datasets/pszemraj/qmsum-cleaned

from pathlib import Path

target = Path.cwd() / 'qmsum-cleaned'
target.exists()

train_df.to_parquet(target / 'train.parquet')
val_df.to_parquet(target / 'validation.parquet')
test_df.to_parquet(target / 'test.parquet')

!ls $target

# Set a git identity before committing (placeholder values, as in the
# original notebook); without this, the commit below fails.
!git config --global user.email "you@example.com"
!git config --global user.name "colab"

# Commented out IPython magic to ensure Python compatibility.
# %cd $target
!git pull
# Quote the pattern so git-lfs records the glob itself rather than the
# shell expanding it to the current file names.
!git lfs install && git lfs track "*.parquet"
!git add . && git commit -a -m add_cleaned
!git push
# %cd ..
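
# Optional verification (a sketch, not in the original notebook): once the
# push succeeds, the cleaned splits should be loadable straight from the Hub,
# assuming `datasets` auto-detects the parquet files in the repo.
from datasets import load_dataset

cleaned = load_dataset("pszemraj/qmsum-cleaned")
print(cleaned)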