|
|
|
"""scratchpad |
|
|
|
Automatically generated by Colaboratory. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/notebooks/empty.ipynb |
|
""" |
|
|
|
!pip install -q datasets transformers
|
|
|
from datasets import load_dataset |
|
|
|
dataset = load_dataset("tau/scrolls", "qmsum") |
|
|
|
dataset |
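# Quick peek at one training example (a minimal sanity check): each record has
# an "input" (the query plus meeting transcript) and an "output" (the reference summary).
sample = dataset["train"][0]
print(sample["input"][:300], "\n---\n", sample["output"][:300])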
|
|
|
!pip install clean-text[gpl] -q |
|
from cleantext import clean |
|
|
|
train_df = dataset["train"].to_pandas().convert_dtypes() |
|
val_df = dataset["validation"].to_pandas().convert_dtypes() |
|
test_df = dataset["test"].to_pandas().convert_dtypes() |
|
|
|
from tqdm.auto import tqdm |
|
|
|
tqdm.pandas() |
|
|
|
train_df["input"] = train_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True) |
|
val_df["input"] = val_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True) |
|
test_df["input"] = test_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True) |
|
|
|
train_df["output"] = train_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True) |
|
val_df["output"] = val_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True) |
|
test_df["output"] = test_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True) |
|
|
|
import re
|
def fix_punct_whitespace(text: str) -> str:
    """Normalize whitespace around punctuation in transcript-style text."""
    # Rejoin contractions split around an apostrophe ("don ' t" -> "don't")
    text = re.sub(r"([a-zA-Z])\s?'\s?([a-zA-Z])", r"\1'\2", text)

    # Remove whitespace before punctuation marks
    text = re.sub(r"\s+([.,;:!?])", r"\1", text)

    # Ensure a space follows punctuation marks
    text = re.sub(r"([.,;:!?])(?=[^\s])", r"\1 ", text)

    # Normalize spacing around parentheses
    text = re.sub(r"\s?\(\s?", r" (", text)
    text = re.sub(r"\s?\)\s?", r")", text)

    # Add a space after a closing parenthesis unless punctuation follows
    text = re.sub(r"\)(?=[^\s.,;:!?])", r") ", text)

    # Normalize spacing around double quotation marks
    text = re.sub(r'\s?"', r'"', text)
    text = re.sub(r'"\s?', r'" ', text)

    # Normalize spacing around single quotation marks
    text = re.sub(r"\s?'", r"'", text)
    text = re.sub(r"'\s?", r"' ", text)

    # Rejoin thousands separators split by whitespace ("1, 000" -> "1,000")
    text = re.sub(r"(\d),\s+(\d)", r"\1,\2", text)

    # The single-quote pass above re-inserts a space inside contractions; undo it
    return text.replace("' ", "'")
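# Smoke test on a contrived string (hypothetical example): spaces around
# punctuation, parentheses, split contractions, and split thousands
# separators should all get normalized.
fix_punct_whitespace("Hello , world ( testing ) 1, 000 don ' t")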
|
|
|
train_df["input"] = train_df["input"].progress_apply(fix_punct_whitespace) |
|
val_df["input"] = val_df["input"].progress_apply(fix_punct_whitespace) |
|
test_df["input"] = test_df["input"].progress_apply(fix_punct_whitespace) |
|
|
|
train_df["output"] = train_df["output"].progress_apply(fix_punct_whitespace) |
|
val_df["output"] = val_df["output"].progress_apply(fix_punct_whitespace) |
|
test_df["output"] = test_df["output"].progress_apply(fix_punct_whitespace) |
|
|
|
train_df.head(2) |
|
|
|
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM |
|
|
|
tokenizer = AutoTokenizer.from_pretrained("pszemraj/long-t5-tglobal-xl-16384-book-summary-8bit") |
|
|
|
def get_token_count(text: str) -> int:
    """Count tokens in `text`; empty strings count as zero."""
    if len(text) < 1:
        return 0
    return len(tokenizer.encode(text, truncation=False, padding=False))
|
|
|
get_token_count("ayyy waddup my g")  # quick smoke test
|
|
|
train_df["input_token_count"] = train_df["input"].progress_apply(get_token_count) |
|
val_df["input_token_count"] = val_df["input"].progress_apply(get_token_count) |
|
test_df["input_token_count"] = test_df["input"].progress_apply(get_token_count) |
|
|
|
train_df["output_token_count"] = train_df["output"].progress_apply(get_token_count) |
|
val_df["output_token_count"] = val_df["output"].progress_apply(get_token_count) |
|
test_df["output_token_count"] = test_df["output"].progress_apply(get_token_count) |
|
|
|
|
|
|
|
train_df.describe() |
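# Narrower view of just the new token-count columns:
train_df[["input_token_count", "output_token_count"]].describe()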
|
|
|
"""# New Section""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
!pip install -U -q transformers accelerate |
|
from huggingface_hub import notebook_login |
|
notebook_login()

from pathlib import Path |
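# The cells below write into a local clone of the dataset repo. The repo id
# here is an assumption for illustration; substitute your own Hub dataset.
!git clone https://huggingface.co/datasets/pszemraj/qmsum-cleaned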
|
|
|
# Write the cleaned splits into the local clone of the dataset repo
target = Path.cwd() / 'qmsum-cleaned'
target.exists()
|
|
|
train_df.to_parquet(target / 'train.parquet') |
|
val_df.to_parquet(target / 'validation.parquet') |
|
test_df.to_parquet(target / 'test.parquet')
|
!ls $target |
|
|
|
|
|
|
|
# Run the git commands from inside the cloned repo; quote "*.parquet" so the
# shell does not expand the glob before git-lfs sees it.
%cd $target
!git pull
!git lfs install && git lfs track "*.parquet"
!git add . && git commit -a -m add_cleaned
!git push
%cd ..
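# Optional verification (assumes the push succeeded and the repo id matches
# the clone above): reload the cleaned splits straight from the Hub.
cleaned = load_dataset("pszemraj/qmsum-cleaned")
cleaned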