# -*- coding: utf-8 -*-
"""scratchpad

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/notebooks/empty.ipynb
"""

!pip install -q datasets transformers

from datasets import load_dataset

dataset = load_dataset("tau/scrolls", "qmsum")

dataset
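# Expected structure (tau/scrolls "qmsum"): a DatasetDict with train/validation/test
# splits and columns id, pid, input, output. Note that SCROLLS keeps its test
# labels hidden, so the test split's "output" column may be empty.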

!pip install -q "clean-text[gpl]"  # quoted so the shell doesn't glob the brackets
from cleantext import clean

train_df = dataset["train"].to_pandas().convert_dtypes()
val_df = dataset["validation"].to_pandas().convert_dtypes()
test_df = dataset["test"].to_pandas().convert_dtypes()

from tqdm.auto import tqdm 

tqdm.pandas()

train_df["input"] = train_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
val_df["input"] = val_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
test_df["input"] = test_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)

train_df["output"] = train_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
val_df["output"] = val_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
test_df["output"] = test_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)

import re
def fix_punct_whitespace(text: str) -> str:
    # Fix spaces around apostrophes
    text = re.sub(r"([a-zA-Z])\s?'\s?([a-zA-Z])", r"\1'\2", text)

    # Remove spaces before punctuation marks (except for parentheses)
    text = re.sub(r"\s+([.,;:!?])", r"\1", text)

    # Add a space after punctuation marks (except for parentheses) if missing
    text = re.sub(r"([.,;:!?])(?=[^\s])", r"\1 ", text)

    # Handle spaces around parentheses
    text = re.sub(r"\s?\(\s?", r" (", text)
    text = re.sub(r"\s?\)\s?", r")", text)

    # Add a space after a closing parenthesis if:
    # followed by a word or opening parenthesis
    text = re.sub(r"\)(?=[^\s.,;:!?])", r") ", text)

    # Handle spaces around quotation marks
    text = re.sub(r'\s?"', r'"', text)
    text = re.sub(r'"\s?', r'" ', text)

    # Handle spaces around single quotes
    text = re.sub(r"\s?'", r"'", text)
    text = re.sub(r"'\s?", r"' ", text)

    # Handle comma in numbers
    text = re.sub(r"(\d),\s+(\d)", r"\1,\2", text)

    # Collapse the space the single-quote rule inserts inside contractions ("don' t" -> "don't")
    return text.replace("' ", "'")
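
# A couple of illustrative checks on synthetic strings (not from the dataset):
assert fix_punct_whitespace("Hello , world ( test )") == "Hello, world (test)"
assert fix_punct_whitespace("don 't worry") == "don't worry"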

train_df["input"] = train_df["input"].progress_apply(fix_punct_whitespace)
val_df["input"] = val_df["input"].progress_apply(fix_punct_whitespace)
test_df["input"] = test_df["input"].progress_apply(fix_punct_whitespace)

train_df["output"] = train_df["output"].progress_apply(fix_punct_whitespace)
val_df["output"] = val_df["output"].progress_apply(fix_punct_whitespace)
test_df["output"] = test_df["output"].progress_apply(fix_punct_whitespace)

train_df.head(2)

from transformers import AutoTokenizer

# Load only the tokenizer (used below for token counting); the 8-bit model itself
# is not needed in this notebook.
tokenizer = AutoTokenizer.from_pretrained("pszemraj/long-t5-tglobal-xl-16384-book-summary-8bit")

def get_token_count(text: str) -> int:
    """Count tokens with the target model's tokenizer (includes special tokens)."""
    if len(text) < 1:
        return 0
    return len(tokenizer.encode(text, truncation=False, padding=False))

get_token_count("ayyy waddup my g")

train_df["input_token_count"] = train_df["input"].progress_apply(get_token_count)
val_df["input_token_count"] = val_df["input"].progress_apply(get_token_count)
test_df["input_token_count"] = test_df["input"].progress_apply(get_token_count)

train_df["output_token_count"] = train_df["output"].progress_apply(get_token_count)
val_df["output_token_count"] = val_df["output"].progress_apply(get_token_count)
test_df["output_token_count"] = test_df["output"].progress_apply(get_token_count)



train_df.describe()
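
# The tokenizer above belongs to a 16384-token-context model, so it's worth
# checking what fraction of inputs would be truncated at that length:
for name, df in [("train", train_df), ("val", val_df), ("test", test_df)]:
    frac = (df["input_token_count"] > 16384).mean()
    print(name, round(float(frac), 4))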

"""# New Section"""

# Commented out IPython magic to ensure Python compatibility.
# %%bash
# curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
# apt-get install git-lfs -q
# git lfs install

!pip install -U -q transformers accelerate
from huggingface_hub import notebook_login
notebook_login()
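# (needs a Hugging Face token with write access, since we push to a dataset repo below)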

# Commented out IPython magic to ensure Python compatibility.
# %%bash
# 
# git lfs install
# git clone https://huggingface.co/datasets/pszemraj/qmsum-cleaned

from pathlib import Path 

target = Path.cwd() / 'qmsum-cleaned'
assert target.exists(), "clone the qmsum-cleaned repo (cell above) before writing"

train_df.to_parquet(target / 'train.parquet')
val_df.to_parquet(target / 'validation.parquet')
test_df.to_parquet(target / 'test.parquet')
!ls $target
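
# Optional round-trip check that the files load cleanly before pushing
import pandas as pd
pd.read_parquet(target / 'train.parquet').shape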

# Commented out IPython magic to ensure Python compatibility.
# %cd $target
!git pull
!git lfs install && git lfs track *.parquet
!git add . && git commit -a -m add_cleaned
!git push
# %cd ..

# Commented out IPython magic to ensure Python compatibility.
# NOTE: git needs an identity before `git commit`, so run this cell first.
# %%bash
# git config --global user.email "you@example.com"
# git config --global user.name "colab"