pszemraj commited on
Commit
031526e
1 Parent(s): dbe97e2

Upload cleaning_code.py

Browse files
Files changed (1) hide show
  1. cleaning_code.py +144 -0
cleaning_code.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """scratchpad
3
+
4
+ Automatically generated by Colaboratory.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/notebooks/empty.ipynb
8
+ """
9
+
10
+ pip install -q datasets transformers
11
+
12
+ from datasets import load_dataset
13
+
14
+ dataset = load_dataset("tau/scrolls", "qmsum")
15
+
16
+ dataset
17
+
18
+ !pip install clean-text[gpl] -q
19
+ from cleantext import clean
20
+
21
+ train_df = dataset["train"].to_pandas().convert_dtypes()
22
+ val_df = dataset["validation"].to_pandas().convert_dtypes()
23
+ test_df = dataset["test"].to_pandas().convert_dtypes()
24
+
25
+ from tqdm.auto import tqdm
26
+
27
+ tqdm.pandas()
28
+
29
+ train_df["input"] = train_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
30
+ val_df["input"] = val_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
31
+ test_df["input"] = test_df["input"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
32
+
33
+ train_df["output"] = train_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
34
+ val_df["output"] = val_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
35
+ test_df["output"] = test_df["output"].progress_apply(clean, lower=False, no_urls=True, no_emails=True)
36
+
37
import re


def fix_punct_whitespace(text: str) -> str:
    """Normalize whitespace around punctuation in *text*.

    Collapses stray spaces before punctuation, ensures a single space after
    sentence punctuation, tightens spacing around parentheses/quotes, and
    re-joins digit separators that the spacing rules split.

    Args:
        text: Input string (e.g. a transcript segment).

    Returns:
        The string with punctuation spacing normalized.
    """
    # Fix spaces around apostrophes between letters ("it ' s" -> "it's")
    text = re.sub(r"([a-zA-Z])\s?'\s?([a-zA-Z])", r"\1'\2", text)

    # Remove spaces before punctuation marks (except for parentheses)
    text = re.sub(r"\s+([.,;:!?])", r"\1", text)

    # Add a space after punctuation marks (except for parentheses) if missing
    text = re.sub(r"([.,;:!?])(?=[^\s])", r"\1 ", text)

    # Handle spaces around parentheses: " (" before, no space inside
    text = re.sub(r"\s?\(\s?", r" (", text)
    text = re.sub(r"\s?\)\s?", r")", text)

    # Add a space after a closing parenthesis if followed by a
    # non-space, non-punctuation character
    text = re.sub(r"\)(?=[^\s.,;:!?])", r") ", text)

    # Handle spaces around double quotation marks
    text = re.sub(r'\s?"', r'"', text)
    text = re.sub(r'"\s?', r'" ', text)

    # Handle spaces around single quotes
    text = re.sub(r"\s?'", r"'", text)
    text = re.sub(r"'\s?", r"' ", text)

    # Re-join digit separators split by the rules above:
    # "1, 000" -> "1,000" and "3. 14" -> "3.14".
    # BUGFIX: the original only repaired commas, so the space-after-
    # punctuation rule permanently mangled decimal numbers.
    text = re.sub(r"(\d)([.,])\s+(\d)", r"\1\2\3", text)

    # Undo the trailing space the single-quote rule adds after every
    # apostrophe (e.g. "it' s" artifacts)
    return text.replace("' ", "'")
69
+
70
+ train_df["input"] = train_df["input"].progress_apply(fix_punct_whitespace)
71
+ val_df["input"] = val_df["input"].progress_apply(fix_punct_whitespace)
72
+ test_df["input"] = test_df["input"].progress_apply(fix_punct_whitespace)
73
+
74
+ train_df["output"] = train_df["output"].progress_apply(fix_punct_whitespace)
75
+ val_df["output"] = val_df["output"].progress_apply(fix_punct_whitespace)
76
+ test_df["output"] = test_df["output"].progress_apply(fix_punct_whitespace)
77
+
78
+ train_df.head(2)
79
+
80
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
81
+
82
+ tokenizer = AutoTokenizer.from_pretrained("pszemraj/long-t5-tglobal-xl-16384-book-summary-8bit")
83
+
84
def get_token_count(text: str) -> int:
    """Count tokens in *text* using the module-level `tokenizer`.

    Returns 0 for an empty string; otherwise the length of the encoded
    sequence without truncation or padding.
    """
    if not text:
        return 0
    return len(tokenizer.encode(text, truncation=False, padding=False))
89
+
90
+ get_token_count("ayyy waddup my g")
91
+
92
+ train_df["input_token_count"] = train_df["input"].progress_apply(get_token_count)
93
+ val_df["input_token_count"] = val_df["input"].progress_apply(get_token_count)
94
+ test_df["input_token_count"] = test_df["input"].progress_apply(get_token_count)
95
+
96
+ train_df["output_token_count"] = train_df["output"].progress_apply(get_token_count)
97
+ val_df["output_token_count"] = val_df["output"].progress_apply(get_token_count)
98
+ test_df["output_token_count"] = test_df["output"].progress_apply(get_token_count)
99
+
100
+
101
+
102
+ train_df.describe()
103
+
104
+ """# New Section"""
105
+
106
+ # Commented out IPython magic to ensure Python compatibility.
107
+ # %%bash
108
+ # curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
109
+ # apt-get install git-lfs -q
110
+ # git lfs install
111
+
112
+ !pip install -U -q transformers accelerate
113
+ from huggingface_hub import notebook_login
114
+ notebook_login()
115
+
116
+ # Commented out IPython magic to ensure Python compatibility.
117
+ # %%bash
118
+ #
119
+ # git lfs install
120
+ # git clone https://huggingface.co/datasets/pszemraj/qmsum-cleaned
121
+
122
+ from pathlib import Path
123
+
124
+ target = Path.cwd() / 'qmsum-cleaned'
125
+ target.exists()
126
+
127
+ train_df.to_parquet(target / 'train.parquet')
128
+ val_df.to_parquet(target / 'validation.parquet')
129
+ test_df.to_parquet(target/ 'test.parquet')
130
+ !ls $target
131
+
132
+ # Commented out IPython magic to ensure Python compatibility.
133
+ # %cd $target
134
+ !git pull
135
+ !git lfs install && git lfs track *.parquet
136
+ !git add . && git commit -a -m add_cleaned
137
+ !git push
138
+ # %cd ..
139
+
140
+ # Commented out IPython magic to ensure Python compatibility.
141
+ # %%bash
142
+ # git config --global user.email "you@example.com"
143
+ # git config --global user.name "colab"
144
+