|
import pandas as pd |
|
import re |
|
from concurrent.futures import ProcessPoolExecutor |
|
from tqdm import tqdm |
|
import os |
|
import glob |
|
|
|
|
|
# Vocabulary used to score text for math / reasoning relevance. Keywords are
# grouped by topic purely for readability; a few terms appear in more than one
# group (e.g. "correlation", "probability", "absolute value"), which is
# harmless: regex alternation still counts each text occurrence exactly once.
science_keywords_list = [
    # Reasoning, logic and critical thinking
    "deductive reasoning", "inductive reasoning", "abductive reasoning", "logical fallacy",
    "syllogism", "proposition", "premise", "conclusion",
    "argument", "critical thinking", "analytical skills", "hypothesis testing",
    "problem analysis", "brainstorming", "decision making", "creative thinking",
    "heuristic", "algorithm", "data analysis", "causal reasoning",
    "correlation", "evidence-based reasoning", "validity", "soundness",
    "cognitive bias", "confirmation bias", "cognitive dissonance", "logical consistency",
    "counterargument", "debate", "dialectic", "socratic questioning",
    "root cause analysis", "SWOT analysis", "decision tree", "flow chart",
    "mind mapping", "ideation", "brainwriting", "lateral thinking",
    "problem decomposition", "synthesis", "pattern recognition", "inference",
    "troubleshooting", "risk assessment", "scenario planning", "cost-benefit analysis",
    "optimization", "simulation", "strategic planning", "logical operator", "chain of thought",
    "step by step",
    # Basic arithmetic
    "addition", "subtraction", "multiplication", "division", "fraction",
    "decimal", "percentage", "ratio", "proportion", "absolute value",
    # Algebra
    "algebra", "equation", "coefficient", "variable", "polynomial",
    "quadratic", "exponential", "logarithm", "factorial", "sum",
    "quotient",
    "linear equation", "nonlinear equation", "system of equations",
    # Statistics and data
    "data", "statistics", "analysis", "median",
    "standard deviation", "probability",
    "binomial", "normal distribution", "histogram",
    "scatter plot", "correlation", "regression",
    # Functions and analysis
    "function", "linear function", "nonlinear function",
    "intercept", "coordinate", "domain", "range", "limit",
    "derivative", "integral", "differentiation",
    "infinite series", "sequence", "convergence", "power series",
    # Geometry and trigonometry
    "geometry", "angle", "triangle", "rectangle", "square",
    "circle", "polygon", "perimeter", "area", "circumference",
    "diameter", "radius", "pythagorean theorem", "trigonometry",
    "sine", "cosine", "tangent", "secant", "cosecant",
    "cotangent", "arc", "parallelogram",
    "rhombus", "trapezoid", "congruence",
    # Calculus and linear algebra
    "calculus", "differential equation", "partial derivative",
    "vector", "matrix", "determinant", "eigenvalue", "eigenvector",
    "linear transformation", "tensor", "multivariable calculus",
    # Logic and set theory
    "logic", "subset", "union", "intersection",
    "element", "cardinality", "venn diagram", "truth table",
    "proposition", "theorem", "proof", "induction",
    # Combinatorics, graphs and probability
    "combinatorics", "permutation", "combination", "graph theory",
    "tree", "vertex",
    "probability", "random variable", "expected value",
    # Measurement and physics
    "computation", "measurement", "length", "width", "height",
    "area", "volume", "density", "mass",
    "velocity", "acceleration", "force",
    # Number systems and number theory
    "binary", "decimal", "octal", "hexadecimal", "modulus",
    "prime number", "composite number", "greatest common divisor",
    "least common multiple", "factorization", "irrational number",
    "complex number", "imaginary unit", "real number",
    "absolute value"
]

# One word-boundary-delimited pattern per keyword. NOTE: re.escape() stopped
# escaping spaces in Python 3.7, so the .replace() is a compatibility no-op on
# modern interpreters; it keeps multi-word keywords matching a literal space
# on older ones.
science_keywords = []
for _keyword in science_keywords_list:
    _escaped = re.escape(_keyword).replace(r'\ ', ' ')
    science_keywords.append(r"\b" + _escaped + r"\b")

# Single non-capturing alternation, used with pandas Series.str.count().
science_regex = r'(?:' + r'|'.join(science_keywords) + r')'
|
|
|
|
|
def process_chunk(chunk, regex=None, threshold=50):
    """Score a CSV chunk against the science-keyword regex and keep hits.

    Parameters
    ----------
    chunk : pandas.DataFrame
        A three-column frame (read with ``header=None``); columns are renamed
        to ``['score', 'text', 'url']`` on entry if they aren't already.
    regex : str, optional
        Pattern to count. Defaults to the module-level ``science_regex``.
    threshold : int, optional
        Minimum combined match count (summed across all three columns) for a
        row to be kept. Defaults to 50, the original hard-coded value.

    Returns
    -------
    pandas.DataFrame
        The qualifying rows, with the 'score' column overwritten by the total
        keyword-match count for that row.
    """
    if regex is None:
        regex = science_regex

    # The CSVs are read without headers, so columns normally arrive as 0/1/2.
    if list(chunk.columns) != ['score', 'text', 'url']:
        chunk.columns = ['score', 'text', 'url']

    # Count case-insensitive keyword occurrences in every column. astype(str)
    # tolerates numeric/NaN cells; fillna(0) covers rows where str.count still
    # produced NaN for missing values. (Counting the numeric 'score' column is
    # kept from the original logic; it contributes 0 for purely numeric cells.)
    match_counts = sum(
        chunk[col].astype(str).str.count(regex, flags=re.IGNORECASE).fillna(0)
        for col in ('score', 'text', 'url')
    ).astype(int)

    # Compute the keep-mask once (the original evaluated it twice) and write
    # the match count straight into 'score' instead of creating and then
    # dropping a temporary 'science_score' column.
    keep = match_counts >= threshold
    filtered_chunk = chunk[keep].copy()
    filtered_chunk['score'] = match_counts[keep]

    return filtered_chunk
|
|
|
|
|
def _write_batch(executor, chunks, output_file, first_chunk, desc):
    """Run process_chunk over `chunks` in the pool and write surviving rows.

    Appends each non-empty filtered chunk to `output_file` as headerless CSV.
    Returns the updated `first_chunk` flag: True until the first rows are
    written (mode 'w' truncates the file), False afterwards (mode 'a').
    """
    futures = [executor.submit(process_chunk, c) for c in chunks]
    for future in tqdm(futures, desc=desc, leave=False):
        filtered_chunk = future.result()
        if filtered_chunk.empty:
            continue
        mode = 'w' if first_chunk else 'a'
        filtered_chunk.to_csv(output_file, mode=mode, index=False, header=False)
        first_chunk = False
    return first_chunk


def process_file(input_file, output_file):
    """Stream `input_file` in chunks, filter each chunk for science keywords
    in a process pool, and write the surviving rows to `output_file`.

    Output is incremental headerless CSV, so only one batch of chunks is held
    in memory at a time. Nothing is written if no chunk passes the filter.
    """
    chunk_size = 10000
    # header=None: the crawl CSVs carry no header row.
    reader = pd.read_csv(input_file, chunksize=chunk_size, header=None)

    num_workers = 20
    # Buffer several chunks per worker before dispatching a batch to the pool.
    batch_size = num_workers * 4

    first_chunk = True
    chunk_list = []
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for chunk in tqdm(reader, desc=f'Reading chunks from {os.path.basename(input_file)}'):
            chunk_list.append(chunk)
            if len(chunk_list) == batch_size:
                first_chunk = _write_batch(
                    executor, chunk_list, output_file, first_chunk, 'Processing batch')
                chunk_list = []

        # Flush the final, partially-filled batch (original duplicated this
        # whole submit/collect/write loop inline; now shared via _write_batch).
        if chunk_list:
            first_chunk = _write_batch(
                executor, chunk_list, output_file, first_chunk, 'Processing last batch')

    print(f'Finished processing {input_file}')
|
|
|
|
|
def main():
    """Walk each Common Crawl year directory and filter every CSV in it.

    For every `<dir>/<name>.csv` a `<dir>/math_<name>.csv` is produced;
    already-existing outputs are skipped so an interrupted run can resume.
    """
    data_dir = '/media/joe/512-3/csv'
    years = [f'CC-MAIN-{year}' for year in range(2013, 2025)]
    directories = [os.path.join(data_dir, year) for year in years]

    for dir_path in directories:
        if not os.path.isdir(dir_path):
            print(f'Directory not found: {dir_path}')
            continue
        csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
        print(f'Found {len(csv_files)} CSV files in {dir_path}')
        for input_file in csv_files:
            base_name = os.path.basename(input_file)
            output_file = os.path.join(
                dir_path, 'math_' + base_name
            )
            # Resume support: never redo a file whose output already exists.
            if os.path.exists(output_file):
                print(f'Output file already exists. Skipping: {output_file}')
                continue
            process_file(input_file, output_file)


if __name__ == '__main__':
    # The guard is required for correctness, not just style: process_file uses
    # ProcessPoolExecutor, and under the 'spawn' start method (the default on
    # Windows and macOS) every worker re-imports this module — without the
    # guard each worker would re-run the entire pipeline recursively.
    main()
|
|
|
|