import pandas as pd
import re
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import os
import glob
# Expanded financial keywords (formatted for regex word boundaries)
financial_keywords = [
r"\bfinance\b", r"\bfinancial\b", r"\beconomy\b", r"\beconomic\b", r"\bmarket\b", r"\bstock\b", r"\bbond\b",
r"\bshare\b", r"\basset\b", r"\bportfolio\b", r"\binvestment\b", r"\binvestor\b", r"\btrading\b", r"\bbroker\b",
r"\bcommodity\b", r"\bcurrency\b", r"\bforeign exchange\b", r"\bforex\b", r"\bderivative\b", r"\boption\b",
r"\bfutures\b", r"\bhedging\b", r"\brisk\b", r"\bdividend\b", r"\binterest\b", r"\bliquidity\b", r"\bcredit\b",
r"\bdebt\b", r"\bcapital\b", r"\bfund\b", r"\bventure\b", r"\bvaluation\b", r"\bmerger\b", r"\bacquisition\b",
r"\bIPO\b", r"\binitial public offering\b", r"\bprivate equity\b", r"\bhedge fund\b", r"\bmutual fund\b",
r"\bETF\b", r"\bexchange-traded fund\b", r"\bfinancial statement\b", r"\bbalance sheet\b", r"\bincome statement\b",
r"\bcash flow\b", r"\brevenue\b", r"\bprofit\b", r"\bloss\b", r"\bexpense\b", r"\bbudget\b", r"\bforecast\b",
r"\banalysis\b", r"\bearnings\b", r"\bEBITDA\b", r"\bEPS\b", r"\bP/E ratio\b", r"\bprice to earnings\b",
r"\bROI\b", r"\breturn on investment\b", r"\bROE\b", r"\breturn on equity\b", r"\bdiversification\b",
r"\bNASDAQ\b", r"\bNYSE\b", r"\bS&P 500\b", r"\bDow Jones\b", r"\bFTSE\b", r"\bNikkei\b", r"\bcommodities\b",
r"\bgold\b", r"\bsilver\b", r"\boil\b", r"\bGDP\b", r"\bgross domestic product\b", r"\binflation\b",
r"\bunemployment\b", r"\binterest rate\b", r"\bfederal reserve\b", r"\bcentral bank\b", r"\bmonetary policy\b",
r"\bquantitative easing\b", r"\bfiscal policy\b", r"\btax\b", r"\btreasury\b", r"\bbudget deficit\b",
r"\bnational debt\b", r"\bcredit rating\b", r"\bstandard & poor's\b", r"\bmoody's\b", r"\bfitch\b",
r"\bsovereign wealth fund\b", r"\binternational monetary fund\b", r"\bIMF\b", r"\bworld bank\b",
r"\bbasel III\b", r"\bdodd-frank\b", r"\bfinancial regulation\b", r"\binsurance\b", r"\breal estate\b",
r"\bmortgage\b", r"\bloan\b", r"\bbank\b", r"\bbanking\b", r"\bfintech\b", r"\bblockchain\b", r"\bcryptocurrency\b",
r"\bbitcoin\b", r"\bethereum\b", r"\bsmart contract\b", r"\bdigital currency\b", r"\bdecentralized finance\b",
# Add more general financial terms as needed
]
# Combine financial keywords into a single regex pattern using non-capturing groups
financial_regex = r'(?:' + r'|'.join(financial_keywords) + r')'
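# Illustrative sketch (not executed): the keywords join into one alternation,
# e.g. r'(?:\bfinance\b|\bfinancial\b|...)', so a single pass of str.count()
# tallies every keyword hit per row. The word boundaries keep short terms from
# matching inside longer words (e.g. "bank" does not match "embankment").
# A hypothetical spot check:
#   re.findall(financial_regex, 'The stock market fell as bond yields rose.', re.IGNORECASE)
#   # -> ['stock', 'market', 'bond']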
# Function to process a chunk of the dataset
def process_chunk(chunk):
    # Use vectorized string operations for efficiency
    # Count the number of matches in each column
    score_counts = chunk['score'].astype(str).str.count(financial_regex, flags=re.IGNORECASE)
    url_counts = chunk['url'].astype(str).str.count(financial_regex, flags=re.IGNORECASE)
    text_counts = chunk['text'].astype(str).str.count(financial_regex, flags=re.IGNORECASE)
    # Handle NaN values by filling them with zero
    score_counts = score_counts.fillna(0)
    url_counts = url_counts.fillna(0)
    text_counts = text_counts.fillna(0)
    # Sum the counts to get the financial score
    match_counts = score_counts + url_counts + text_counts
    match_counts = match_counts.astype(int)
    # Set a threshold for the minimum financial score
    threshold = 14  # Adjust this value as needed
    # Filter rows that meet the threshold
    filtered_chunk = chunk[match_counts >= threshold].copy()
    filtered_chunk['financial_score'] = match_counts[match_counts >= threshold]
    # Replace the original 'score' with 'financial_score'
    filtered_chunk['score'] = filtered_chunk['financial_score']
    filtered_chunk = filtered_chunk.drop(columns=['financial_score'])
    return filtered_chunk
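# Illustrative sketch (not executed): process_chunk keeps only rows whose combined
# keyword count across 'score', 'url', and 'text' reaches the threshold, then
# overwrites 'score' with that count. A hypothetical call:
#   demo = pd.DataFrame({'score': [3.2],
#                        'url': ['example.com/markets'],
#                        'text': ['The stock market rallied as bond yields fell.']})
#   process_chunk(demo)  # returns an empty frame: 3 total matches is below 14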
# Function to process a single CSV file
def process_file(input_file, output_file):
    # Read the CSV file in chunks
    chunk_size = 10000  # Adjust this value based on your memory constraints
    reader = pd.read_csv(input_file, chunksize=chunk_size)
    # Prepare the output file
    first_chunk = True
    # Number of worker processes
    num_workers = 8  # Adjust based on your CPU cores
    # Batch size for chunks to process in parallel
    batch_size = num_workers * 4  # Adjust based on memory constraints
    chunk_list = []
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for chunk in tqdm(reader, desc=f'Reading chunks from {os.path.basename(input_file)}'):
            chunk_list.append(chunk)
            if len(chunk_list) == batch_size:
                # Process batch of chunks in parallel
                futures = [executor.submit(process_chunk, c) for c in chunk_list]
                for future in tqdm(futures, desc='Processing batch', leave=False):
                    filtered_chunk = future.result()
                    if not filtered_chunk.empty:
                        if first_chunk:
                            filtered_chunk.to_csv(output_file, mode='w', index=False)
                            first_chunk = False
                        else:
                            filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
                chunk_list = []
        # Process any remaining chunks
        if chunk_list:
            futures = [executor.submit(process_chunk, c) for c in chunk_list]
            for future in tqdm(futures, desc='Processing last batch', leave=False):
                filtered_chunk = future.result()
                if not filtered_chunk.empty:
                    if first_chunk:
                        filtered_chunk.to_csv(output_file, mode='w', index=False)
                        first_chunk = False
                    else:
                        filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
    print(f'Finished processing {input_file}')
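# Illustrative usage sketch (hypothetical paths): filtering a single dump file.
#   process_file('/media/joe/512-3/csv/CC-MAIN-2013/train-00001-of-00001.csv',
#                '/media/joe/512-3/csv/CC-MAIN-2013/fin_train-00001-of-00001.csv')
# The output keeps the input schema, with 'score' replaced by the keyword count
# and only rows at or above the threshold retained.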
# List of directories to process
data_dir = '/media/joe/512-3/csv'
years = [f'CC-MAIN-{year}' for year in range(2013, 2025)] # Adjust years as needed
directories = [os.path.join(data_dir, year) for year in years]
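# With the default data_dir this yields paths such as
# '/media/joe/512-3/csv/CC-MAIN-2013' through '/media/joe/512-3/csv/CC-MAIN-2024'.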
# Process each CSV file in each directory
for dir_path in directories:
    if not os.path.isdir(dir_path):
        print(f'Directory not found: {dir_path}')
        continue
    csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
    print(f'Found {len(csv_files)} CSV files in {dir_path}')
    for input_file in csv_files:
        # Construct the output file name alongside the input.
        # Example: if input_file is '/path/CC-MAIN-2013/train-00001-of-00001.csv',
        # the output file will be '/path/CC-MAIN-2013/fin_train-00001-of-00001.csv'.
        base_name = os.path.basename(input_file)
        output_file = os.path.join(dir_path, 'fin_' + base_name)
        # Check if the output file already exists to avoid reprocessing
        if os.path.exists(output_file):
            print(f'Output file already exists. Skipping: {output_file}')
            continue
        process_file(input_file, output_file)