|
import pandas as pd |
|
import re |
|
from concurrent.futures import ProcessPoolExecutor |
|
from tqdm import tqdm |
|
import os |
|
import glob |
|
|
|
|
|
# Regex fragments for finance/economics vocabulary.  Every entry is wrapped
# in \b word boundaries so e.g. "oil" will not match inside "boiler".
financial_keywords = [
    r"\bfinance\b", r"\bfinancial\b", r"\beconomy\b", r"\beconomic\b",
    r"\bmarket\b", r"\bstock\b", r"\bbond\b",
    r"\bshare\b", r"\basset\b", r"\bportfolio\b", r"\binvestment\b",
    r"\binvestor\b", r"\btrading\b", r"\bbroker\b",
    r"\bcommodity\b", r"\bcurrency\b", r"\bforeign exchange\b", r"\bforex\b",
    r"\bderivative\b", r"\boption\b",
    r"\bfutures\b", r"\bhedging\b", r"\brisk\b", r"\bdividend\b",
    r"\binterest\b", r"\bliquidity\b", r"\bcredit\b",
    r"\bdebt\b", r"\bcapital\b", r"\bfund\b", r"\bventure\b",
    r"\bvaluation\b", r"\bmerger\b", r"\bacquisition\b",
    r"\bIPO\b", r"\binitial public offering\b", r"\bprivate equity\b",
    r"\bhedge fund\b", r"\bmutual fund\b",
    r"\bETF\b", r"\bexchange-traded fund\b", r"\bfinancial statement\b",
    r"\bbalance sheet\b", r"\bincome statement\b",
    r"\bcash flow\b", r"\brevenue\b", r"\bprofit\b", r"\bloss\b",
    r"\bexpense\b", r"\bbudget\b", r"\bforecast\b",
    r"\banalysis\b", r"\bearnings\b", r"\bEBITDA\b", r"\bEPS\b",
    r"\bP/E ratio\b", r"\bprice to earnings\b",
    r"\bROI\b", r"\breturn on investment\b", r"\bROE\b",
    r"\breturn on equity\b", r"\bdiversification\b",
    r"\bNASDAQ\b", r"\bNYSE\b", r"\bS&P 500\b", r"\bDow Jones\b",
    r"\bFTSE\b", r"\bNikkei\b", r"\bcommodities\b",
    r"\bgold\b", r"\bsilver\b", r"\boil\b", r"\bGDP\b",
    r"\bgross domestic product\b", r"\binflation\b",
    r"\bunemployment\b", r"\binterest rate\b", r"\bfederal reserve\b",
    r"\bcentral bank\b", r"\bmonetary policy\b",
    r"\bquantitative easing\b", r"\bfiscal policy\b", r"\btax\b",
    r"\btreasury\b", r"\bbudget deficit\b",
    r"\bnational debt\b", r"\bcredit rating\b", r"\bstandard & poor's\b",
    r"\bmoody's\b", r"\bfitch\b",
    r"\bsovereign wealth fund\b", r"\binternational monetary fund\b",
    r"\bIMF\b", r"\bworld bank\b",
    r"\bbasel III\b", r"\bdodd-frank\b", r"\bfinancial regulation\b",
    r"\binsurance\b", r"\breal estate\b",
    r"\bmortgage\b", r"\bloan\b", r"\bbank\b", r"\bbanking\b",
    r"\bfintech\b", r"\bblockchain\b", r"\bcryptocurrency\b",
    r"\bbitcoin\b", r"\bethereum\b", r"\bsmart contract\b",
    r"\bdigital currency\b", r"\bdecentralized finance\b",
]

# Single non-capturing alternation matching any one keyword.
financial_regex = f"(?:{'|'.join(financial_keywords)})"
|
|
|
|
|
def process_chunk(chunk, threshold=14):
    """Keep the rows of *chunk* that contain enough financial keywords.

    Counts case-insensitive matches of the module-level ``financial_regex``
    in the ``score``, ``url`` and ``text`` columns, sums the per-row counts,
    and returns the rows whose total is at least *threshold*.  The ``score``
    column of the returned frame is overwritten with that total.

    Parameters
    ----------
    chunk : pandas.DataFrame
        Must contain ``score``, ``url`` and ``text`` columns.
    threshold : int, optional
        Minimum combined keyword count for a row to survive (default 14,
        matching the original hard-coded value).

    Returns
    -------
    pandas.DataFrame
        A filtered copy of *chunk* (possibly empty).
    """
    # astype(str) guards against non-string cells (NaN becomes the string
    # "nan"); fillna(0) is kept as belt-and-braces against missing counts.
    # NOTE(review): counting keywords in the numeric-looking 'score' column
    # mirrors the original behavior — confirm this is intentional.
    match_counts = sum(
        chunk[col].astype(str)
        .str.count(financial_regex, flags=re.IGNORECASE)
        .fillna(0)
        for col in ("score", "url", "text")
    ).astype(int)

    # Compute the boolean mask once instead of re-evaluating it per use.
    keep = match_counts >= threshold
    filtered_chunk = chunk[keep].copy()
    # Replace the original score with the keyword-hit total for kept rows
    # (the original did this via a temporary 'financial_score' column).
    filtered_chunk['score'] = match_counts[keep]
    return filtered_chunk
|
|
|
|
|
def _write_results(futures, output_file, first_chunk, desc):
    """Await *futures* and append their non-empty results to *output_file*.

    The CSV header is written only for the very first non-empty result;
    every later result is appended header-less.  Returns the updated
    ``first_chunk`` flag so the caller can thread it through batches.
    """
    for future in tqdm(futures, desc=desc, leave=False):
        filtered_chunk = future.result()
        if filtered_chunk.empty:
            continue
        if first_chunk:
            filtered_chunk.to_csv(output_file, mode='w', index=False)
            first_chunk = False
        else:
            filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
    return first_chunk


def process_file(input_file, output_file):
    """Filter one CSV for finance-related rows, streaming to *output_file*.

    Reads *input_file* in 10,000-row chunks, fans batches of chunks out to a
    process pool running :func:`process_chunk`, and appends surviving rows to
    *output_file*.  Note: if no row anywhere clears the threshold, the output
    file is never created.
    """
    chunk_size = 10000
    num_workers = 8
    # A few batches' worth of chunks per submission keeps all workers busy
    # without buffering the whole file in memory.
    batch_size = num_workers * 4

    reader = pd.read_csv(input_file, chunksize=chunk_size)
    first_chunk = True
    chunk_list = []
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for chunk in tqdm(reader, desc=f'Reading chunks from {os.path.basename(input_file)}'):
            chunk_list.append(chunk)
            if len(chunk_list) == batch_size:
                futures = [executor.submit(process_chunk, c) for c in chunk_list]
                first_chunk = _write_results(futures, output_file, first_chunk,
                                             'Processing batch')
                chunk_list = []

        # Flush whatever is left over after the last full batch.
        if chunk_list:
            futures = [executor.submit(process_chunk, c) for c in chunk_list]
            first_chunk = _write_results(futures, output_file, first_chunk,
                                         'Processing last batch')
    print(f'Finished processing {input_file}')
|
|
|
|
|
def main():
    """Walk each Common Crawl year directory and filter every CSV in it."""
    data_dir = '/media/joe/512-3/csv'
    years = [f'CC-MAIN-{year}' for year in range(2013, 2025)]
    directories = [os.path.join(data_dir, year) for year in years]

    for dir_path in directories:
        if not os.path.isdir(dir_path):
            print(f'Directory not found: {dir_path}')
            continue
        csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
        print(f'Found {len(csv_files)} CSV files in {dir_path}')
        for input_file in csv_files:
            base_name = os.path.basename(input_file)
            # BUG FIX: outputs are written into the directory being globbed,
            # so a re-run would pick up earlier 'fin_*.csv' outputs and
            # produce 'fin_fin_*.csv'. Skip files that are already outputs.
            if base_name.startswith('fin_'):
                continue
            output_file = os.path.join(dir_path, 'fin_' + base_name)
            if os.path.exists(output_file):
                print(f'Output file already exists. Skipping: {output_file}')
                continue
            process_file(input_file, output_file)


# Guard the entry point: under the 'spawn' start method, ProcessPoolExecutor
# workers re-import this module, which would re-run the whole pipeline
# recursively without this guard.
if __name__ == '__main__':
    main()
|
|
|
|