{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Warning : `load_model` does not return WordVectorModel or SupervisedModel any more, but a `FastText` object which is very similar.\n" ] } ], "source": [ "import pandas as pd\n", "import fasttext\n", "import json\n", "import polars as pl\n", "\n", "PRETRAINED_MODEL_PATH = 'langdetect_model/lid.176.bin'\n", "model = fasttext.load_model(PRETRAINED_MODEL_PATH) " ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "def load_file(path):\n", " df = pl.DataFrame(columns = ['subreddit', 'body'])\n", "\n", " count = 0\n", " with open(path, 'r', encoding='utf-8') as file:\n", " data = file.readlines()\n", " \n", " data = [json.loads(message) for message in data]\n", " df = pd.DataFrame(data)\n", " data = None\n", " df = df[['subreddit', 'body']]\n", " df = pl.DataFrame(df)\n", " print(f'amount of rows in read file: {len(df)}')\n", " df = df.unique(subset=[\"body\"])\n", " print(f'unique rows in read file: {len(df)}')\n", " df = df.filter((pl.col(\"body\").str.lengths() > 30))\n", " print(f'unique rows with len over 30: {len(df)}')\n", " #df = df.filter(pl.col(\"body\").len() > 30)\n", " return df" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "import re\n", "\n", "def pred_lang(row):\n", " try:\n", " pred = model.predict(str(re.sub('\\n', '', str(row[1]))))\n", " row = row + (pred[0][0],)\n", " row = row + (pred[1][0],)\n", " except Exception as e:\n", " row = row + ('could_not_predict',)\n", " row = row + ('could_not_predict',)\n", " return row\n", "\n", " " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Process year by year\n", "process_years = ['2011', '2012']\n", "\n", "for process_year in process_years:\n", " filepaths = [os.getcwd() + os.sep + process_year + os.sep + filepath for filepath in os.listdir(os.getcwd() + os.sep + process_year) if filepath.endswith('.zst') == False]\n", " print(f\"Starting year: {process_year}\")\n", " for i, filepath in enumerate(filepaths):\n", " print(f'{i+1}/{len(filepaths)}')\n", " if i == 0:\n", " print(f'loading file: {filepaths[i]}')\n", " df = load_file(filepaths[i])\n", " df = df.apply(lambda row: pred_lang(row))\n", " print(f'amount of rows in read file after filtering: {len(df)}')\n", " df = df.rename({\"column_0\": \"subreddit\", \"column_1\": 'body', \"column_2\": 'label', \"column_3\": 'proba'})\n", " df = df.filter(pl.col(\"label\").str.contains('fi'))\n", " df = df.filter(pl.col(\"proba\") > 0.5)\n", " else:\n", " print(\"in else\")\n", " new_df = load_file(filepaths[i])\n", " new_df = new_df.apply(pred_lang)\n", " new_df = new_df.rename({\"column_0\": \"subreddit\", \"column_1\": 'body', \"column_2\": 'label', \"column_3\": 'proba'})\n", " new_df = new_df.filter(pl.col(\"label\").str.contains('fi'))\n", " new_df = new_df.filter(pl.col(\"proba\") > 0.5)\n", " print(f\"amount of new rows in file to add: {len(new_df)}\")\n", " df.extend(new_df)\n", " print(len(df))\n", " print('\\n')\n", " df.write_csv(f'processed{os.sep}{process_year}_data.csv')\n", " print('\\n')\n", " print('\\n')\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Process file by file\n", "process_years = ['2012']\n", "\n", "\n", "for process_year in process_years:\n", " filepaths = [os.getcwd() + os.sep + process_year + os.sep + filepath for filepath in os.listdir(os.getcwd() + os.sep + process_year) if filepath.endswith('.zst') == False]\n", " filepaths = filepaths[6::]\n", " print(f\"Starting year: {process_year}\")\n", " for i, filepath in enumerate(filepaths):\n", " print(f'{i+1}/{len(filepaths)}')\n", " print(f'loading file: {filepaths[i]}')\n", " df = load_file(filepaths[i])\n", " df = df.apply(lambda row: pred_lang(row))\n", " print(f'amount of rows in read file after filtering: {len(df)}')\n", " df = df.rename({\"column_0\": \"subreddit\", \"column_1\": 'body', \"column_2\": 'label', \"column_3\": 'proba'})\n", " df = df.filter(pl.col(\"label\").str.contains('fi'))\n", " df = df.filter(pl.col(\"proba\") > 0.5)\n", " df.write_csv(f'processed{os.sep}{process_year}_{i+1}_data.csv')\n", " print('\\n')\n", " print('\\n')\n" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['i:\\\\NLP_Datasets\\\\Reddit\\\\2022\\\\RC_2022-01.zst']\n", "Starting year: 2022\n", "1/1\n" ] }, { "ename": "UnicodeDecodeError", "evalue": "'charmap' codec can't decode byte 0x8d in position 7292: character maps to ", "output_type": "error", "traceback": [ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[1;31mUnicodeDecodeError\u001b[0m Traceback (most recent call last)", "Cell \u001b[1;32mIn[25], line 45\u001b[0m\n\u001b[0;32m 43\u001b[0m records \u001b[39m=\u001b[39m \u001b[39mmap\u001b[39m(json\u001b[39m.\u001b[39mloads, read_lines_from_zst_file(file))\n\u001b[0;32m 44\u001b[0m datas \u001b[39m=\u001b[39m []\n\u001b[1;32m---> 45\u001b[0m \u001b[39mfor\u001b[39;00m record \u001b[39min\u001b[39;00m records:\n\u001b[0;32m 46\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(record\u001b[39m.\u001b[39mget(\u001b[39m'\u001b[39m\u001b[39mbody\u001b[39m\u001b[39m'\u001b[39m)) \u001b[39m>\u001b[39m \u001b[39m30\u001b[39m:\n\u001b[0;32m 47\u001b[0m datas\u001b[39m.\u001b[39mappend((\u001b[39mstr\u001b[39m(record\u001b[39m.\u001b[39mget(\u001b[39m'\u001b[39m\u001b[39msubreddit\u001b[39m\u001b[39m'\u001b[39m)), \u001b[39mstr\u001b[39m(record\u001b[39m.\u001b[39mget(\u001b[39m'\u001b[39m\u001b[39mcreated_utc\u001b[39m\u001b[39m'\u001b[39m)),\u001b[39mstr\u001b[39m(record\u001b[39m.\u001b[39mget(\u001b[39m'\u001b[39m\u001b[39mscore\u001b[39m\u001b[39m'\u001b[39m)),\u001b[39mstr\u001b[39m(record\u001b[39m.\u001b[39mget(\u001b[39m'\u001b[39m\u001b[39mbody\u001b[39m\u001b[39m'\u001b[39m))))\n", "Cell \u001b[1;32mIn[25], line 19\u001b[0m, in \u001b[0;36mread_lines_from_zst_file\u001b[1;34m(zstd_file_path)\u001b[0m\n\u001b[0;32m 14\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mread_lines_from_zst_file\u001b[39m(zstd_file_path:Path):\n\u001b[0;32m 15\u001b[0m \u001b[39mwith\u001b[39;00m (\n\u001b[0;32m 16\u001b[0m zstd\u001b[39m.\u001b[39mopen(zstd_file_path, mode\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mrb\u001b[39m\u001b[39m'\u001b[39m, dctx\u001b[39m=\u001b[39mDCTX, encoding\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mutf-8\u001b[39m\u001b[39m'\u001b[39m, errors\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mignore\u001b[39m\u001b[39m'\u001b[39m) \u001b[39mas\u001b[39;00m zfh,\n\u001b[0;32m 17\u001b[0m io\u001b[39m.\u001b[39mTextIOWrapper(zfh) \u001b[39mas\u001b[39;00m iofh\n\u001b[0;32m 18\u001b[0m ):\n\u001b[1;32m---> 19\u001b[0m \u001b[39mfor\u001b[39;00m line \u001b[39min\u001b[39;00m iofh:\n\u001b[0;32m 20\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m 21\u001b[0m \u001b[39myield\u001b[39;00m line\n", "File \u001b[1;32mf:\\tools\\Anaconda3\\envs\\redditEnv\\lib\\encodings\\cp1252.py:23\u001b[0m, in \u001b[0;36mIncrementalDecoder.decode\u001b[1;34m(self, input, final)\u001b[0m\n\u001b[0;32m 22\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mdecode\u001b[39m(\u001b[39mself\u001b[39m, \u001b[39minput\u001b[39m, final\u001b[39m=\u001b[39m\u001b[39mFalse\u001b[39;00m):\n\u001b[1;32m---> 23\u001b[0m \u001b[39mreturn\u001b[39;00m codecs\u001b[39m.\u001b[39;49mcharmap_decode(\u001b[39minput\u001b[39;49m,\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49merrors,decoding_table)[\u001b[39m0\u001b[39m]\n", "\u001b[1;31mUnicodeDecodeError\u001b[0m: 'charmap' codec can't decode byte 0x8d in position 7292: character maps to " ] } ], "source": [ "# In use 14 GB, 1min 46.5s\n", "import pandas as pd\n", "import io\n", "import zstandard as zstd\n", "from pathlib import Path\n", "import json\n", "import os\n", "import sys\n", "\n", "virhe_count = 0\n", "\n", "DCTX = zstd.ZstdDecompressor(max_window_size=2**31)\n", "\n", "def read_lines_from_zst_file(zstd_file_path:Path):\n", " with (\n", " zstd.open(zstd_file_path, mode='rb', dctx=DCTX, encoding='utf-8', errors='ignore') as zfh,\n", " io.TextIOWrapper(zfh) as iofh\n", " ):\n", " for line in iofh:\n", " try:\n", " yield line\n", " except Exception as e:\n", " virhe_count +=1\n", " if virhe_count % 1000 == 0:\n", " print(f'virhe_count: {virhe_count}')\n", " pass\n", "\n", "\n", "\n", "process_years = ['2022']\n", "file_counter = 1\n", "\n", "for process_year in process_years:\n", " filepaths = [os.getcwd() + os.sep + process_year + os.sep + filepath for filepath in os.listdir(os.getcwd() + os.sep + process_year) if filepath.endswith('.zst')]\n", " filepaths = filepaths[0:1]\n", " print(filepaths)\n", " \n", " print(f\"Starting year: {process_year}\")\n", " for i, filepath in enumerate(filepaths):\n", " file_counter = 1\n", " print(f'{i+1}/{len(filepaths)}')\n", " file = Path(filepath)\n", " records = map(json.loads, read_lines_from_zst_file(file))\n", " datas = []\n", " for record in records:\n", " if len(record.get('body')) > 30:\n", " datas.append((str(record.get('subreddit')), str(record.get('created_utc')),str(record.get('score')),str(record.get('body'))))\n", " if len(datas) % 1000000 == 0:\n", " print(len(datas))\n", " #print(f'{sys.getsizeof(datas) / (1024 * 1024)} MegaBytes')\n", " if len(datas) > 10000000:\n", " df = pd.DataFrame(datas)\n", " df = df.rename(columns={0:'subreddit', 1:'created_utc', 2:'score', 3:'body'})\n", " df.to_parquet(f'{str(process_year) + os.sep}{filepath.split(os.sep)[-1].replace(\".zst\",\"\")}_{file_counter}.parquet')\n", " file_counter +=1\n", " datas = []\n", " \n", " df = pd.DataFrame(datas)\n", " df = df.rename(columns={0:'subreddit', 1:'created_utc', 2:'score', 3:'body'})\n", " df.to_parquet(f'{str(process_year) + os.sep}{filepath.split(os.sep)[-1].replace(\".zst\",\"\")}_{file_counter}.parquet') \n", " \n", "\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "import re\n", "\n", "def pred_lang(row):\n", " try:\n", " pred = model.predict(str(re.sub('\\n', '', str(row[3]))))\n", " row = row + (pred[0][0],)\n", " row = row + (pred[1][0],)\n", " except Exception as e:\n", " row = row + ('could_not_predict','could_not_predict')\n", " return row\n", "\n", "def pred_lang_pd(row):\n", " try:\n", " pred = model.predict(str(re.sub('\\n', '', str(row['body']))))\n", " row['predicted_language'] = pred[0][0]\n", " row['proba'] = pred[1][0]\n", " except Exception as e:\n", " row['predicted_language'] = 'could_not_predict'\n", " row['proba'] = 'could_not_predict'\n", " return row\n", "\n" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "2/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "3/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "4/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "5/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "6/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "7/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "8/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_16.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "9/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_17.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "10/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_18.parquet\n", "original len of read file: 3600265\n", "\n", "\n", "\n", "\n", "11/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "12/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "13/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "14/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "15/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "16/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "17/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "18/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-01_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "19/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "20/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "21/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "22/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "23/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "24/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "25/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_15.parquet\n", "original len of read file: 8082107\n", "\n", "\n", "\n", "\n", "26/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "27/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "28/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "29/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "30/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "31/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "32/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "33/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-02_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "34/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "35/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "36/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "37/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "38/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "39/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "40/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "41/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_16.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "42/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_17.parquet\n", "original len of read file: 2166114\n", "\n", "\n", "\n", "\n", "43/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "44/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "45/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "46/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "47/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "48/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "49/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "50/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-03_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "51/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "52/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "53/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "54/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "55/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "56/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "57/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "58/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_16.parquet\n", "original len of read file: 7580212\n", "\n", "\n", "\n", "\n", "59/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "60/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "61/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "62/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "63/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "64/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "65/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "66/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-04_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "67/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "68/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "69/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "70/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "71/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "72/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "73/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "74/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_16.parquet\n", "original len of read file: 9677905\n", "\n", "\n", "\n", "\n", "75/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "76/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "77/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "78/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "79/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "80/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "81/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "82/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-05_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "83/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "84/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "85/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "86/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "87/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "88/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "89/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "90/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_16.parquet\n", "original len of read file: 2232978\n", "\n", "\n", "\n", "\n", "91/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "92/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "93/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "94/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "95/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "96/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "97/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "98/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-06_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "99/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "100/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "101/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "102/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "103/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "104/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "105/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "106/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_16.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "107/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_17.parquet\n", "original len of read file: 7713277\n", "\n", "\n", "\n", "\n", "108/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "109/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "110/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "111/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "112/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "113/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "114/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "115/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-07_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "116/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "117/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "118/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "119/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "120/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "121/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "122/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "123/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_16.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "124/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_17.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "125/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_18.parquet\n", "original len of read file: 556106\n", "\n", "\n", "\n", "\n", "126/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "127/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "128/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "129/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "130/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "131/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "132/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "133/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-08_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "134/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "135/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "136/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "137/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "138/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "139/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "140/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "141/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_16.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "142/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_17.parquet\n", "original len of read file: 1191472\n", "\n", "\n", "\n", "\n", "143/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "144/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "145/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "146/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "147/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "148/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "149/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "150/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-09_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "151/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "152/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "153/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "154/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "155/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "156/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "157/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "158/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_16.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "159/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_17.parquet\n", "original len of read file: 3059217\n", "\n", "\n", "\n", "\n", "160/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "161/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "162/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "163/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "164/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "165/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "166/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "167/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-10_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "168/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "169/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "170/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "171/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "172/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "173/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "174/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "175/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_16.parquet\n", "original len of read file: 7883427\n", "\n", "\n", "\n", "\n", "176/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "177/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "178/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "179/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "180/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "181/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "182/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "183/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-11_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "184/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_1.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "185/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_10.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "186/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_11.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "187/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_12.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "188/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_13.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "189/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_14.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "190/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_15.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "191/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_16.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "192/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_17.parquet\n", "original len of read file: 3330060\n", "\n", "\n", "\n", "\n", "193/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_2.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "194/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_3.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "195/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_4.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "196/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_5.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "197/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_6.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "198/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_7.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "199/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_8.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n", "200/200\n", "loading file: i:\\NLP_Datasets\\Reddit\\2022\\RC_2022-12_9.parquet\n", "original len of read file: 10000001\n", "\n", "\n", "\n", "\n" ] } ], "source": [ "process_years = ['2022']\n", "file_counter = 1\n", "\n", "for process_year in process_years:\n", " filepaths = [os.getcwd() + os.sep + process_year + os.sep + filepath for filepath in os.listdir(os.getcwd() + os.sep + process_year) if filepath.endswith('.parquet') and 'processed' not in filepath]\n", " #filepaths = filepaths[6]\n", " filepaths_fi = [os.getcwd() + os.sep + 'finnish' + os.sep + process_year + os.sep + filepath for filepath in os.listdir(os.getcwd() + os.sep + process_year) if filepath.endswith('.parquet') and 'processed' not in filepath]\n", " #filepaths_fi = filepaths_fi[69:]\n", " for i, filepath in enumerate(filepaths):\n", " print(f'{i+1}/{len(filepaths)}')\n", " print(f'loading file: {filepaths[i]}')\n", " pl_df = pl.read_parquet(filepath)\n", " print(f'original len of read file: {len(pl_df)}')\n", " pl_df = pl_df.apply(lambda row: pred_lang(row))\n", " pl_df = pl_df.rename({'column_0': 'subreddit', 'column_1': 'created_utc', 'column_2': 'score', 'column_3': 'body', 'column_4':'predicted_language', 'column_5': 'probability'})\n", " pl_df = pl_df.filter(pl.col(\"probability\") > 0.7)\n", " pl_df.write_parquet(f'{filepath.replace(\".parquet\", \"_processed.parquet\")}')\n", " pl_df = pl_df.filter(pl.col(\"predicted_language\").str.contains('fi'))\n", " pl_df.write_parquet(f'{filepaths_fi[i].replace(\".parquet\", \"_processed.parquet\")}')\n", " print('\\n')\n", " print('\\n')" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3.8.8 64-bit ('Anaconda3')", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.8" }, "vscode": { "interpreter": { "hash": "f49206fcf84a9145e7e21228cbafa911d1ac18292303b01e865d8267a9c448f7" } } }, "nbformat": 4, "nbformat_minor": 2 }