{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Filters on the original amazon reviews dataset:\n",
    "1. coreset (reviewers who reviewed at least five things and products with at least five reviews)\n",
    "2. helpful (reviews with more helpful upvotes than unhelpful upvotes - requires at least one upvote)\n",
    "3. sentiment non-ambiguity (must be rated 1, 3, or 5 -- a 2 or a 4 is ambiguous: a review is clearly positive, neutral, or negative, but a 2 or 4 sits in between with no way to verify its sentiment, so all 2s and 4s are dropped)\n",
    "4. non-empty\n",
    "\n",
    "This results in ~ 10 million reviews."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# These imports enhance Python2/3 compatibility.\n",
    "from __future__ import print_function, absolute_import, division, unicode_literals, with_statement"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "from cleanlab.models.fasttext import FastTextClassifier, data_loader\n",
    "import cleanlab\n",
    "import numpy as np\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.model_selection import ParameterGrid\n",
    "import os\n",
    "import sys\n",
    "import multiprocessing\n",
    "from datetime import datetime as dt\n",
    "import pickle5 as pickle  # Helps with backporting"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def file_len(fname):\n",
    "    with open(fname) as f:\n",
    "        for i, l in enumerate(f):\n",
    "            pass\n",
    "    return i + 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Experiment configuration. The three paths below are machine-specific\n",
    "# absolute paths -- NOTE(review): update them for your own setup.\n",
    "cpu_threads = 32\n",
    "cv_n_folds = 5 # Increasing more improves pyx, at great cost.\n",
    "seed = 0\n",
    "lr = .01  # fastText learning rate\n",
    "ngram = 3  # fastText word n-gram length\n",
    "epochs = 10 # Increasing more doesn't do much.\n",
    "dim = 100  # fastText embedding dimension\n",
    "data_dir = '/datasets/datasets/amazon5core/'  # preprocessed train/test text files\n",
    "pyx_dir = '/datasets/cgn/pyx/amazon/'  # cross-validated pred-prob .npy files\n",
    "cur_dir = '/home/cgn/cgn/cleanlab/examples/amazon_reviews_dataset/'  # pickled mask output dir"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Compute cross-validated pred probs on train set."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute 5-fold cross-validated predicted probabilities on train set.\n",
    "# Try two different train sets (every 10th vs 11th item sent to test set)\n",
    "#   to make sure the choice of test set doesn't influence the results.\n",
    "# Guard flag: flip to False to (re)compute. Kept True so Restart-&-Run-All\n",
    "# skips this expensive step and later cells reuse the .npy files it wrote.\n",
    "crossval_already_done = True\n",
    "if not crossval_already_done:\n",
    "    for test_split in [10, 11]:\n",
    "        train_fn = data_dir + 'train_{}_amazon5core.preprocessed.txt'.format(test_split)\n",
    "        # Get labels\n",
    "        noisy_labels = np.empty(file_len(train_fn), dtype=int)\n",
    "        bs = 1000000\n",
    "        # Map fastText string labels (star ratings 1/3/5) to class ids 0/1/2.\n",
    "        label_map = {'__label__1':0, '__label__3':1, '__label__5':2}\n",
    "        # Fill labels batch-by-batch; numpy slice assignment automatically\n",
    "        # truncates the final (possibly partial) batch.\n",
    "        for i, (l, t) in enumerate(data_loader(train_fn, batch_size=bs)):\n",
    "            noisy_labels[bs*i:bs*(i+1)] = [label_map[lab] for lab in l]\n",
    "\n",
    "        ftc = FastTextClassifier(\n",
    "            train_data_fn=train_fn, \n",
    "            batch_size=100000, \n",
    "            labels=[1, 3, 5],\n",
    "            kwargs_train_supervised = {\n",
    "                'epoch': epochs,\n",
    "                'thread': cpu_threads,\n",
    "                'lr': lr,\n",
    "                'wordNgrams': ngram,\n",
    "                'bucket': 200000,\n",
    "                'dim': dim,\n",
    "                'loss': 'softmax', #'softmax', # 'hs'\n",
    "            },\n",
    "        )\n",
    "        # X holds row indices only; the text itself comes from train_fn,\n",
    "        # which the classifier was constructed with above.\n",
    "        pyx = cleanlab.latent_estimation.estimate_cv_predicted_probabilities(\n",
    "            X=np.arange(len(noisy_labels)),\n",
    "            labels=noisy_labels,\n",
    "            clf=ftc,\n",
    "            cv_n_folds=cv_n_folds,\n",
    "            seed=seed,\n",
    "        )\n",
    "        # Write out\n",
    "        wfn = pyx_dir + 'amazon_pyx_train_{}_cv__folds_{}__epochs_{}__lr_{}__ngram_{}__dim_{}.npy'.format(\n",
    "            test_split, cv_n_folds, epochs, lr, ngram, dim)\n",
    "        with open(wfn, 'wb') as wf:\n",
    "            np.save(wf, pyx)\n",
    "\n",
    "        # Check that probabilities are good.\n",
    "        print(\"pyx finished. Writing:\", wfn)\n",
    "        acc = accuracy_score(noisy_labels, np.argmax(pyx, axis=1))\n",
    "        print('Acc: {:.2%}'.format(acc))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Find noise with confident learning"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Guard flag: flip to False to redo noise detection. Kept True so a full\n",
    "# re-run reuses the pickled masks and cleaned dataset files written below.\n",
    "finding_noise_already_done = True\n",
    "if not finding_noise_already_done:\n",
    "    for test_split in [10, 11]:\n",
    "        print('Test split: every {}th item.'.format(test_split))\n",
    "        train_fn = data_dir + 'train_{}_amazon5core.preprocessed.txt'.format(test_split)\n",
    "        # Get labels\n",
    "        noisy_labels = np.empty(file_len(train_fn), dtype=int)\n",
    "        bs = 1000000\n",
    "        # Map fastText string labels (star ratings 1/3/5) to class ids 0/1/2.\n",
    "        label_map = {'__label__1':0, '__label__3':1, '__label__5':2}\n",
    "        for i, (l, t) in enumerate(data_loader(train_fn, batch_size=bs)):\n",
    "            noisy_labels[bs*i:bs*(i+1)] = [label_map[lab] for lab in l]\n",
    "\n",
    "        # Read in cross-validated predicted probs\n",
    "        rfn = pyx_dir + 'amazon_pyx_train_{}_cv__folds_{}__epochs_{}__lr_{}__ngram_{}__dim_{}.npy'.format(\n",
    "            test_split, cv_n_folds, epochs, lr, ngram, dim)\n",
    "        with open(rfn, 'rb') as rf:\n",
    "            pyx = np.load(rf)\n",
    "        acc = accuracy_score(noisy_labels, np.argmax(pyx, axis=1))\n",
    "        print('Cross-val Acc: {:.2%}'.format(acc))\n",
    "\n",
    "        # Find noise masks with confident learning methods\n",
    "        # Estimate the confident joint, a proxy for the joint distribution of label noise.\n",
    "        cj, cj_only_label_error_indices = cleanlab.latent_estimation.compute_confident_joint(\n",
    "            noisy_labels, pyx,\n",
    "            return_indices_of_off_diagonals=True,\n",
    "        )\n",
    "        py, nm, inv = cleanlab.latent_estimation.estimate_latent(cj, noisy_labels)\n",
    "\n",
    "        # Five CL methods for finding label errors.\n",
    "        # Convert the off-diagonal index list into a boolean error mask.\n",
    "        cj_only_bool_mask = np.zeros(len(noisy_labels), dtype=bool)\n",
    "        for idx in cj_only_label_error_indices:\n",
    "            cj_only_bool_mask[idx] = True\n",
    "\n",
    "        argmax_bool_mask = cleanlab.baseline_methods.baseline_argmax(pyx, noisy_labels)\n",
    "\n",
    "        cl_pbc_bool_mask = cleanlab.pruning.get_noise_indices(\n",
    "            noisy_labels, pyx, confident_joint=cj,\n",
    "            prune_method='prune_by_class')\n",
    "\n",
    "        cl_pbnr_bool_mask = cleanlab.pruning.get_noise_indices(\n",
    "            noisy_labels, pyx, confident_joint=cj,\n",
    "            prune_method='prune_by_noise_rate')\n",
    "\n",
    "        cl_both_bool_mask = cleanlab.pruning.get_noise_indices(\n",
    "            noisy_labels, pyx, confident_joint=cj,\n",
    "            prune_method='both')\n",
    "        \n",
    "        # True if clean data, False if noisy\n",
    "        cl_masks = {\n",
    "            'cj_only': ~cj_only_bool_mask,\n",
    "            'argmax': ~argmax_bool_mask,\n",
    "            'pbc': ~cl_pbc_bool_mask,\n",
    "            'pbnr': ~cl_pbnr_bool_mask,\n",
    "            'both': ~cl_both_bool_mask,\n",
    "        }\n",
    "\n",
    "        # Find the errors that all CL methods agree on.\n",
    "        # (The first mask is AND-ed in twice -- once as the seed and once in\n",
    "        # the loop -- which is harmless because intersection is idempotent.)\n",
    "        common_errors = ~(list(cl_masks.values())[0])\n",
    "        for l in cl_masks.values():\n",
    "            common_errors = common_errors & ~l\n",
    "        cl_masks['cl_intersection_all_methods'] = ~common_errors\n",
    "        \n",
    "        with open(cur_dir + 'pickles/cl_masks_{}.p'.format(test_split), 'wb') as handle:\n",
    "            pickle.dump(cl_masks, handle, protocol=pickle.HIGHEST_PROTOCOL)\n",
    "\n",
    "        print('Generating cleaned versions of the dataset for each CL method.')\n",
    "        # Stream the preprocessed file and keep only rows the mask marks clean.\n",
    "        for name, mask in list(cl_masks.items()):\n",
    "            print(name, end=' | ')\n",
    "            with open(data_dir + 'train_{}_amazon5core.preprocessed.txt'.format(\n",
    "                    test_split), 'r') as rf:\n",
    "                with open(data_dir + 'train_{}_amazon5core.{}.txt'.format(\n",
    "                        test_split, name), 'w') as wf:          \n",
    "                    for i, line in enumerate(rf):\n",
    "                        if mask[i]:\n",
    "                            print(line, end='', file=wf)\n",
    "        print('Done!')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Benchmark CL methods versus Vanilla training\n",
    "* on a random 1 million example subset of Amazon Reviews data\n",
    "\n",
    "### Bash script example: running five trials of this script on five machines.\n",
    "\n",
    "```\n",
    "# On another machine\n",
    "seed=0\n",
    "trainsize=1000000\n",
    "epochs=5\n",
    "mkdir -p ~/amazon_reviews && cd ~/amazon_reviews\n",
    "{ time python /home/cgn/cgn/cleanlab/examples/amazon_reviews_dataset/compare_cl_vs_vanilla.py $seed $trainsize $epochs ; } &> \"out_seed_${seed}_trainsize_${trainsize}_epochs_${epochs}.log\" & sleep 1 && tail -f \"out_seed_${seed}_trainsize_${trainsize}_epochs_${epochs}.log\"\n",
    "\n",
    "# On another machine\n",
    "seed=1\n",
    "trainsize=1000000\n",
    "epochs=5\n",
    "mkdir -p ~/amazon_reviews && cd ~/amazon_reviews\n",
    "{ time python /home/cgn/cgn/cleanlab/examples/amazon_reviews_dataset/compare_cl_vs_vanilla.py $seed $trainsize $epochs ; } &> \"out_seed_${seed}_trainsize_${trainsize}_epochs_${epochs}.log\" & sleep 1 && tail -f \"out_seed_${seed}_trainsize_${trainsize}_epochs_${epochs}.log\"\n",
    "\n",
    "# On another machine\n",
    "seed=2\n",
    "trainsize=1000000\n",
    "epochs=5\n",
    "mkdir -p ~/amazon_reviews && cd ~/amazon_reviews\n",
    "{ time python /home/cgn/cgn/cleanlab/examples/amazon_reviews_dataset/compare_cl_vs_vanilla.py $seed $trainsize $epochs ; } &> \"out_seed_${seed}_trainsize_${trainsize}_epochs_${epochs}.log\" & sleep 1 && tail -f \"out_seed_${seed}_trainsize_${trainsize}_epochs_${epochs}.log\"\n",
    "\n",
    "# On another machine\n",
    "seed=3\n",
    "trainsize=1000000\n",
    "epochs=5\n",
    "mkdir -p ~/amazon_reviews && cd ~/amazon_reviews\n",
    "{ time python /home/cgn/cgn/cleanlab/examples/amazon_reviews_dataset/compare_cl_vs_vanilla.py $seed $trainsize $epochs ; } &> \"out_seed_${seed}_trainsize_${trainsize}_epochs_${epochs}.log\" & sleep 1 && tail -f \"out_seed_${seed}_trainsize_${trainsize}_epochs_${epochs}.log\"\n",
    "\n",
    "# On another machine\n",
    "seed=4\n",
    "trainsize=1000000\n",
    "epochs=5\n",
    "mkdir -p ~/amazon_reviews && cd ~/amazon_reviews\n",
    "{ time python /home/cgn/cgn/cleanlab/examples/amazon_reviews_dataset/compare_cl_vs_vanilla.py $seed $trainsize $epochs ; } &> \"out_seed_${seed}_trainsize_${trainsize}_epochs_${epochs}.log\" & sleep 1 && tail -f \"out_seed_${seed}_trainsize_${trainsize}_epochs_${epochs}.log\"\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Benchmark each CL cleaning method against vanilla training on a random\n",
    "# TRAIN_SIZE-example subset. This cell doubles as the body of\n",
    "# compare_cl_vs_vanilla.py (see the bash examples above), so hyperparameters\n",
    "# may arrive via sys.argv. Catch only the errors argument parsing can raise\n",
    "# (missing arg -> IndexError, non-integer -> ValueError); the previous bare\n",
    "# `except:` also swallowed KeyboardInterrupt and SystemExit.\n",
    "try:\n",
    "    seed = int(sys.argv[1])\n",
    "except (IndexError, ValueError):\n",
    "    seed = 0\n",
    "try:\n",
    "    TRAIN_SIZE = int(sys.argv[2])\n",
    "except (IndexError, ValueError):\n",
    "    TRAIN_SIZE = int(1e6)  # 1 million examples\n",
    "try:\n",
    "    epochs = int(sys.argv[3])\n",
    "except (IndexError, ValueError):\n",
    "    epochs = 20\n",
    "try:\n",
    "    cpu_threads = int(sys.argv[4])\n",
    "except (IndexError, ValueError):\n",
    "    cpu_threads = multiprocessing.cpu_count() // 2\n",
    "    \n",
    "print('Seed:', seed)\n",
    "print('Train Size:', TRAIN_SIZE)\n",
    "print('CPU threads:', cpu_threads)\n",
    "print('Epochs:', epochs)\n",
    "sys.stdout.flush()\n",
    "\n",
    "# fastText hyperparameters (match the cross-validation cell above).\n",
    "lr = .01\n",
    "ngram = 3\n",
    "dim = 100\n",
    "data_dir = '/datasets/datasets/amazon5core/'\n",
    "pyx_dir = '/datasets/cgn/pyx/amazon/'\n",
    "\n",
    "# 'preprocessed' is the uncleaned vanilla baseline; all others are CL methods.\n",
    "is_cl_method = lambda x: x != 'preprocessed'\n",
    "\n",
    "results = []\n",
    "# 'preprocessed' == vanilla baseline training with no CL.\n",
    "cl_methods = ['preprocessed', 'cl_intersection_all_methods',\n",
    "              'cj_only', 'argmax', 'pbc', 'pbnr', 'both']\n",
    "for test_split in [10, 11]:\n",
    "    np.random.seed(seed)\n",
    "    # Prepare dataset\n",
    "    # NOTE(review): pickle.load can execute arbitrary code; only unpickle\n",
    "    # the mask files this notebook wrote itself.\n",
    "    with open(cur_dir + 'pickles/cl_masks_{}.p'.format(test_split), 'rb') as handle:\n",
    "        cl_masks = pickle.load(handle)\n",
    "    common_errors = ~cl_masks['cl_intersection_all_methods']\n",
    "    print('Number of common errors among CL methods:', sum(common_errors))\n",
    "    # Choose a random subset of TRAIN_SIZE examples from the train data.\n",
    "    # All commonly-agreed errors are included so every CL method has noise\n",
    "    # to remove; the remainder is sampled at random from the clean rows.\n",
    "    noisy_idx = np.arange(len(common_errors))[common_errors]\n",
    "    clean_idx = np.arange(len(common_errors))[~common_errors]\n",
    "    train_idx = np.concatenate([noisy_idx, np.random.choice(\n",
    "        clean_idx, size=TRAIN_SIZE - len(noisy_idx), replace=False)])\n",
    "    np.random.shuffle(train_idx)\n",
    "    # Train\n",
    "    for method in cl_methods:\n",
    "        train_fn = data_dir + 'train_{}_amazon5core.preprocessed.txt'.format(test_split)\n",
    "        test_fn = data_dir + 'test_{}_amazon5core.preprocessed.txt'.format(test_split)\n",
    "        if is_cl_method(method):\n",
    "            # Remove CL-detected label errors from the training subset.\n",
    "            # (Renamed from `clean_idx`, which shadowed the outer variable.)\n",
    "            cl_noise_idx = np.arange(len(cl_masks[method]))[~cl_masks[method]]\n",
    "            kept_idx = np.array(list(set(train_idx).difference(cl_noise_idx)))\n",
    "        # Set up the fastText classifier.\n",
    "        ftc = FastTextClassifier(\n",
    "            train_data_fn=train_fn,\n",
    "            test_data_fn=test_fn,\n",
    "            batch_size=100000, \n",
    "            labels=[1, 3, 5],\n",
    "            kwargs_train_supervised = {\n",
    "                'epoch': epochs,\n",
    "                'thread': cpu_threads,\n",
    "                'lr': lr,\n",
    "                'wordNgrams': ngram,\n",
    "                'bucket': 200000,\n",
    "                'dim': dim,\n",
    "                'loss': 'softmax', #'softmax', # 'hs'\n",
    "            },\n",
    "        )\n",
    "        # X holds row indices; the classifier reads the text from train_fn.\n",
    "        X = kept_idx if is_cl_method(method) else train_idx\n",
    "        X_size = len(X)\n",
    "        ftc.fit(X=X)\n",
    "        results.append({\n",
    "            'test_split': test_split,\n",
    "            'method': 'vanilla' if method == 'preprocessed' else method,\n",
    "            'acc': ftc.score(k=1),  # top-1 accuracy on the held-out test file\n",
    "            'train_size': X_size,\n",
    "            'data_removed': TRAIN_SIZE - X_size,\n",
    "        })\n",
    "        print(results[-1])\n",
    "        sys.stdout.flush()\n",
    "print(results)\n",
    "sys.stdout.flush()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Notes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Results for entire 10 million Amazon Reviews dataset.\n",
    "# While CL does outperform vanilla training, there is too\n",
    "# much data (10 million examples) and not enough noise\n",
    "# for CL to matter much.\n",
    "# These accuracies are hard-coded records of earlier runs of the\n",
    "# benchmark cell above, kept here for reference.\n",
    "results = [\n",
    "    {'test_split': 10,\n",
    "    'method': 'vanilla',\n",
    "    'acc': 0.9088874728277995,\n",
    "    },\n",
    "    {'test_split': 10,\n",
    "    'method': 'cj_only',\n",
    "    'acc': 0.9090925460389359,\n",
    "    },\n",
    "    {'test_split': 10,\n",
    "    'method': 'argmax',\n",
    "    'acc': 0.9082052292668482,\n",
    "    },\n",
    "    {'test_split': 10,\n",
    "    'method': 'pbc',\n",
    "    'acc': 0.9099428495973062,\n",
    "    },\n",
    "    {'test_split': 10,\n",
    "    'method': 'pbnr',\n",
    "    'acc': 0.9093226281782596,\n",
    "    }, \n",
    "    {'test_split': 10,\n",
    "    'method': 'both',\n",
    "    'acc': 0.9097417778146798,\n",
    "    },\n",
    "    {'test_split': 11,\n",
    "    'method': 'vanilla',\n",
    "    'acc': 0.9093695083558272,\n",
    "    },\n",
    "    {'test_split': 11,\n",
    "     'method': 'cj_only',\n",
    "     'acc': 0.9094795475627966,\n",
    "    },\n",
    "    {'test_split': 11,\n",
    "     'method': 'argmax',\n",
    "     'acc': 0.9084814919555838,\n",
    "    },\n",
    "    {'test_split': 11,\n",
    "     'method': 'pbc',\n",
    "     'acc': 0.9101496863332406,\n",
    "    },\n",
    "    {'test_split': 11,\n",
    "     'method': 'pbnr',\n",
    "     'acc': 0.9097271357784779,\n",
    "    },\n",
    "    {'test_split': 11,\n",
    "     'method': 'both',\n",
    "     'acc': 0.9102553239719312,\n",
    "    },\n",
    "]\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
