{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "470355ef-d489-4091-8ba3-382a20d27d53",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "# Setup Environment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a90c1a94-1016-4113-8c81-9aa15aaf6b17",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Std libraries\n",
    "import os\n",
    "import re\n",
    "import gc\n",
    "import html\n",
    "import time\n",
    "import io\n",
    "import hashlib\n",
    "import tarfile\n",
    "import json\n",
    "from collections import namedtuple\n",
    "\n",
    "# 3rd party libraries\n",
    "import joblib\n",
    "import torch\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import sklearn.metrics as skm\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import Dataset, DataLoader, TensorDataset\n",
    "from tqdm.auto import tqdm\n",
    "from sklearn.manifold import TSNE\n",
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "import matplotlib.pyplot as plt\n",
    "from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModel, DataCollatorWithPadding\n",
    "from torch.amp import GradScaler, autocast\n",
    "import torch.optim as optim\n",
    "from torch.optim.lr_scheduler import LinearLR\n",
    "import matplotlib.gridspec as gridspec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bad7e3df-33f3-4588-adff-e5ab3db12370",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define dataframe columns from source\n",
    "CAMEL_NOTE_ID = \"noteId\"\n",
    "RATER_PARTICIPANT_ID = \"raterParticipantId\"\n",
    "HELPFULNESS_LEVEL_KEY = \"helpfulnessLevel\"\n",
    "HELPFUL_NUM_KEY = \"helpfulNum\"\n",
    "CORE_NOTE_INTERCEPT = \"coreNoteIntercept\"\n",
    "EXPANSION_NOTE_INTERCEPT = \"expansionNoteIntercept\"\n",
    "EXPANSION_PLUS_NOTE_INTERCEPT = \"expansionPlusNoteIntercept\"\n",
    "CORE_NOTE_FACTOR = \"coreNoteFactor1\"\n",
    "EXPANSION_NOTE_FACTOR = \"expansionNoteFactor1\"\n",
    "EXPANSION_PLUS_NOTE_FACTOR = \"expansionPlusNoteFactor1\"\n",
    "EXPANSION_RATER_INTERCEPT = \"expansionRaterIntercept\"\n",
    "EXPANSION_RATER_FACTOR = \"expansionRaterFactor1\"\n",
    "CURRENT_LABEL = \"currentStatus\"\n",
    "AUTHOR_CLASSIFICATION = \"author_classification\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4a3fe281-f594-4993-9b08-145b3cde9814",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define dataframe columns for H and NH tags\n",
    "H_TAGS = [\n",
    "  \"helpfulOther\",\n",
    "  \"helpfulClear\",\n",
    "  \"helpfulGoodSources\",\n",
    "  \"helpfulAddressesClaim\",\n",
    "  \"helpfulImportantContext\",\n",
    "  \"helpfulUnbiasedLanguage\"\n",
    "]\n",
    "NH_TAGS = [\n",
    "  \"notHelpfulOther\",\n",
    "  \"notHelpfulIncorrect\",\n",
    "  \"notHelpfulSourcesMissingOrUnreliable\",\n",
    "  \"notHelpfulMissingKeyPoints\",\n",
    "  \"notHelpfulHardToUnderstand\",\n",
    "  \"notHelpfulArgumentativeOrBiased\",\n",
    "  \"notHelpfulSpamHarassmentOrAbuse\",\n",
    "  \"notHelpfulIrrelevantSources\",\n",
    "  \"notHelpfulOpinionSpeculation\",\n",
    "  \"notHelpfulNoteNotNeeded\"\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "34e2e5eb-ee4c-4375-b3fe-ad4b9c10814f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define dataframe values from source\n",
    "HELPFUL_VALUE_TSV = \"HELPFUL\"\n",
    "NOT_HELPFUL_VALUE_TSV = \"NOT_HELPFUL\"\n",
    "SOMEWHAT_HELPFUL_VALUE_TSV = \"SOMEWHAT_HELPFUL\"\n",
    "MISINFORMED_OR_POTENTIALLY_MISLEADING = \"MISINFORMED_OR_POTENTIALLY_MISLEADING\"\n",
    "CURRENTLY_RATED_HELPFUL = \"CURRENTLY_RATED_HELPFUL\"\n",
    "NEEDS_MORE_RATINGS = \"NEEDS_MORE_RATINGS\"\n",
    "CURRENTLY_RATED_NOT_HELPFUL = \"CURRENTLY_RATED_NOT_HELPFUL\"\n",
    "NOT_MISLEADING = \"NOT_MISLEADING\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0dbceab6-bd29-429b-8695-a7d6e8d3c26f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define dataframe notebook columns related to text and language\n",
    "NOTE_ID = \"note_id\"\n",
    "TWEET_ID = \"tweet_id\"\n",
    "NOTE_TEXT = \"note_text\"\n",
    "TWEET_TEXT = \"tweet_text\"\n",
    "TWEET_SHORTEN_URLS = \"tweet_shorten_urls\"\n",
    "TWEET_EXPANDED_URLS = \"tweet_expanded_urls\"\n",
    "NOTE_TEXT_UNESCAPED = \"note_text_unescaped\"\n",
    "TWEET_TEXT_UNESCAPED = \"tweet_text_unescaped\"\n",
    "NOTE_TEXT_FINAL = \"note_text_final\"\n",
    "TWEET_TEXT_FINAL = \"tweet_text_final\"\n",
    "NOTE_TEXT_NO_URLS = \"note_text_no_urls\"\n",
    "TWEET_TEXT_NO_URLS = \"tweet_text_no_urls\"\n",
    "NOTE_LANG = \"note_lang\"\n",
    "TWEET_LANG = \"tweet_lang\"\n",
    "NOTE_LANG_CONFIDENCE = \"note_lang_confidence\"\n",
    "TWEET_LANG_CONFIDENCE = \"tweet_lang_confidence\"\n",
    "NOTE_LANG_INFERRED = \"note_lang_inferred\"\n",
    "TWEET_LANG_INFERRED = \"tweet_lang_inferred\"\n",
    "CLUSTER_ID = \"cluster_id\"\n",
    "CLUSTER_TEXT = \"cluster_text\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "639af955-b82f-4f9a-9fc0-490578bae360",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define dataframe notebook columns related to labeling\n",
    "RATING_WEIGHT = \"rating_weight\"\n",
    "TOTAL_SIGNAL_WEIGHT = \"total_signal_weight\"\n",
    "TOTAL_RATINGS = \"total_ratings\"\n",
    "PREDICTED_HELPFULNESS = \"predicted_helpfulness\"\n",
    "RELEVANCE = \"relevance\"\n",
    "CLASSIFICATION = \"classification\"\n",
    "INTERCEPT = \"intercept\"\n",
    "FACTOR = \"factor\"\n",
    "CRH = \"crh\"\n",
    "NMR = \"nmr\"\n",
    "CRNH = \"crnh\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0125ef60-0200-4518-a886-169436e6694d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Additional constants\n",
    "CUDA = \"cuda\"\n",
    "CPU = \"cpu\"\n",
    "ROOT = os.path.expanduser(\"~/workspace\")\n",
    "HF_ROOT = os.path.join(ROOT, \"huggingface\")\n",
    "MODEL_ROOT = os.path.join(HF_ROOT, \"models\")\n",
    "MODEL_DIR = \"model\"\n",
    "TOKENIZER_DIR = \"tokenizer\"\n",
    "LANGUAGE_DETECTION_MODEL = \"xlm-roberta-base-language-detection\"\n",
    "DISTILROBERTA_BASE_MODEL = \"distilroberta-base\"\n",
    "ALL_MINILM_L6_V2 = \"all-MiniLM-L6-v2\"\n",
    "ALL_MPNET_BASE_V2 = \"all-mpnet-base-v2\"\n",
    "DATA_ROOT = os.path.join(ROOT, \"datasets/helpfulness\")\n",
    "EXPANSION_GLOBAL_BIAS = 0.17178\n",
    "SEED = 42"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe5a9be5-a415-4e22-a961-69b9e548ed50",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configure environment variables\n",
    "os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4bb5cf4b-41c6-4836-abb2-d61dfcdad893",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper for monitoring GPU memory usage\n",
    "def get_gpu_stats():\n",
    "  tmp = !nvidia-smi --query-gpu=index,name,memory.used,memory.total,utilization.gpu --format=csv\n",
    "  return pd.DataFrame([row.split(\",\") for row in tmp[1:]], columns=tmp[0].split(\",\"))\n",
    "\n",
    "get_gpu_stats()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cc67a414-4ef1-4031-8f79-b548aadb2a63",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "# Load and Prepare Dataset"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d211de21-1b8e-4841-aacd-bb7327f1c179",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Load and Prune Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4ac79aba-3d87-4d29-b1d7-18eedd81b56d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load scoring inputs\n",
    "ratings = pd.read_parquet(os.path.join(DATA_ROOT, \"ratings.parquet\"))\n",
    "notes = pd.read_parquet(os.path.join(DATA_ROOT, \"notes.parquet\"))\n",
    "nsh = pd.read_parquet(os.path.join(DATA_ROOT, \"note_status_history.parquet\"))                                   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "931dfb4c-054b-460e-8730-f9f0602b0ed1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load scoring outputs\n",
    "scoredRaters = pd.read_parquet(os.path.join(DATA_ROOT, \"scored_raters.parquet\"))\n",
    "scoredNotes = pd.read_parquet(os.path.join(DATA_ROOT, \"scored_notes.parquet\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3460a34c-3133-457f-b0f9-464285455c6e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load posts\n",
    "posts = pd.read_parquet(os.path.join(DATA_ROOT, \"posts.parquet\"))\n",
    "print(len(posts))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0d28d69c-84ee-4ed6-8475-7819b89c2558",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load clusters\n",
    "clusters = pd.read_parquet(os.path.join(DATA_ROOT, \"clusters_1753217043.parquet\"))\n",
    "with open(os.path.join(DATA_ROOT, \"top_words.json\"), \"r\") as handle:\n",
    "  topWords = json.load(handle)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf302ab5-c911-435d-b225-04b49d3db47a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standardize types\n",
    "ratings[RATER_PARTICIPANT_ID] = ratings[RATER_PARTICIPANT_ID].astype(np.int64)\n",
    "scoredRaters[RATER_PARTICIPANT_ID] = scoredRaters[RATER_PARTICIPANT_ID].astype(np.int64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f5daa387-142c-4634-911b-a37d7a2a075c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standardize note_id and classification column names\n",
    "ratings = ratings.rename(columns={CAMEL_NOTE_ID: NOTE_ID})\n",
    "notes = notes.rename(columns={CAMEL_NOTE_ID: NOTE_ID, \"classification\": AUTHOR_CLASSIFICATION})\n",
    "nsh = nsh.rename(columns={CAMEL_NOTE_ID: NOTE_ID})\n",
    "scoredNotes = scoredNotes.rename(columns={CAMEL_NOTE_ID: NOTE_ID})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "249d5337-466c-4b01-b099-f2d57f89e436",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prune columns\n",
    "ratings = ratings[[NOTE_ID, RATER_PARTICIPANT_ID, HELPFULNESS_LEVEL_KEY] + H_TAGS + NH_TAGS]\n",
    "posts = posts[[NOTE_ID, TWEET_ID, NOTE_TEXT, TWEET_TEXT, TWEET_SHORTEN_URLS, TWEET_EXPANDED_URLS]]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "eb0ddf21-80aa-435f-8c80-a66808d051c2",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Compute Weighted Tag Ratios"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0a4e2bb7-8dcb-4322-8035-15f95b23ffa3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Augment ratings with standardized helpfulness level and scoring results for notes and raters\n",
    "def add_level_and_scoring_results(ratings, scoredRaters, scoredNotes):\n",
    "  # Select columns and set helpfulNum\n",
    "  print(f\"Original ratings: {len(ratings)}\")\n",
    "  ratings[HELPFUL_NUM_KEY] = np.nan\n",
    "  ratings.loc[ratings[HELPFULNESS_LEVEL_KEY] == HELPFUL_VALUE_TSV, HELPFUL_NUM_KEY] = 1.0\n",
    "  ratings.loc[ratings[HELPFULNESS_LEVEL_KEY] == SOMEWHAT_HELPFUL_VALUE_TSV, HELPFUL_NUM_KEY] = 0.5\n",
    "  ratings.loc[ratings[HELPFULNESS_LEVEL_KEY] == NOT_HELPFUL_VALUE_TSV, HELPFUL_NUM_KEY] = 0.0\n",
    "  ratings = ratings[ratings[HELPFUL_NUM_KEY].notna()].drop(columns=HELPFULNESS_LEVEL_KEY)\n",
    "  print(f\"Ratings with helpfulNum: {len(ratings)}\")\n",
    "  # Augment with scoring results\n",
    "  ratings = ratings.merge(scoredRaters[[RATER_PARTICIPANT_ID, EXPANSION_RATER_FACTOR, EXPANSION_RATER_INTERCEPT]].dropna())\n",
    "  ratings = ratings.merge(scoredNotes[[NOTE_ID, EXPANSION_NOTE_FACTOR, EXPANSION_NOTE_INTERCEPT]].dropna())\n",
    "  print(f\"Ratings with scoring results: {len(ratings)}\")\n",
    "  assert ratings.isna().sum().sum() == 0\n",
    "  return ratings\n",
    "\n",
    "ratings = add_level_and_scoring_results(ratings.copy(), scoredRaters, scoredNotes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6d489582-86de-4bfe-a7dc-c411199a54f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate a prediction of how we expect a rater to rate a note based on the learned viewpoint\n",
    "# representation for the note and rater, as well as bias terms.\n",
    "# Notice that we add the mean of the note intercept to shift predictions appropriately without\n",
    "# actually incorporating quality signal specific to the note.\n",
    "def add_prediction(ratings):\n",
    "  ratings[PREDICTED_HELPFULNESS] = (\n",
    "    ratings[EXPANSION_RATER_FACTOR] * ratings[EXPANSION_NOTE_FACTOR]\n",
    "    + ratings[EXPANSION_RATER_INTERCEPT]\n",
    "    + ratings[EXPANSION_NOTE_INTERCEPT].mean()\n",
    "    + EXPANSION_GLOBAL_BIAS\n",
    "  )\n",
    "  return ratings\n",
    "\n",
    "ratings = add_prediction(ratings)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1a57e98d-b859-49ec-baf7-a25ad0946044",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Profile learned representation\n",
    "def profile_params(scoredRaters, scoredNotes):\n",
    "  fig, ax = plt.subplots(1, 4)\n",
    "  fig.set_figwidth(30)\n",
    "  fig.set_figheight(5)\n",
    "  scoredRaters[EXPANSION_RATER_FACTOR].plot.hist(bins=50, ax=ax[0], title=\"Rater Factors\")\n",
    "  scoredRaters[EXPANSION_RATER_INTERCEPT].plot.hist(bins=50, ax=ax[1], title=\"Rater Intercepts\")\n",
    "  scoredNotes[EXPANSION_NOTE_FACTOR].plot.hist(bins=50, ax=ax[2], title=\"Note Factors\")\n",
    "  scoredNotes[EXPANSION_NOTE_INTERCEPT].plot.hist(bins=50, ax=ax[3], title=\"Note Intercepts\")\n",
    "\n",
    "profile_params(scoredRaters, scoredNotes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d15c5529-c93a-4b02-8e28-950f5b4a58ee",
   "metadata": {},
   "outputs": [],
   "source": [
    "# View distribution of predictions\n",
    "def plot_predictions(predRatings):\n",
    "  fig, ax = plt.subplots(1, 4, sharex=True)\n",
    "  fig.set_figwidth(30)\n",
    "  fig.set_figheight(5)\n",
    "  predRatings[PREDICTED_HELPFULNESS].plot.hist(bins=50, ax=ax[0])\n",
    "  ax[0].set_title(\"All\")\n",
    "  predRatings[predRatings[HELPFUL_NUM_KEY] == 1.0][PREDICTED_HELPFULNESS].plot.hist(bins=50, ax=ax[1])\n",
    "  ax[1].set_title(\"Helpful\")\n",
    "  predRatings[predRatings[HELPFUL_NUM_KEY] == 0.5][PREDICTED_HELPFULNESS].plot.hist(bins=50, ax=ax[2])\n",
    "  ax[2].set_title(\"Somewhat Helpful\")\n",
    "  predRatings[predRatings[HELPFUL_NUM_KEY] == 0.0][PREDICTED_HELPFULNESS].plot.hist(bins=50, ax=ax[3])\n",
    "  ax[3].set_title(\"Not Helpful\")\n",
    "\n",
    "plot_predictions(ratings)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d7b35867-a005-47a8-871c-4261e4aea213",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot predictions passed through sigmoid for weighting\n",
    "def plot_pred_sigmoid(predRatings):\n",
    "  fig, ax = plt.subplots(1, 4, sharex=True)\n",
    "  fig.set_figwidth(30)\n",
    "  fig.set_figheight(5)\n",
    "  std = predRatings[PREDICTED_HELPFULNESS].std()\n",
    "  for i, multiplier in enumerate([1, 1.5, 2, 3]):\n",
    "    factor = multiplier / std\n",
    "    # Center the predicted helpfulness around .5 since the MF treats Helpful as 1 and Not Helpful as 0.\n",
    "    # Scale by the std deviation and a multiplicative factor to determine how strongly to weight ratings.\n",
    "    # Apply sigmoid.\n",
    "    ((1 + np.exp(-1 * factor * (predRatings[PREDICTED_HELPFULNESS] - .5))) ** -1).plot.hist(bins=50, title=f\"factor={multiplier}\", ax=ax[i])\n",
    "\n",
    "plot_pred_sigmoid(ratings)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "496db54e-8646-4be6-b4b6-3f0f3e024763",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Determine rating weights with multiplier=1\n",
    "def get_weighted_tag_ratios(ratings, multiplier):\n",
    "  ratings = ratings.copy()\n",
    "  factor = multiplier / ratings[PREDICTED_HELPFULNESS].std()\n",
    "  ratings[RATING_WEIGHT] = ((1 + np.exp(-1 * factor * (ratings[PREDICTED_HELPFULNESS] - .5))) ** -1)\n",
    "  for col in NH_TAGS:\n",
    "    ratings[col] = ratings[col] * ratings[RATING_WEIGHT]\n",
    "  for col in H_TAGS:\n",
    "    ratings[col] = ratings[col] * (1 - ratings[RATING_WEIGHT])\n",
    "  scores = ratings[[NOTE_ID, RATING_WEIGHT] + NH_TAGS + H_TAGS].groupby(NOTE_ID).sum().reset_index(drop=False).rename(\n",
    "    columns={RATING_WEIGHT: TOTAL_SIGNAL_WEIGHT})\n",
    "  for col in (NH_TAGS + H_TAGS):\n",
    "    scores[f\"{col}_ratio\"] = scores[col] / scores[TOTAL_SIGNAL_WEIGHT]\n",
    "  print(f\"Total notes: {len(scores)}\")\n",
    "  return scores\n",
    "\n",
    "weightedTagRatios = get_weighted_tag_ratios(ratings, 1)\n",
    "weightedTagRatios"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "36662356-59be-49d3-b690-e0d369f9bad9",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Assemble Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "34df4dc1-0088-470f-80ed-e9fc21af3e01",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Coalesce note factors\n",
    "def get_note_factor(scoredNotes):\n",
    "\n",
    "  def _get_factor(core, expansion, expansionPlus):\n",
    "    if not pd.isna(core):\n",
    "      return core\n",
    "    if not pd.isna(expansion):\n",
    "      return expansion\n",
    "    if not pd.isna(expansionPlus):\n",
    "      return expansionPlus\n",
    "    return np.nan\n",
    "  scoredNotes = scoredNotes[[NOTE_ID, CORE_NOTE_FACTOR, EXPANSION_NOTE_FACTOR, EXPANSION_PLUS_NOTE_FACTOR]].copy()\n",
    "  scoredNotes[FACTOR] = [_get_factor(core, expansion, expansionPlus) for (core, expansion, expansionPlus) in (\n",
    "    scoredNotes[[CORE_NOTE_FACTOR, EXPANSION_NOTE_FACTOR, EXPANSION_PLUS_NOTE_FACTOR]].values\n",
    "  )]\n",
    "  return scoredNotes[[NOTE_ID, FACTOR]].rename(columns={NOTE_ID: NOTE_ID})\n",
    "\n",
    "noteFactors = get_note_factor(scoredNotes)\n",
    "noteFactors.merge(scoredNotes[[NOTE_ID, CORE_NOTE_FACTOR, EXPANSION_NOTE_FACTOR, EXPANSION_PLUS_NOTE_FACTOR]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ae575ba1-7a25-412b-bf01-783b6a436837",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Coalesce note intercepts\n",
    "def get_note_intercept(scoredNotes):\n",
    "\n",
    "  def _get_intercept(core, expansion, expansionPlus):\n",
    "    if not pd.isna(core):\n",
    "      return core\n",
    "    if not pd.isna(expansion):\n",
    "      return expansion\n",
    "    if not pd.isna(expansionPlus):\n",
    "      return expansionPlus\n",
    "    return np.nan\n",
    "  scoredNotes = scoredNotes[[NOTE_ID, CORE_NOTE_INTERCEPT, EXPANSION_NOTE_INTERCEPT, EXPANSION_PLUS_NOTE_INTERCEPT]].copy()\n",
    "  scoredNotes[INTERCEPT] = [_get_intercept(core, expansion, expansionPlus) for (core, expansion, expansionPlus) in (\n",
    "    scoredNotes[[CORE_NOTE_INTERCEPT, EXPANSION_NOTE_INTERCEPT, EXPANSION_PLUS_NOTE_INTERCEPT]].values\n",
    "  )]\n",
    "  return scoredNotes[[NOTE_ID, INTERCEPT]].rename(columns={NOTE_ID: NOTE_ID})\n",
    "\n",
    "noteIntercepts = get_note_intercept(scoredNotes)\n",
    "noteIntercepts.merge(scoredNotes[[NOTE_ID, CORE_NOTE_INTERCEPT, EXPANSION_NOTE_INTERCEPT, EXPANSION_PLUS_NOTE_INTERCEPT]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "309f9db9-8677-4b6b-8391-677419bc019c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Combine all signals for a final export dataset\n",
    "def prepare_dataset(posts, noteFactors, noteIntercepts, nsh, notes, ratings, weightedTagRatios):\n",
    "  # Extract classifications and final note status\n",
    "  classifications = notes[[NOTE_ID, AUTHOR_CLASSIFICATION]]\n",
    "  finalStatus = nsh[[NOTE_ID, CURRENT_LABEL]]\n",
    "  ratingCounts = ratings[NOTE_ID].value_counts().to_frame().reset_index(drop=False).rename(columns={\"count\": TOTAL_RATINGS})\n",
    "  # Compose and return dataset\n",
    "  print(len(posts))\n",
    "  dataset = posts.merge(\n",
    "    classifications, on=NOTE_ID).merge(\n",
    "    noteIntercepts, on=NOTE_ID).merge(\n",
    "    noteFactors, on=NOTE_ID).merge(\n",
    "    finalStatus, on=NOTE_ID).merge(\n",
    "    ratingCounts, on=NOTE_ID).merge(\n",
    "    weightedTagRatios, on=NOTE_ID)\n",
    "  print(len(dataset))\n",
    "  return dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4581ab3b-63a7-46ec-bdf9-56c4fec54273",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(len(posts))\n",
    "print(len(nsh))\n",
    "print(len(notes))\n",
    "print()\n",
    "dataset = prepare_dataset(posts, noteFactors, noteIntercepts, nsh, notes, ratings, weightedTagRatios)\n",
    "print(len(dataset))\n",
    "dataset.dtypes"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cc2abbc0-c4d6-4638-9d79-f9285f0eb095",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "# Prepare Text"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6c9a80ca-2193-4113-8a00-03cc1fd9a970",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Inspect Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7bf42570-557f-4df4-91bf-db49402b8a4f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Validate that text is always present\n",
    "dataset[[NOTE_TEXT, TWEET_TEXT]].isna().sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0b18335e-2444-453f-8944-3a07c3ff2409",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Validate that text is always present\n",
    "(dataset[[NOTE_TEXT, NOTE_TEXT]] == \"\").sum()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4da0b188-93ac-4229-b4ca-d416419fc04f",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Unescape Text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1f97571-53c9-42f2-9583-265d6e345673",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define unescape helper\n",
    "def unescape(text):\n",
    "  return html.unescape(html.unescape(text)) if isinstance(text, str) else text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8253622a-eb93-4fa4-b00a-dba527bcfdc3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Unescape notes and tweets\n",
    "dataset[NOTE_TEXT_UNESCAPED] = [unescape(text) for text in dataset[NOTE_TEXT]]\n",
    "dataset[TWEET_TEXT_UNESCAPED] = [unescape(text) for text in dataset[TWEET_TEXT]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa838103-2993-42e3-9899-1c5fa26fcd60",
   "metadata": {},
   "outputs": [],
   "source": [
    "# show text sample\n",
    "for tmp in dataset[NOTE_TEXT_UNESCAPED].sample(10, random_state=SEED):\n",
    "  print(tmp)\n",
    "  print(\"------------------\"*3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "512c93fb-a81f-4d1c-8671-6552ad73e55c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# show text sample\n",
    "for tmp in dataset[TWEET_TEXT_UNESCAPED].sample(10, random_state=SEED):\n",
    "  print(tmp)\n",
    "  print(\"------------------\"*3)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "380f069e-fbe5-4a9a-ac66-56007cb882a5",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Prepare URLs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "23219942-3822-4d13-9193-a58e49c2e426",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to replace URLs with full text versions\n",
    "def replace_urls(text, shortUrls, fullUrls, maxLength=150):\n",
    "  if shortUrls is not None:\n",
    "    # Validate mapping and replace known links\n",
    "    assert len(shortUrls) == len(fullUrls)\n",
    "    for short, full in zip(shortUrls, fullUrls):\n",
    "      text = text.replace(short, full[:maxLength])\n",
    "  # Remove any remaining shortlinks\n",
    "  return re.sub(\"https://t.co/\\S+\", \"\", text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fb2f5474-6c4f-4dd8-a2ce-972edf98058a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate new dataset column with patched text\n",
    "dataset[TWEET_TEXT_FINAL] = [\n",
    "  replace_urls(text, shortUrls, fullUrls)\n",
    "  for (text, shortUrls, fullUrls)\n",
    "  in dataset[[TWEET_TEXT_UNESCAPED, TWEET_SHORTEN_URLS, TWEET_EXPANDED_URLS]].values\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c6f5c787-ca06-4348-8955-b05e41930257",
   "metadata": {},
   "outputs": [],
   "source": [
    "# View a sample of patched values\n",
    "for tmp in dataset[[NOTE_ID, TWEET_TEXT_FINAL]].sample(10).values:\n",
    "  print(tmp)\n",
    "  print(\"--------\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a0ce4354-8b0f-45b9-ba61-85f6db5cfe9f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to truncated URLs in note text\n",
    "def truncate_urls(noteText, maxLength=150):\n",
    "  assert maxLength >= 0\n",
    "  urlPattern = r'https?://[^\\s<>\"]+|www\\.[^\\s<>\"]+'\n",
    "  def truncate_match(match):\n",
    "    url = match.group(0)\n",
    "    return url[:maxLength] if len(url) > maxLength else url\n",
    "  return re.sub(urlPattern, truncate_match, noteText)\n",
    "\n",
    "print(truncate_urls(\"This note has no url\"))\n",
    "print(truncate_urls(\"This note has 1 url http://www.foobar.com/test/path and then more text\", maxLength=15))\n",
    "print(truncate_urls(\"This note has 1 url http://foobar.com/test/path and then more text\", maxLength=15))\n",
    "print(truncate_urls(\"This note has 1 url https://www.foobar.com/test/path and then more text\", maxLength=15))\n",
    "print(truncate_urls(\"This note has 1 url https://foobar.com/test/path and then more text\", maxLength=15))\n",
    "print(truncate_urls(\"This note has 2 url https://foobar.com/test/path and https://foobarbaz.com/test/path then more text\", maxLength=15))\n",
    "print(truncate_urls(\"This note has 2 url https://foobar.com/test/path and https://foobarbaz.com/test/path then more text\", maxLength=250))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5bd2b32-c0cc-4714-8d16-dc08e2971c9e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Apply truncation to note text\n",
    "dataset[NOTE_TEXT_FINAL] = [truncate_urls(text) for text in dataset[NOTE_TEXT_UNESCAPED]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4754b1c1-04b1-471f-a94f-a6a1ec120ce0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# View a sample of patched values\n",
    "for tmp in dataset[NOTE_TEXT_FINAL].sample(10):\n",
    "  print(tmp)\n",
    "  print(\"--------\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3efa3305-dc58-4740-a576-657c4b7bb271",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Inspect Final Text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1e1bad7b-aa64-4b77-9f29-e003dab14b89",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Validate that text is always present\n",
    "dataset[[NOTE_TEXT_FINAL, TWEET_TEXT_FINAL]].isna().sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4b25c616-be20-4a8e-8072-6833ef7533e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Validate that text is always present\n",
    "(dataset[[NOTE_TEXT_FINAL, TWEET_TEXT_FINAL]] == \"\").sum()  # non-zero expected because some tweets only contain a media short link"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f348a588-d2c7-4ad9-b9a5-137c19908cf5",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Generate No-URL Variants"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a942b3cc-509f-4521-9245-1a81083728cc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to generate no-URL variants for TF-IDF characterization of clusters\n",
    "def remove_urls(text):\n",
    "  urlPattern = r'https?://[^\\s<>\"]+|www\\.[^\\s<>\"]+'\n",
    "  return re.sub(urlPattern, \"\", text)\n",
    "\n",
    "print(remove_urls(\"This note has no url\"))\n",
    "print(remove_urls(\"This note has 1 url http://www.foobar.com/test/path and then more text\"))\n",
    "print(remove_urls(\"This note has 1 url http://foobar.com/test/path and then more text\"))\n",
    "print(remove_urls(\"This note has 1 url https://www.foobar.com/test/path and then more text\"))\n",
    "print(remove_urls(\"This note has 1 url https://foobar.com/test/path and then more text\"))\n",
    "print(remove_urls(\"This note has 2 url https://foobar.com/test/path and https://foobarbaz.com/test/path then more text\"))\n",
    "print(remove_urls(\"This note has 2 url https://foobar.com/test/path and https://foobarbaz.com/test/path then more text\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c2cb6b6b-51a8-42f4-b567-b7d4f62440e0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate no-URL variants\n",
    "dataset[NOTE_TEXT_NO_URLS] = [remove_urls(text) for text in dataset[NOTE_TEXT_FINAL]]\n",
    "dataset[TWEET_TEXT_NO_URLS] = [remove_urls(text) for text in dataset[TWEET_TEXT_FINAL]]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7f3d9020-75e3-4160-aa15-09d36d525474",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "# Detect Language"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "74c0c383-80d8-4efa-8e91-538c4bbcbe29",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Load Models and Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "70554504-dc1e-45ea-8a70-dc6b75b3a4f0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# load model and tokenizer\n",
    "langDetectionModel = nn.DataParallel(AutoModelForSequenceClassification.from_pretrained(os.path.join(MODEL_ROOT, LANGUAGE_DETECTION_MODEL, MODEL_DIR)).to(CUDA))\n",
    "langDetectionTokenizer = AutoTokenizer.from_pretrained(os.path.join(MODEL_ROOT, LANGUAGE_DETECTION_MODEL, TOKENIZER_DIR))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b9ed3b65-611c-49ab-82c5-4daf181e8b61",
   "metadata": {},
   "outputs": [],
   "source": [
    "get_gpu_stats()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a924ec47-61b1-4762-8ad2-16a94f9cdd45",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Apply Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45a99ea5-bfe7-48e5-b582-0f4c11b29fb2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# View model size\n",
    "print(f\"{sum(tmp.numel() for tmp in langDetectionModel.parameters())//(2**20)}M\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d24f9ef-fd40-429a-bec3-cf091cfa39e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to classify a single batch of texts\n",
    "def classify_chunk(texts, model, tokenizer):\n",
    "  \"\"\"Return [(language, confidence), ...] for one batch of texts.\"\"\"\n",
    "  # Tokenize the batch and run one forward pass with gradients disabled\n",
    "  encoded = tokenizer(texts, padding=True, truncation=True, return_tensors=\"pt\")\n",
    "  with torch.no_grad():\n",
    "    logits = model(**encoded).logits\n",
    "  probs = torch.softmax(logits, dim=-1)\n",
    "  confidences, labelIdxs = torch.max(probs, dim=1)\n",
    "  # Translate label indices to language codes via the wrapped model's config\n",
    "  idToLang = model.module.config.id2label\n",
    "  return [(idToLang[i.item()], c.item()) for i, c in zip(labelIdxs, confidences)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7898be17-53c0-4225-b36e-b02e2fa23422",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to classify larger lists\n",
    "def classify_texts(texts, model, tokenizer, batchSize=1024):\n",
    "  \"\"\"Classify texts in batches of batchSize.\n",
    "\n",
    "  Returns a two-column result (languages, confidences), one entry per text.\n",
    "  Empty input returns two empty columns -- previously zip(*[]) produced zero\n",
    "  columns, which crashed callers that unpack two values.\n",
    "  \"\"\"\n",
    "  numBatches = int(np.ceil(len(texts) / batchSize))\n",
    "  results = []\n",
    "  progressBar = tqdm(range(numBatches))\n",
    "  start = 0\n",
    "  while start < len(texts):\n",
    "    end = start + batchSize\n",
    "    results.extend(classify_chunk(texts[start:end], model, tokenizer))\n",
    "    progressBar.update(1)\n",
    "    start = end\n",
    "  progressBar.close()  # Release the bar so later bars render on fresh lines\n",
    "  return list(zip(*results)) if results else [(), ()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d422c0b4-106e-4a79-9399-84e3275695aa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute note languages\n",
    "noteLangs, noteConfidence = classify_texts(list(dataset[NOTE_TEXT_FINAL]), langDetectionModel, langDetectionTokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "15b2eb19-511b-4430-b089-8d2cf136f03e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute tweet languages\n",
    "tweetLangs, tweetConfidence = classify_texts(list(dataset[TWEET_TEXT_FINAL]), langDetectionModel, langDetectionTokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "46ac432b-1821-4a95-8656-a0856d81ba85",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Augment dataset\n",
    "dataset[NOTE_LANG] = noteLangs\n",
    "dataset[NOTE_LANG_CONFIDENCE] = noteConfidence\n",
    "dataset[TWEET_LANG] = tweetLangs\n",
    "dataset[TWEET_LANG_CONFIDENCE] = tweetConfidence"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "26751086-0c0e-4569-ab95-cdd4e4ddf48e",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Inspect Results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d20f4995-cfac-44df-8a85-b83438fa8a72",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Profile note lang confidence\n",
    "dataset[NOTE_LANG_CONFIDENCE].plot.hist(bins=50, logy=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "415bab34-0edf-4c89-8931-bea7dd880bb0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Profile tweet lang confidence\n",
    "dataset[TWEET_LANG_CONFIDENCE].plot.hist(bins=50, logy=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d1ae15a-21ca-428e-b6c8-d0d3fde4475e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Profile note lang\n",
    "dataset[NOTE_LANG].value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f94542a0-6e54-485c-bc5e-27e22af60d69",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Profile tweet lang\n",
    "dataset[TWEET_LANG].value_counts()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "df3d413c-7e07-4448-9f05-b28a286e9758",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Set Inferred Languages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9fd289b2-4a50-49ea-afa3-14b60bb00e39",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helpers to infer note langauge and tweet language\n",
    "def infer_note_lang(noteLang, noteConfidence, tweetLang, tweetConfidence):\n",
    "  if noteConfidence > .5:\n",
    "    return noteLang\n",
    "  elif tweetConfidence > .5:\n",
    "    return tweetLang\n",
    "  else:\n",
    "    return pd.NA\n",
    "\n",
    "def infer_tweet_lang(noteLang, noteConfidence, tweetLang, tweetConfidence):\n",
    "  if tweetConfidence > .5:\n",
    "    return tweetLang\n",
    "  elif noteConfidence > .5:\n",
    "    return noteLang\n",
    "  else:\n",
    "    return pd.NA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cf8c5c46-1cbe-4f8c-89c5-173b900337e8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Apply inference row by row; tuple unpacking order must match the column list in the .values slice\n",
    "dataset[NOTE_LANG_INFERRED] = [\n",
    "  infer_note_lang(noteLang, noteConfidence, tweetLang, tweetConfidence)\n",
    "  for (noteLang, noteConfidence, tweetLang, tweetConfidence)\n",
    "  in dataset[[NOTE_LANG, NOTE_LANG_CONFIDENCE, TWEET_LANG, TWEET_LANG_CONFIDENCE]].values\n",
    "]\n",
    "dataset[TWEET_LANG_INFERRED] = [\n",
    "  infer_tweet_lang(noteLang, noteConfidence, tweetLang, tweetConfidence)\n",
    "  for (noteLang, noteConfidence, tweetLang, tweetConfidence)\n",
    "  in dataset[[NOTE_LANG, NOTE_LANG_CONFIDENCE, TWEET_LANG, TWEET_LANG_CONFIDENCE]].values\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d472826-0255-475b-8755-ad398f2af223",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Profile note lang\n",
    "dataset[NOTE_LANG_INFERRED].value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c5baae07-0f50-44e0-86b8-8cc1377d5b34",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Profile note lang\n",
    "dataset[TWEET_LANG_INFERRED].value_counts()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e4ba43a1-6867-43ae-9f7d-c6d389ce1fc7",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Prune By Language"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b909de2f-c2f3-49af-83e6-6b6e9f0924ea",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restrict to EN notes and posts\n",
    "print(len(dataset))\n",
    "enDataset = dataset[\n",
    "  (dataset[NOTE_LANG_INFERRED] == \"en\")\n",
    "  & (dataset[TWEET_LANG_INFERRED] == \"en\")\n",
    "]\n",
    "print(len(enDataset))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f020ce88-a6d1-4227-8e9c-9e09b8129aaa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save dataset with augmented text, labeling signals and language\n",
    "enDataset.to_parquet(os.path.join(DATA_ROOT, \"augmented_en_posts_with_signals_and_langs.parquet\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e5339878-641b-4ce2-a5af-9c0a4fd1f4b6",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "# Prepare Model Training"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "745436ed-cba3-4038-a261-d28a604f77cd",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Profile Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "562a0c0d-4d43-4352-a8e4-99be3c71d4f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "tmp = enDataset[TOTAL_RATINGS].clip(0, 200).plot.hist(bins=40, cumulative=True)\n",
    "tmp.axvline(16, color=\"red\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e56676c8-5f51-40fe-90e2-6b2482e72474",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show rating distribution\n",
    "tmp = enDataset[[TOTAL_RATINGS, CURRENT_LABEL]].copy()\n",
    "tmp[CRH] = tmp[CURRENT_LABEL] == CURRENTLY_RATED_HELPFUL\n",
    "tmp[NMR] = tmp[CURRENT_LABEL] == NEEDS_MORE_RATINGS\n",
    "tmp[CRNH] = tmp[CURRENT_LABEL] == CURRENTLY_RATED_NOT_HELPFUL\n",
    "tmp[TOTAL_RATINGS] = [min(count, 200) // 4 for count in tmp[TOTAL_RATINGS]]\n",
    "tmp = tmp[[TOTAL_RATINGS, CRH, NMR, CRNH]]\n",
    "tmp.groupby(TOTAL_RATINGS).mean().sort_values(TOTAL_RATINGS)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9bd9a2fc-d9b2-4cf2-a1ee-0e8937666fb0",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Generate Labels & Splits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "268a5c18-3dfb-4a4e-af0a-01e360e4e92f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to apply thresholds and generate multitask labels\n",
    "def make_multitask_dataset(\n",
    "  dataset,\n",
    "  # Tag thresholds\n",
    "  minTotalSignal=3,\n",
    "  minPosNotHelpfulRatio=.25,\n",
    "  minPosHelpfulRatio=.5,\n",
    "  minPosSignal=2.5,\n",
    "  maxNegRatio=0.1,\n",
    "  # Thresholds for intercept, factor and status targets\n",
    "  crnhInterceptThreshold=0,\n",
    "  crhPosInterceptThreshold=0.4,\n",
    "  crhNegInterceptThreshold=0.3,\n",
    "  minStatusRatings=15,\n",
    "):\n",
    "  \"\"\"Apply thresholds to produce a multitask label frame.\n",
    "\n",
    "  Returns one row per note with tag, relevance, classification, intercept,\n",
    "  factor, CRH and CRNH targets; np.nan marks cells that should be excluded\n",
    "  from the loss for that objective.\n",
    "  \"\"\"\n",
    "  # Target labels: Helpful Tags, NotHelpful Tags, Relevance, Classification, Intercept, Factor, CRH, NMR, CRNH\n",
    "  print(f\"Initial dataset length: {len(dataset)}\")\n",
    "  dataset = dataset[dataset[AUTHOR_CLASSIFICATION].notna()]\n",
    "  print(f\"Dataset with classification: {len(dataset)}\")\n",
    "  output = dataset[[NOTE_ID, TWEET_ID, NOTE_TEXT_FINAL, TWEET_TEXT_FINAL]].copy()\n",
    "  # Set label for each tag column.\n",
    "  # Bug fix: cast with builtin bool -- the np.bool alias was removed in NumPy 1.24.\n",
    "  for col in H_TAGS:\n",
    "    posRows = (\n",
    "      (dataset[TOTAL_SIGNAL_WEIGHT] >= minTotalSignal)\n",
    "      & (dataset[f\"{col}_ratio\"] >= minPosHelpfulRatio)\n",
    "      & (dataset[col] >= minPosSignal)\n",
    "      & (dataset[AUTHOR_CLASSIFICATION] == MISINFORMED_OR_POTENTIALLY_MISLEADING)\n",
    "    ).astype(bool).values\n",
    "    negRows = (\n",
    "      (dataset[TOTAL_SIGNAL_WEIGHT] >= minTotalSignal)\n",
    "      & (dataset[f\"{col}_ratio\"] <= maxNegRatio)\n",
    "      & (dataset[AUTHOR_CLASSIFICATION] == MISINFORMED_OR_POTENTIALLY_MISLEADING)\n",
    "    ).astype(bool).values\n",
    "    assert (posRows & negRows).sum() == 0\n",
    "    output[col] = np.nan\n",
    "    output.loc[posRows, col] = 1.0\n",
    "    output.loc[negRows, col] = 0.0\n",
    "  for col in NH_TAGS:\n",
    "    posRows = (\n",
    "      (dataset[TOTAL_SIGNAL_WEIGHT] >= minTotalSignal)\n",
    "      & (dataset[f\"{col}_ratio\"] >= minPosNotHelpfulRatio)\n",
    "      & (dataset[col] >= minPosSignal)\n",
    "      & (dataset[AUTHOR_CLASSIFICATION] == MISINFORMED_OR_POTENTIALLY_MISLEADING)\n",
    "    ).astype(bool).values\n",
    "    negRows = (\n",
    "      (dataset[TOTAL_SIGNAL_WEIGHT] >= minTotalSignal)\n",
    "      & (dataset[f\"{col}_ratio\"] <= maxNegRatio)\n",
    "      & (dataset[AUTHOR_CLASSIFICATION] == MISINFORMED_OR_POTENTIALLY_MISLEADING)\n",
    "    ).astype(bool).values\n",
    "    assert (posRows & negRows).sum() == 0\n",
    "    output[col] = np.nan\n",
    "    output.loc[posRows, col] = 1.0\n",
    "    output.loc[negRows, col] = 0.0\n",
    "  # Prepare relevance labels\n",
    "  output[RELEVANCE] = 1.0\n",
    "  output.loc[dataset[AUTHOR_CLASSIFICATION] == NOT_MISLEADING, RELEVANCE] = np.nan\n",
    "  output.loc[dataset[INTERCEPT] < crnhInterceptThreshold, RELEVANCE] = np.nan\n",
    "  output.loc[dataset[CURRENT_LABEL] == CURRENTLY_RATED_NOT_HELPFUL, RELEVANCE] = np.nan\n",
    "  # Prepare classification labels\n",
    "  posRows = dataset[AUTHOR_CLASSIFICATION] == MISINFORMED_OR_POTENTIALLY_MISLEADING\n",
    "  negRows = dataset[AUTHOR_CLASSIFICATION] == NOT_MISLEADING\n",
    "  assert (posRows & negRows).sum() == 0\n",
    "  assert (posRows | negRows).sum() == len(dataset)\n",
    "  output[CLASSIFICATION] = np.nan\n",
    "  output.loc[posRows, CLASSIFICATION] = 1.0\n",
    "  output.loc[negRows, CLASSIFICATION] = 0.0\n",
    "  # Prepare intercept labels (only notes with enough ratings)\n",
    "  nanRows = dataset[TOTAL_RATINGS] < minStatusRatings\n",
    "  output[INTERCEPT] = dataset[INTERCEPT]\n",
    "  output.loc[nanRows, INTERCEPT] = np.nan\n",
    "  # Prepare factor labels\n",
    "  nanRows = dataset[TOTAL_RATINGS] < minStatusRatings\n",
    "  output[FACTOR] = dataset[FACTOR]\n",
    "  output.loc[nanRows, FACTOR] = np.nan\n",
    "  # Prepare CRNH labels\n",
    "  posRows = (\n",
    "    (dataset[CURRENT_LABEL] == CURRENTLY_RATED_NOT_HELPFUL)\n",
    "    & (dataset[INTERCEPT] < crnhInterceptThreshold)\n",
    "    & (dataset[TOTAL_RATINGS] >= minStatusRatings)\n",
    "  )\n",
    "  negRows = (\n",
    "    (dataset[CURRENT_LABEL] != CURRENTLY_RATED_NOT_HELPFUL)\n",
    "    & (dataset[INTERCEPT] > crnhInterceptThreshold)\n",
    "    & (dataset[TOTAL_RATINGS] >= minStatusRatings)\n",
    "  )\n",
    "  assert (posRows & negRows).sum() == 0\n",
    "  output[CRNH] = np.nan\n",
    "  output.loc[posRows, CRNH] = 1.0\n",
    "  output.loc[negRows, CRNH] = 0.0\n",
    "  # Prepare CRH labels; the gap between the pos/neg intercept thresholds\n",
    "  # leaves ambiguous notes unlabeled (NaN)\n",
    "  posRows = (\n",
    "    (dataset[CURRENT_LABEL] == CURRENTLY_RATED_HELPFUL)\n",
    "    & (dataset[INTERCEPT] >= crhPosInterceptThreshold)\n",
    "    & (dataset[TOTAL_RATINGS] >= minStatusRatings)\n",
    "  )\n",
    "  negRows = (\n",
    "    (dataset[CURRENT_LABEL] != CURRENTLY_RATED_HELPFUL)\n",
    "    & (dataset[INTERCEPT] < crhNegInterceptThreshold)\n",
    "    & (dataset[TOTAL_RATINGS] >= minStatusRatings)\n",
    "  )\n",
    "  assert (posRows & negRows).sum() == 0\n",
    "  output[CRH] = np.nan\n",
    "  output.loc[posRows, CRH] = 1.0\n",
    "  output.loc[negRows, CRH] = 0.0\n",
    "  # Summarize output\n",
    "  print(f\"Final dataset length: {len(output)}\")\n",
    "  return output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f52382be-c191-4c9a-89ae-54e5c91b81de",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Present labeled dataset\n",
    "labeledDataset = make_multitask_dataset(enDataset)\n",
    "labeledDataset.dtypes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "17ddb911-d702-448c-b95b-74165ba9e668",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Summarize labels\n",
    "def count_values_per_column(df):\n",
    "  \"\"\"Tabulate NaN / 0 / 1 counts for each column of df.\n",
    "\n",
    "  INTERCEPT and FACTOR are continuous, so they only report present vs\n",
    "  missing (oneCount = present, zeroCount = NA).\n",
    "  \"\"\"\n",
    "  rows = []\n",
    "  for col in df.columns:\n",
    "    if col not in [INTERCEPT, FACTOR]:\n",
    "      # Discrete label columns: tally each value, keeping NaN in the index\n",
    "      counts = df[col].value_counts(dropna=False)\n",
    "      rows.append({\n",
    "        \"columnName\": col,\n",
    "        \"nanCount\": counts.get(np.nan, 0),\n",
    "        \"zeroCount\": counts.get(0, 0),\n",
    "        \"oneCount\": counts.get(1, 0),\n",
    "      })\n",
    "    else:\n",
    "      # Continuous columns: only count how many values are present vs missing\n",
    "      rows.append({\n",
    "        \"columnName\": col,\n",
    "        \"nanCount\": df[col].isna().sum(),\n",
    "        \"zeroCount\": pd.NA,\n",
    "        \"oneCount\": df[col].notna().sum(),\n",
    "      })\n",
    "  return pd.DataFrame(rows)[[\"columnName\", \"nanCount\", \"zeroCount\", \"oneCount\"]]\n",
    "\n",
    "count_values_per_column(labeledDataset.drop(columns=[NOTE_ID, TWEET_ID, NOTE_TEXT_FINAL, TWEET_TEXT_FINAL]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1c25a777-315c-4399-9984-3e9b4c3c9c2a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split dataset for training and testing\n",
    "def split_dataset(dataset, trainFrac=.8, randomState=None):\n",
    "  \"\"\"Split by tweet so all notes for a tweet land in the same split.\n",
    "\n",
    "  randomState: optional seed forwarded to pandas sample() so splits are\n",
    "  reproducible; the default (None) keeps the original nondeterministic draw.\n",
    "  Returns (trainSplit, testSplit).\n",
    "  \"\"\"\n",
    "  trainTweets = dataset[TWEET_ID].drop_duplicates().sample(frac=trainFrac, random_state=randomState)\n",
    "  inTrain = dataset[TWEET_ID].isin(trainTweets)  # Compute membership once instead of twice\n",
    "  return (\n",
    "    dataset[inTrain],\n",
    "    dataset[~inTrain],\n",
    "  )\n",
    "\n",
    "trainSplit, testSplit = split_dataset(labeledDataset)\n",
    "print(len(trainSplit))\n",
    "print(len(testSplit))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c0a03e2-a32e-41fd-9640-9deb50708394",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to add synthetic relevance examples\n",
    "def add_relevance(dataset, negFactor=5, seed=42):\n",
    "  \"\"\"Append negFactor mismatched (note, tweet) pairs per row as relevance=0 examples.\n",
    "\n",
    "  Every other label column on the synthetic rows is NaN so they only train the\n",
    "  relevance objective. Returns the combined frame shuffled with the given seed.\n",
    "  \"\"\"\n",
    "  # Repeat every note negFactor times and pair with shuffled tweets; the\n",
    "  # axis=1 concat below relies on both frames sharing a fresh RangeIndex\n",
    "  noteNegRows = pd.concat([dataset[[NOTE_ID, NOTE_TEXT_FINAL]]] * negFactor).reset_index(drop=True)\n",
    "  tweetNegRows = pd.concat([dataset[[TWEET_ID, TWEET_TEXT_FINAL]]] * negFactor).sample(frac=1., random_state=seed).reset_index(drop=True)\n",
    "  relevanceNegRows = pd.concat([noteNegRows, tweetNegRows], axis=1)\n",
    "  # Blank out every label except relevance on the synthetic rows\n",
    "  for col in dataset:\n",
    "    if col in [RELEVANCE, NOTE_ID, TWEET_ID, NOTE_TEXT_FINAL, TWEET_TEXT_FINAL]:\n",
    "      continue\n",
    "    relevanceNegRows[col] = np.nan\n",
    "  relevanceNegRows[RELEVANCE] = 0.0\n",
    "  return pd.concat([dataset, relevanceNegRows], axis=0).sample(frac=1., random_state=seed)\n",
    "\n",
    "trainDataset = add_relevance(trainSplit)\n",
    "print(len(trainDataset))\n",
    "testDataset = add_relevance(testSplit)\n",
    "print(len(testDataset))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9d8aeafd-1ceb-4f4d-9798-1e3521c89375",
   "metadata": {},
   "outputs": [],
   "source": [
    "count_values_per_column(trainDataset.drop(columns=[NOTE_ID, TWEET_ID, NOTE_TEXT_FINAL, TWEET_TEXT_FINAL]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "876678ba-82a3-4e85-a2d9-d028717068e7",
   "metadata": {},
   "outputs": [],
   "source": [
    "count_values_per_column(testDataset.drop(columns=[NOTE_ID, TWEET_ID, NOTE_TEXT_FINAL, TWEET_TEXT_FINAL]))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "32e1bd9d-e4ec-4a9b-a53e-1512e135581e",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "# All Signal Model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "476b4b49-772e-4b03-99c6-79f4e79da151",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Create Tensors"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "27d8765a-0f3c-48eb-85cd-47ccb6b82341",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Profile the amount of signal for each label\n",
    "(trainDataset == 1).sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "808beec4-9c44-4c03-b1d1-d29513131968",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define which labels to include\n",
    "allSignalLabels = [\n",
    "  \"intercept\",\n",
    "  \"factor\",\n",
    "  \"crh\",\n",
    "  \"crnh\",\n",
    "  \"classification\",\n",
    "  \"relevance\",\n",
    "  \"notHelpfulNoteNotNeeded\",\n",
    "  \"notHelpfulOpinionSpeculation\",\n",
    "  \"notHelpfulIrrelevantSources\",\n",
    "  \"notHelpfulSpamHarassmentOrAbuse\",\n",
    "  \"notHelpfulArgumentativeOrBiased\",\n",
    "  \"notHelpfulMissingKeyPoints\",\n",
    "  \"notHelpfulSourcesMissingOrUnreliable\",\n",
    "  \"notHelpfulIncorrect\",\n",
    "  \"helpfulImportantContext\",\n",
    "  \"helpfulAddressesClaim\",\n",
    "  \"helpfulGoodSources\",\n",
    "  \"helpfulClear\",\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f5789f8-3689-45e7-97e8-2dcadec73cc4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to extract embeddings\n",
    "def make_tensors(dataset, includedLabels, batchSize=1024):\n",
    "  \"\"\"Tokenize (tweet, note) pairs and build label/mask tensors.\n",
    "\n",
    "  Returns (inputIds, attentionMasks, labels, lossMask, noteIds, tweetIds).\n",
    "  Assumes includedLabels contains INTERCEPT and FACTOR, since both columns\n",
    "  are normalized unconditionally below.\n",
    "  \"\"\"\n",
    "  # Prune columns\n",
    "  dataset = dataset[[NOTE_ID, TWEET_ID, NOTE_TEXT_FINAL, TWEET_TEXT_FINAL] + includedLabels].copy()\n",
    "  # Standardize the regression targets using this split's own mean/std.\n",
    "  # NOTE(review): train and test are normalized independently here -- confirm intended.\n",
    "  dataset[INTERCEPT] = (dataset[INTERCEPT] - dataset[INTERCEPT].mean()) / dataset[INTERCEPT].std()\n",
    "  dataset[FACTOR] = (dataset[FACTOR] - dataset[FACTOR].mean()) / dataset[FACTOR].std()\n",
    "  # Prepare tokenizer and inputs\n",
    "  tokenizer = AutoTokenizer.from_pretrained(os.path.join(MODEL_ROOT, DISTILROBERTA_BASE_MODEL, TOKENIZER_DIR))\n",
    "  noteTexts = list(dataset[NOTE_TEXT_FINAL].values)\n",
    "  tweetTexts = list(dataset[TWEET_TEXT_FINAL].values)\n",
    "  assert len(noteTexts) == len(tweetTexts)\n",
    "  # Tokenize all texts as (tweet, note) sequence pairs\n",
    "  numBatches = int(np.ceil(len(noteTexts) / batchSize))\n",
    "  progressBar = tqdm(range(numBatches))\n",
    "  inputIds = []\n",
    "  attentionMasks = []\n",
    "  start = 0\n",
    "  while start < len(noteTexts):\n",
    "    end = start + batchSize\n",
    "    batch = tokenizer(\n",
    "      list(zip(tweetTexts[start:end], noteTexts[start:end])),\n",
    "      max_length=512,\n",
    "      truncation=\"longest_first\",\n",
    "      padding=\"max_length\",  # Pad to max length since batches are large enough we effectively do this anyways.\n",
    "      return_tensors=\"pt\"\n",
    "    )\n",
    "    inputIds.append(batch[\"input_ids\"])\n",
    "    attentionMasks.append(batch[\"attention_mask\"])\n",
    "    start = end\n",
    "    progressBar.update(1)\n",
    "  # Generate labels and loss mask; the 0.5 fill is a placeholder that the\n",
    "  # loss mask zeroes out, so it never contributes to the training loss\n",
    "  labels = torch.tensor(dataset[includedLabels].fillna(0.5).values).to(torch.float32)\n",
    "  lossMask = torch.tensor(dataset[includedLabels].notna().values).to(torch.float32)\n",
    "  return (\n",
    "    torch.concat(inputIds, axis=0),\n",
    "    torch.concat(attentionMasks, axis=0),\n",
    "    labels,\n",
    "    lossMask,\n",
    "    torch.tensor(dataset[NOTE_ID].values),\n",
    "    torch.tensor(dataset[TWEET_ID].values),\n",
    "  )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b73968a4-f8c5-4159-834f-029ff663576e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Obtain tokens and masks\n",
    "trainTensors = make_tensors(trainDataset, allSignalLabels)\n",
    "print(tuple(tmp.shape for tmp in trainTensors))\n",
    "testTensors = make_tensors(testDataset, allSignalLabels)\n",
    "print(tuple(tmp.shape for tmp in testTensors))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5958f36c-2aa1-4f76-a667-6ffbc1626feb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save tensors to disk\n",
    "def save_tensors(tensors, fileName):\n",
    "  \"\"\"Write the tensor tuple to DATA_ROOT under a timestamped file name.\"\"\"\n",
    "  inputIds, attentionMasks, labels, lossMask, noteIds, tweetIds = tensors\n",
    "  # Prefix with the current epoch second so successive saves never collide\n",
    "  path = os.path.join(DATA_ROOT, f\"{int(time.time())}_{fileName}\")\n",
    "  print(f\"Saving checkpoint to {path}\")\n",
    "  payload = {\n",
    "    \"inputIds\": inputIds,\n",
    "    \"attentionMasks\": attentionMasks,\n",
    "    \"labels\": labels,\n",
    "    \"lossMask\": lossMask,\n",
    "    \"noteIds\": noteIds,\n",
    "    \"tweetIds\": tweetIds,\n",
    "  }\n",
    "  torch.save(payload, path)\n",
    "\n",
    "save_tensors(trainTensors, \"train_tensors.pt\")\n",
    "save_tensors(testTensors, \"test_tensors.pt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b8fe898c-4efd-46d1-8427-f8ad63290a9f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate small splits (first 2k rows of each tensor) for quick testing\n",
    "trainTensorsSmall = tuple(tmp[:2000] for tmp in trainTensors)\n",
    "testTensorsSmall = tuple(tmp[:2000] for tmp in testTensors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "28724469-1b9b-4b30-ac3d-91e81a978982",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate medium splits (first 20k rows of each tensor) for quick testing\n",
    "trainTensorsMedium = tuple(tmp[:20000] for tmp in trainTensors)\n",
    "testTensorsMedium = tuple(tmp[:20000] for tmp in testTensors)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cd8ee5fb-a851-40f9-9511-34f4ea07ed8c",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Define Training Helpers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4e38c118-bdb2-4fb6-9b12-7e7aecd9fd01",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define model\n",
    "class ParallelStack(nn.Module):\n",
    "\n",
    "  def __init__(self, hFactor, nHeads, dim=768, dropout=0.1):\n",
    "    super().__init__()\n",
    "    self.preclassifier = nn.Linear(dim, int(hFactor * dim))\n",
    "    self.dropout = nn.Dropout(dropout)\n",
    "    self.classifier = nn.Linear(int(hFactor * dim), nHeads)\n",
    "\n",
    "  def forward(self, embedding):\n",
    "    z = self.preclassifier(embedding)\n",
    "    a = self.dropout(nn.ReLU()(z))\n",
    "    return self.classifier(a)\n",
    "\n",
    "class MultiHeadMLPAllSignals(nn.Module):\n",
    "\n",
    "  def __init__(self, dim=768, dropout=0.1):\n",
    "    super().__init__()\n",
    "    self.roberta = AutoModel.from_pretrained(os.path.join(MODEL_ROOT, DISTILROBERTA_BASE_MODEL, MODEL_DIR))\n",
    "    self.interceptPredictor = ParallelStack(12, 1)\n",
    "    self.factorPredictor = ParallelStack(12, 1)\n",
    "    self.crhClassifier = ParallelStack(12, 1)\n",
    "    self.crnhClassifier = ParallelStack(12, 1)\n",
    "    self.relevanceClassifier = ParallelStack(12, 1)\n",
    "    self.classificationClassifier = ParallelStack(12, 1)\n",
    "    self.helpfulTagClassifier = ParallelStack(12, 4)\n",
    "    self.notHelpfulTagClassifier = ParallelStack(12, 8)\n",
    "\n",
    "  def forward(self, inputIds, attentionMask):\n",
    "    embedding = self.roberta(\n",
    "      input_ids=inputIds,\n",
    "      attention_mask=attentionMask,\n",
    "    ).last_hidden_state[:, 0]  # batch, token, dimension\n",
    "    return torch.concat([\n",
    "      self.interceptPredictor(embedding),\n",
    "      self.factorPredictor(embedding),\n",
    "      self.crhClassifier(embedding),\n",
    "      self.crnhClassifier(embedding),\n",
    "      self.classificationClassifier(embedding),\n",
    "      self.relevanceClassifier(embedding),\n",
    "      self.notHelpfulTagClassifier(embedding),\n",
    "      self.helpfulTagClassifier(embedding),\n",
    "    ], axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2a5591d3-bd4d-4e4e-a38f-58ccf02fb35e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define a helper to prepare loss weights\n",
    "def make_loss_weights(lossMask, objectiveWeights, numBatches):\n",
    "  # Compute weight to assign to each instance of training data for a particular objective\n",
    "  assert objectiveWeights.shape[0] == lossMask.shape[1]\n",
    "  instanceWeights = lossMask.sum(axis=0) ** -1\n",
    "  assert objectiveWeights.shape[0] == instanceWeights.shape[0]\n",
    "  assert np.abs(objectiveWeights.sum().item() - 1) < 1e-5\n",
    "  adjustedInstanceWeight = instanceWeights * objectiveWeights\n",
    "  # Compute weight to assign to each prediction loss\n",
    "  predictionLoss = lossMask * adjustedInstanceWeight\n",
    "  assert predictionLoss.shape == lossMask.shape\n",
    "  assert np.abs(predictionLoss.sum().item() - 1) < 1e-5\n",
    "  assert ((predictionLoss.sum(axis=0) - objectiveWeights).abs() < 1e-5).all().item()\n",
    "  # Apply batch scaling\n",
    "  return numBatches * predictionLoss \n",
    "\n",
    "make_loss_weights(\n",
    "  torch.tensor([\n",
    "    [1, 1, 1, 1],\n",
    "    [1, 1, 0, 0],\n",
    "    [1, 0, 0, 0],\n",
    "  ], dtype=torch.float32),\n",
    "  torch.tensor([0.2, 0.2, 0.5, .1], dtype=torch.float32),\n",
    "  10,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4a25459e-0e2b-44e4-ae35-c79c9c56e9a1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to compute loss\n",
    "def multihead_loss(logits, lossWeights, labels, mseMultiplier):\n",
    "  # Validate sizes match\n",
    "  assert logits.shape == lossWeights.shape\n",
    "  assert logits.shape == labels.shape\n",
    "  # Compute loss of each prediction\n",
    "  logitLoss = torch.concat([\n",
    "    nn.MSELoss(reduction=\"none\")(logits[:, :2], labels[:, :2]) * mseMultiplier,\n",
    "    nn.BCEWithLogitsLoss(reduction=\"none\")(logits[:, 2:], labels[:, 2:])\n",
    "  ], axis=1)\n",
    "  return logitLoss * lossWeights\n",
    "\n",
    "multihead_loss(\n",
    "  torch.arange(-3, 5, dtype=torch.float32).reshape(2, 4),\n",
    "  torch.tensor([1, 0, 1, 0, 0, 1, 0, 1], dtype=torch.float32).reshape(2, 4),\n",
    "  torch.ones(8, dtype=torch.float32).reshape(2, 4),\n",
    "  10,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b071535-cab0-49e3-9618-6fdf69800d69",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper for applying model\n",
    "def apply_model(model, dataset, device, gpuBatchSize, frac=None):\n",
    "  \"\"\"Run a forward pass over a tensor dataset in inference mode.\n",
    "\n",
    "  frac: optional fraction in (0, 1] -- when given, scores a random subset.\n",
    "  Returns (labels, preds, lossMask, noteIds, tweetIds) with preds on CPU.\n",
    "  \"\"\"\n",
    "  # Configure batching: scale the batch across all GPUs under DataParallel\n",
    "  if device == CUDA and torch.cuda.device_count() > 1:\n",
    "    batchSize = gpuBatchSize * torch.cuda.device_count()\n",
    "  else:\n",
    "    batchSize = gpuBatchSize\n",
    "  # Prepare data\n",
    "  inputIds, attentionMask, labels, lossMask, noteIds, tweetIds = dataset\n",
    "  assert inputIds.shape[0] == attentionMask.shape[0] == labels.shape[0] == lossMask.shape[0] == noteIds.shape[0] == tweetIds.shape[0]\n",
    "  if frac is not None:  # Idiomatic None check (was: frac != None)\n",
    "    assert 0 < frac <= 1.\n",
    "    size = int(frac * inputIds.shape[0])\n",
    "    indices = torch.randperm(inputIds.shape[0])[:size]\n",
    "    inputIds = inputIds[indices]\n",
    "    attentionMask = attentionMask[indices]\n",
    "    labels = labels[indices]\n",
    "    lossMask = lossMask[indices]\n",
    "    noteIds = noteIds[indices]\n",
    "    tweetIds = tweetIds[indices]\n",
    "  # Process chunks\n",
    "  start = 0\n",
    "  assert not model.training  # Caller must put the model in eval mode first\n",
    "  preds = []\n",
    "  progress = tqdm(range(int(np.ceil(inputIds.shape[0] / batchSize))))\n",
    "  while start < inputIds.shape[0]:\n",
    "    end = start + batchSize\n",
    "    with torch.no_grad():\n",
    "      with autocast(device_type=device, dtype=torch.bfloat16):\n",
    "        preds.append(\n",
    "          model(\n",
    "            inputIds[start:end].to(device),\n",
    "            attentionMask[start:end].to(device),\n",
    "          ).to(CPU).detach()\n",
    "        )\n",
    "    start = end\n",
    "    progress.update(1)\n",
    "  progress.close()  # Close the bar so later bars render on fresh lines\n",
    "  preds = torch.concat(preds, axis=0)\n",
    "  return labels, preds, lossMask, noteIds, tweetIds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e721b7a6-6895-4bd9-b49c-9317a7b44655",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper for incremental eval\n",
    "def eval_model(model, trainDataset, testDataset, device, gpuBatchSize):\n",
    "  print(\"Forward pass on training data:\")\n",
    "  allTrainLabels, allTrainPreds, allTrainMask, _, _ = apply_model(model, trainDataset, device, gpuBatchSize=gpuBatchSize, frac=(1/9))\n",
    "  print(\"Forward pass on test data:\")\n",
    "  allTestLabels, allTestPreds, allTestMask, _, _ = apply_model(model, testDataset, device, gpuBatchSize=gpuBatchSize, frac=None)\n",
    "  results = []\n",
    "  for i in range(allTrainLabels.shape[1]):\n",
    "    trainLabels, trainPreds = allTrainLabels[allTrainMask[:, i] == 1, i], allTrainPreds[allTrainMask[:, i] == 1, i]\n",
    "    testLabels, testPreds = allTestLabels[allTestMask[:, i] == 1, i], allTestPreds[allTestMask[:, i] == 1, i]\n",
    "    if i < 2:\n",
    "      trainMSE = nn.MSELoss()(trainPreds, trainLabels)\n",
    "      testMSE = nn.MSELoss()(testPreds, testLabels)\n",
    "      results.append((trainMSE, testMSE, None, None, None, None))\n",
    "    else:\n",
    "      if trainLabels.sum().item() == 0 or testLabels.sum().item() == 0:\n",
    "        results.append((None, None, -1, -1, -1, -1))\n",
    "        continue\n",
    "      trainAuc = skm.roc_auc_score(trainLabels.numpy(), trainPreds.numpy())\n",
    "      testAuc = skm.roc_auc_score(testLabels.numpy(), testPreds.numpy())\n",
    "      fpr, tpr, _ = skm.roc_curve(testLabels.numpy(), testPreds.numpy())\n",
    "      tprAt1 = tpr[np.argmin(np.abs(fpr - .01))]\n",
    "      tprAt5 = tpr[np.argmin(np.abs(fpr - .05))]\n",
    "      results.append((None, None, trainAuc, testAuc, tprAt1, tprAt5))\n",
    "  return results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b12849d1-472f-4cf3-99e4-6fe68952a0ab",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helpers to save checkpointed state\n",
    "def save_checkpoint(root, epoch, batch, loss, model, optimizer, scheduler, scaler, stats):\n",
    "  \"\"\"Serialize full training state (model/optimizer/scheduler/scaler + stats) to <root>/<unix-time>.pt.\"\"\"\n",
    "  # BUGFIX: model only has a .module attribute when wrapped in nn.DataParallel;\n",
    "  # unwrap conditionally so single-GPU runs do not raise AttributeError.\n",
    "  modelState = model.module.state_dict() if hasattr(model, \"module\") else model.state_dict()\n",
    "  checkpoint = {\n",
    "    \"epoch\": epoch,\n",
    "    \"batch\": batch,\n",
    "    \"loss\": loss,\n",
    "    \"model\": modelState,\n",
    "    \"optimizer\": optimizer.state_dict() if optimizer is not None else None,\n",
    "    \"scheduler\": scheduler.state_dict() if scheduler is not None else None,\n",
    "    \"scaler\": scaler.state_dict() if scaler is not None else None,\n",
    "    \"stats\": stats,\n",
    "  }\n",
    "  # Timestamp-based id keeps checkpoints within a run unique (1-second resolution).\n",
    "  checkpointId = str(int(time.time()))\n",
    "  path = os.path.join(root, f\"{checkpointId}.pt\")\n",
    "  print(f\"Saving checkpoint to {path}\")\n",
    "  torch.save(checkpoint, path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "85d603a5-2dfb-4105-90be-2ea15222b658",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define training loop\n",
    "def train_model(\n",
    "  model,\n",
    "  trainDataset,\n",
    "  testDataset,\n",
    "  includedLabels,\n",
    "  numEpochs=3,\n",
    "  device=CUDA,\n",
    "  deepLogEvery=1,\n",
    "  gpuBatchSize=32,\n",
    "  learningRate=1e-5,\n",
    "  logEvery=None,\n",
    "  learningSchedule=True,\n",
    "  objectiveWeights=None,\n",
    "  mseMultiplier=.2\n",
    "):\n",
    "  \"\"\"Train the multi-head model with mixed precision, optional linear LR decay,\n",
    "  per-epoch evaluation, and per-epoch checkpointing to DATA_ROOT/<run-id>/.\"\"\"\n",
    "  gc.collect()\n",
    "  torch.cuda.empty_cache()\n",
    "  # Set up checkpoint directory (unix-time run id)\n",
    "  modelId = str(int(time.time()))\n",
    "  print(f\"Beginning training run for {modelId}\")\n",
    "  modelRoot = os.path.join(DATA_ROOT, modelId)\n",
    "  os.mkdir(modelRoot)\n",
    "  # Prepare data and batching\n",
    "  print(\"Setting up training\")\n",
    "  model = model.to(device)\n",
    "  if device == CUDA and torch.cuda.device_count() > 1:\n",
    "    model = nn.DataParallel(model)\n",
    "    batchSize = gpuBatchSize * torch.cuda.device_count()\n",
    "  else:\n",
    "    batchSize = gpuBatchSize\n",
    "  # BUGFIX: parameter names carry a \"module.\" prefix only under DataParallel;\n",
    "  # computing the prefix keeps the parameter-group logic correct on a single GPU.\n",
    "  prefix = \"module.\" if isinstance(model, nn.DataParallel) else \"\"\n",
    "  inputIds, attentionMask, labels, lossMask, _, _ = trainDataset\n",
    "  numBatches = int(np.ceil(inputIds.shape[0] / batchSize))\n",
    "  progress = tqdm(range(numBatches * numEpochs))\n",
    "  # NOTE(review): deepLogEvery is normalized here but never used below.\n",
    "  if deepLogEvery is None:\n",
    "    deepLogEvery = max(1, int(np.ceil(numEpochs / 10)))\n",
    "  print(f\"Training data contains {inputIds.shape[0]} rows to be split into {numBatches} batches\")\n",
    "  # Partition parameters into groups that receive different weight decay.\n",
    "  singleHeads = (\n",
    "    \"interceptPredictor\",\n",
    "    \"factorPredictor\",\n",
    "    \"crhClassifier\",\n",
    "    \"crnhClassifier\",\n",
    "    \"relevanceClassifier\",\n",
    "    \"classificationClassifier\",\n",
    "  )\n",
    "  tagHeads = (\"helpfulTagClassifier\", \"notHelpfulTagClassifier\")\n",
    "  # Guard against silently skipping parameters that belong to an unexpected submodule.\n",
    "  assert all(\n",
    "    any(n.startswith(prefix + head) for head in ((\"roberta\",) + singleHeads + tagHeads))\n",
    "    for n, _ in model.named_parameters()\n",
    "  )\n",
    "  robertaParams = [p for n, p in model.named_parameters() if n.startswith(prefix + \"roberta\")]\n",
    "  singleHeadParams = [p for n, p in model.named_parameters() if any(n.startswith(prefix + head) for head in singleHeads)]\n",
    "  tagParams = [p for n, p in model.named_parameters() if any(n.startswith(prefix + head) for head in tagHeads)]\n",
    "  print(\"Parameter groups:\", len(robertaParams), len(singleHeadParams), len(tagParams))\n",
    "  # NOTE(review): weight decays are hardcoded here even though eval_hparams exposes\n",
    "  # per-group decay arguments. Local renamed from `optim` to avoid shadowing torch.optim.\n",
    "  optimizer = torch.optim.AdamW([\n",
    "    {\"params\": robertaParams, \"weight_decay\": .01},\n",
    "    {\"params\": singleHeadParams, \"weight_decay\": .1},\n",
    "    {\"params\": tagParams, \"weight_decay\": .25},\n",
    "  ], lr=learningRate)\n",
    "  # BUGFIX: define scheduler unconditionally so the save_checkpoint call below does\n",
    "  # not raise NameError when learningSchedule is False (save_checkpoint accepts None).\n",
    "  scheduler = None\n",
    "  if learningSchedule:\n",
    "    scheduler = LinearLR(\n",
    "      optimizer,\n",
    "      start_factor=1.0,  # Start at the initial learning rate\n",
    "      end_factor=0.0,    # End at 0\n",
    "      total_iters=(numBatches * numEpochs),  # Total number of training steps\n",
    "    )\n",
    "  scaler = GradScaler()\n",
    "  # Default to weighting all objectives equally; weights must sum to 1.\n",
    "  if objectiveWeights is None:\n",
    "    objectiveWeights = torch.ones(labels.shape[1]) / labels.shape[1]\n",
    "  assert np.abs(objectiveWeights.sum().item() - 1) < 1e-5\n",
    "  lossWeights = make_loss_weights(lossMask, objectiveWeights, numBatches).to(device)\n",
    "  model.train()\n",
    "  for epoch in range(numEpochs):\n",
    "    gc.collect()\n",
    "    torch.cuda.empty_cache()\n",
    "    losses = []\n",
    "    # Shuffle all per-row tensors with the same permutation each epoch.\n",
    "    randOrder = np.random.permutation(np.arange(0, inputIds.shape[0]))\n",
    "    inputIds = inputIds[randOrder]\n",
    "    attentionMask = attentionMask[randOrder]\n",
    "    labels = labels[randOrder]\n",
    "    lossWeights = lossWeights[randOrder]\n",
    "    for batch in range(numBatches):\n",
    "      # Obtain batch\n",
    "      start = batch * batchSize\n",
    "      end = start + batchSize\n",
    "      y = labels[start:end].to(device)\n",
    "      with autocast(device_type=device, dtype=torch.bfloat16):\n",
    "        # Forward pass\n",
    "        y_hat = model(\n",
    "          inputIds[start:end].to(device),\n",
    "          attentionMask[start:end].to(device),\n",
    "        )\n",
    "        # Compute loss\n",
    "        loss = multihead_loss(y_hat, lossWeights[start:end], y, mseMultiplier).sum()\n",
    "      losses.append(loss.item())\n",
    "      # Backward pass\n",
    "      scaler.scale(loss).backward()\n",
    "      # Update weights\n",
    "      scaler.step(optimizer)\n",
    "      scaler.update()\n",
    "      if learningSchedule:\n",
    "        scheduler.step()\n",
    "      # Zero out gradients\n",
    "      optimizer.zero_grad()\n",
    "      # Update progress bar\n",
    "      progress.update(1)\n",
    "      # BUGFIX: logEvery defaults to None; guard before the modulo to avoid TypeError.\n",
    "      if logEvery is not None and batch % logEvery == 0:\n",
    "        print(f\"epoch={epoch:<3d}  batch={batch:<5d}  loss={np.mean(losses[-logEvery:]):7.5f}\")\n",
    "    # Evaluate and checkpoint at the end of each epoch\n",
    "    model.eval()\n",
    "    results = eval_model(model, trainDataset, testDataset, device, gpuBatchSize=gpuBatchSize)\n",
    "    stats = []\n",
    "    for (trainMSE, testMSE, trainAuc, testAuc, tprAt1, tprAt5), label in zip(results, includedLabels):\n",
    "      if trainMSE is None:\n",
    "        print(f\"{label:<40}  epoch={epoch:<3d}  loss={np.mean(losses):7.5f}  trainAuc={trainAuc:5.3f}  testAuc={testAuc:5.3f}  tpr@0.01={tprAt1:5.3f}  tpr@0.05={tprAt5:5.3f}\")\n",
    "      else:\n",
    "        print(f\"{label:<40}  epoch={epoch:<3d}  loss={np.mean(losses):7.5f}  trainMSE={trainMSE:7.5f}  testMSE={testMSE:7.5f}\")\n",
    "      stats.append((label, trainMSE, testMSE, trainAuc, testAuc, tprAt1, tprAt5))\n",
    "    save_checkpoint(modelRoot, epoch, batch, np.mean(losses), model, optimizer, scheduler, scaler, stats)\n",
    "    model.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b77b8eed-6815-4e48-852c-5d64fe3a8ef6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Construct a model and run one training configuration end to end.\n",
    "def eval_hparams(\n",
    "  trainDataset,\n",
    "  testDataset,\n",
    "  includedLabels=allSignalLabels,\n",
    "  numEpochs=5,\n",
    "  gpuBatchSize=32,\n",
    "  learningRate=1e-5,\n",
    "  learningSchedule=True,\n",
    "  robertaWeightDecay=0.01,\n",
    "  helpfulnessWeightDecay=0.1,\n",
    "  relevanceWeightDecay=0.05,\n",
    "  abuseWeightDecay=0.25,\n",
    "  objectiveWeights=None,\n",
    "  logEvery=500,\n",
    "  mseMultiplier=.2\n",
    "):\n",
    "  # NOTE(review): the four *WeightDecay arguments are accepted but never forwarded —\n",
    "  # train_model hardcodes its weight decays, so these knobs currently have no effect.\n",
    "  model = MultiHeadMLPAllSignals()\n",
    "  train_model(\n",
    "    model,\n",
    "    trainDataset,\n",
    "    testDataset, \n",
    "    includedLabels,\n",
    "    numEpochs=numEpochs,\n",
    "    gpuBatchSize=gpuBatchSize,\n",
    "    learningRate=learningRate,\n",
    "    learningSchedule=learningSchedule,\n",
    "    objectiveWeights=objectiveWeights,\n",
    "    logEvery=logEvery,\n",
    "    mseMultiplier=mseMultiplier,\n",
    "  )"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "344e1ee5-9fb4-44db-a4c0-920f6a16912d",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Small Scale Tests"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "55db3b8f-1272-41ca-80c7-f6db54ffceb6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test run at small scale\n",
    "eval_hparams(\n",
    "  trainTensorsSmall,\n",
    "  testTensorsSmall,\n",
    "  numEpochs=1,\n",
    "  logEvery=1,\n",
    "  gpuBatchSize=32\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6f72d59b-0e2b-4f1b-9380-d931ba5d2a84",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test run at medium scale\n",
    "eval_hparams(\n",
    "  trainTensorsMedium,\n",
    "  testTensorsMedium,\n",
    "  numEpochs=3,\n",
    "  logEvery=10,\n",
    "  gpuBatchSize=32\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7c802674-30a5-45be-9e88-caefd12bd305",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Full Scale Training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "49d1c9ec-d39d-4d36-bc09-f4638ce2b611",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train at full scale\n",
    "eval_hparams(\n",
    "  trainTensors,\n",
    "  testTensors,\n",
    "  numEpochs=3,\n",
    "  logEvery=250,\n",
    "  gpuBatchSize=32\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5261b79d-3e31-4e1f-a50b-fc3bac8c760c",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "# Train Meta Model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ce0fd971-a104-4776-8706-5c796a8fd148",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Apply Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fb6d84be-d7f7-4a07-8027-d5b94a4f7bcc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Partition the test tensors into tuning and holdout halves.\n",
    "def split_test(testTensors):\n",
    "  \"\"\"Split each tensor in testTensors 50/50 into (tuningTensors, holdoutTensors).\"\"\"\n",
    "  print(tuple(t.shape for t in testTensors))\n",
    "  midpoint = testTensors[0].shape[0] // 2\n",
    "  tuningTensors = tuple(t[:midpoint] for t in testTensors)\n",
    "  holdoutTensors = tuple(t[midpoint:] for t in testTensors)\n",
    "  print(tuple(t.shape for t in tuningTensors))\n",
    "  print(tuple(t.shape for t in holdoutTensors))\n",
    "  return tuningTensors, holdoutTensors\n",
    "\n",
    "tuningTensors, holdoutTensors = split_test(testTensors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3871e633-7585-4fad-8bb1-ee2957ab97d4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prune to only rows that represent real note/tweet pairs.\n",
    "def prune_tensors(tensors, labels):\n",
    "  \"\"\"Drop synthetic rows, keeping only rows whose 'classification' loss bit is set.\n",
    "\n",
    "  tensors: (inputIds, attentionMask, labelTensor, lossMask, noteIds, tweetIds)\n",
    "  labels: ordered list of objective column names (e.g. allSignalLabels)\n",
    "  \"\"\"\n",
    "  # BUGFIX: use the labels argument instead of the allSignalLabels global, and avoid\n",
    "  # shadowing it with the unpacked label tensor (the parameter was previously dead).\n",
    "  classificationIdx = labels.index(\"classification\")\n",
    "  crhIdx = labels.index(\"crh\")\n",
    "  # Identify rows that represent real pairs based on 'classification' bit\n",
    "  inputIds, attentionMask, labelTensor, lossMask, noteIds, tweetIds = tensors\n",
    "  realRows = (lossMask[:, classificationIdx] > 0)  # all real rows have the classification bit set\n",
    "  print(np.bincount(realRows.cpu().detach().numpy()))\n",
    "  assert lossMask[~realRows, crhIdx].sum().item() == 0  # non-real rows should never have loss for the CRH label\n",
    "  # Prune to only real data\n",
    "  return (\n",
    "    inputIds[realRows, :],\n",
    "    attentionMask[realRows, :],\n",
    "    labelTensor[realRows, :],\n",
    "    lossMask[realRows, :],\n",
    "    noteIds[realRows],\n",
    "    tweetIds[realRows],\n",
    "  )\n",
    "\n",
    "trainTensorsPruned = prune_tensors(trainTensors, allSignalLabels)\n",
    "tuningTensorsPruned = prune_tensors(tuningTensors, allSignalLabels)  # Note that the ratio here isn't exactly 5x because the order was randomized after creating low relevance instances\n",
    "holdoutTensorsPruned = prune_tensors(holdoutTensors, allSignalLabels)  # Note that the ratio here isn't exactly 5x because the order was randomized after creating low relevance instances"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7c9d1bdc-d427-48a1-939e-106e76fe0a26",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load all signal model\n",
    "def load_full_signal_checkpoint(path):\n",
    "  # Log checkpoint state\n",
    "  print(f\"Loading checkpoint from {path}\")\n",
    "  # SECURITY NOTE(review): weights_only=False unpickles arbitrary objects; only load\n",
    "  # checkpoints you produced yourself (needed here because 'stats' holds Python tuples).\n",
    "  checkpoint = torch.load(path, weights_only=False)\n",
    "  print(f\"  epoch={checkpoint['epoch']:<3d}\")\n",
    "  print(f\"  batch={checkpoint['batch']:<3d}\")\n",
    "  print(f\"  loss={checkpoint['loss']:7.5f}\")\n",
    "  # Replay the per-label eval stats recorded at checkpoint time.\n",
    "  for label, trainMSE, testMSE, trainAuc, testAuc, tprAt1, tprAt5 in checkpoint[\"stats\"]:\n",
    "    if trainMSE is None:\n",
    "      print(f\"{label:<40}  trainAuc={trainAuc:5.3f}  testAuc={testAuc:5.3f}  tpr@0.01={tprAt1:5.3f}  tpr@0.05={tprAt5:5.3f}\")\n",
    "    else:\n",
    "      print(f\"{label:<40}  trainMSE={trainMSE:7.5f}  testMSE={testMSE:7.5f}\")      \n",
    "  # load model\n",
    "  model = MultiHeadMLPAllSignals()\n",
    "  model.load_state_dict(checkpoint[\"model\"])\n",
    "  model.eval()\n",
    "  return model.to(CUDA)\n",
    "\n",
    "allSignalModel = load_full_signal_checkpoint(os.path.expanduser(\"~/workspace/datasets/helpfulness/1753341209/1753352848.pt\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2faac091-13fe-4c1d-8cef-6bc5f814d97e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper for applying model\n",
    "# NOTE(review): a later cell redefines apply_model with a different signature\n",
    "# (inputs-only model call, colLabels, frac); which one is live depends on execution order.\n",
    "def apply_model(model, dataset, device, gpuBatchSize):\n",
    "  # Configure batching\n",
    "  if device == CUDA and torch.cuda.device_count() > 1:\n",
    "    batchSize = gpuBatchSize * torch.cuda.device_count()\n",
    "  else:\n",
    "    batchSize = gpuBatchSize\n",
    "  # Prepare data\n",
    "  inputIds, attentionMask, labels, lossMask, noteIds, tweetIds = dataset\n",
    "  assert inputIds.shape[0] == attentionMask.shape[0] == labels.shape[0] == lossMask.shape[0] == noteIds.shape[0] == tweetIds.shape[0]\n",
    "  # Process chunks\n",
    "  start = 0\n",
    "  # Model must already be in eval mode; this helper never toggles it.\n",
    "  assert not model.training\n",
    "  preds = []\n",
    "  progress = tqdm(range(int(np.ceil(inputIds.shape[0] / batchSize))))\n",
    "  while start < inputIds.shape[0]:\n",
    "    end = start + batchSize\n",
    "    with torch.no_grad():\n",
    "      with autocast(device_type=device, dtype=torch.bfloat16):\n",
    "        preds.append(\n",
    "          model(\n",
    "            inputIds[start:end].to(device),\n",
    "            attentionMask[start:end].to(device),\n",
    "          ).to(CPU).detach()\n",
    "        )\n",
    "    start = end\n",
    "    progress.update(1)\n",
    "  preds = torch.concat(preds, axis=0)\n",
    "  return labels, preds, lossMask, noteIds, tweetIds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67ea82f0-22de-4a2f-bf77-2e41333e427f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Extract train tensors\n",
    "trainTensorsSignals = apply_model(allSignalModel, trainTensorsPruned, CUDA, 32)\n",
    "tuningTensorsSignals = apply_model(allSignalModel, tuningTensorsPruned, CUDA, 32)\n",
    "holdoutTensorsSignals = apply_model(allSignalModel, holdoutTensorsPruned, CUDA, 32)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "628b3461-77b3-4c00-8a96-02198c4a34bc",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Profile Signals"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4fb263a9-fbf2-41ed-b353-cb29f676d416",
   "metadata": {},
   "outputs": [],
   "source": [
    "allSignalLabels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8a35cb99-b56b-4f99-9bd0-01a59f5d4d79",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to convert all-signal model results to dataframe\n",
    "def make_dataframe(allSignalTensors, colNames, dataset, clusters):\n",
    "  \"\"\"Build a per-note dataframe of labels, raw predictions, and CRH-oriented predictions.\"\"\"\n",
    "  labels, preds, lossMask, noteIds, tweetIds = allSignalTensors\n",
    "  # Set note and tweet id\n",
    "  df = pd.DataFrame({\n",
    "    NOTE_ID: noteIds,\n",
    "    TWEET_ID: tweetIds,\n",
    "  })\n",
    "  # Set predictions and labels\n",
    "  assert len(colNames) == preds.shape[1] == labels.shape[1] == lossMask.shape[1]\n",
    "  for i, col in enumerate(colNames):\n",
    "    df[f\"{col}_label\"] = labels[:, i].cpu().detach().to(torch.float32).numpy()\n",
    "    # Mask out labels that were never observed for this row/objective.\n",
    "    df.loc[(lossMask[:, i] == 0).numpy(), f\"{col}_label\"] = pd.NA\n",
    "    df[f\"{col}_pred\"] = preds[:, i].cpu().detach().to(torch.float32).numpy()\n",
    "    # Orient each signal so that larger values always point toward \"more likely CRH\".\n",
    "    crhPreds = preds[:, i].cpu().detach().to(torch.float32).numpy()\n",
    "    if col == \"factor\":\n",
    "      crhPreds = np.abs(crhPreds)\n",
    "    if col.startswith(\"notHelpful\") or col == \"factor\" or col == \"crnh\":\n",
    "      crhPreds = crhPreds * -1\n",
    "    df[f\"{col}_crh_pred\"] = crhPreds\n",
    "  # Merge in status\n",
    "  # BUGFIX: use the dataset argument rather than the enDataset global (the parameter\n",
    "  # was previously dead); current callers pass enDataset, so behavior is unchanged.\n",
    "  df = df.merge(dataset[[NOTE_ID, TWEET_ID, CURRENT_LABEL]])\n",
    "  # Merge in clusters\n",
    "  df = df.merge(clusters)\n",
    "  # Make sure no rows were dropped and return\n",
    "  assert len(df) == labels.shape[0]\n",
    "  return df\n",
    "\n",
    "trainSignalDF = make_dataframe(trainTensorsSignals, allSignalLabels, enDataset, clusters)\n",
    "tuningSignalDF = make_dataframe(tuningTensorsSignals, allSignalLabels, enDataset, clusters)\n",
    "holdoutSignalDF = make_dataframe(holdoutTensorsSignals, allSignalLabels, enDataset, clusters)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "656fe8d5-a8ac-4199-9462-2e46f6c54b87",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper for calculating KL divergence between two categorical samples\n",
    "def kl_divergence(pSeries, qSeries):\n",
    "  \"\"\"KL(P || Q) over the union of observed values, with add-one (Laplace) smoothing.\"\"\"\n",
    "  uniqueVals = np.unique(pd.concat([pSeries, qSeries]))\n",
    "  # BUGFIX: the previous implementation filled missing *normalized* proportions with 1,\n",
    "  # which is not a valid probability and made divergences incomparable. Smooth raw\n",
    "  # counts instead (+1 per value) and renormalize so each distribution sums to 1.\n",
    "  countP = pSeries.value_counts().reindex(uniqueVals, fill_value=0) + 1\n",
    "  countQ = qSeries.value_counts().reindex(uniqueVals, fill_value=0) + 1\n",
    "  probP = (countP / countP.sum()).values\n",
    "  probQ = (countQ / countQ.sum()).values\n",
    "  return np.sum(probP * np.log(probP / probQ))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5320c44a-6767-4dba-baf3-c301de67757d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Profile signals\n",
    "def profile_signals(df, labelCols):\n",
    "  # For each signal: AUC against the CRH label, AUC against its own label (where it is a\n",
    "  # classification target), and KL divergence between the cluster distribution of actual\n",
    "  # CRH notes and the top-k notes ranked by this signal (k = number of CRH notes).\n",
    "  tuples = []\n",
    "  for label in labelCols:\n",
    "    # Isolate predictions and labels\n",
    "    colPreds = df[f\"{label}_pred\"]\n",
    "    colLabels = df[f\"{label}_label\"]\n",
    "    colMask = df[f\"{label}_label\"].notna()\n",
    "    crhPreds = df[f\"{label}_crh_pred\"]\n",
    "    crhLabels = df[\"crh_label\"]\n",
    "    crhMask = df[\"crh_label\"].notna()\n",
    "    # Calculate AUCs\n",
    "    assert crhLabels[crhMask].isna().sum() == 0\n",
    "    assert colLabels[colMask].isna().sum() == 0\n",
    "    # Labels are binarized at .5 for AUC scoring.\n",
    "    crhAuc = skm.roc_auc_score(crhLabels[crhMask] > .5, crhPreds[crhMask])\n",
    "    if label in [\"intercept\", \"factor\", \"relevance\"]:\n",
    "      # Regression-style signals have no binary label of their own.\n",
    "      colAuc = pd.NA\n",
    "    else:\n",
    "      colAuc = skm.roc_auc_score(colLabels[colMask] > .5, colPreds[colMask])\n",
    "    # Calculate KL divergences\n",
    "    klds = dict()\n",
    "    for cluster in [\"notes\", \"tweets\", \"joint\"]:\n",
    "      crhClusters = df[df[CURRENT_LABEL] == CURRENTLY_RATED_HELPFUL][f\"{cluster}_cluster_id\"]\n",
    "      colClusters = df.sort_values(f\"{label}_crh_pred\", ascending=False).head(len(crhClusters))[f\"{cluster}_cluster_id\"]\n",
    "      klds[cluster] = kl_divergence(crhClusters, colClusters)\n",
    "    tuples.append((label, crhAuc, colAuc, klds[\"notes\"], klds[\"tweets\"], klds[\"joint\"]))\n",
    "  return pd.DataFrame(tuples, columns=[\"Signal\", \"CRH AUC\", \"Label AUC\", \"KL (Notes)\", \"KL (Tweets)\", \"KL (Joint)\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "305b1f3f-0f88-49c1-b18e-6326127c2e82",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show AUC and KL summary for tuning data\n",
    "profile_signals(tuningSignalDF, allSignalLabels).sort_values(\"KL (Notes)\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "de3e630a-ae84-4534-b5c4-1d99b693c3db",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Validate that holdout results align with tuning data\n",
    "profile_signals(holdoutSignalDF, allSignalLabels).sort_values(\"KL (Notes)\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ae3280aa-4358-4b16-baf1-fef31dac88a4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define plotting helpers\n",
    "def plot_crh_rates_single_with_pred(df, topWords, col, clusterCol, dataset=enDataset):\n",
    "  # Compare cluster distributions: all proposed notes, misleading-classified notes,\n",
    "  # actual CRH notes, and notes the model would mark CRH under an equal-sized budget.\n",
    "  # NOTE(review): the df[\"above\"] assignment below mutates the caller's dataframe\n",
    "  # (adds an \"above\" column) before df is rebound by the merge.\n",
    "  # Prepare DF\n",
    "  init = len(df)\n",
    "  crhBudget = (df[CURRENT_LABEL] == CURRENTLY_RATED_HELPFUL).sum()\n",
    "  # NOTE(review): indexing at position crhBudget raises IndexError if every row is CRH;\n",
    "  # presumably never the case for this data — TODO confirm.\n",
    "  crhThreshold = df[col].sort_values(ascending=False).values[crhBudget]\n",
    "  df[\"above\"] = df[col] > crhThreshold\n",
    "  df = df.merge(dataset[[NOTE_ID, AUTHOR_CLASSIFICATION]])\n",
    "  df = df[[clusterCol, CURRENT_LABEL, AUTHOR_CLASSIFICATION, \"above\"]]\n",
    "  assert len(df) == init\n",
    "  # Compute cluster ratios\n",
    "  proposedRatios = df[clusterCol].value_counts(normalize=True).to_frame().reset_index(drop=False).rename(columns={\"proportion\": \"proposed\"})\n",
    "  misleadingRatios = df[df[AUTHOR_CLASSIFICATION] == MISINFORMED_OR_POTENTIALLY_MISLEADING][clusterCol].value_counts(normalize=True).to_frame().reset_index(drop=False).rename(columns={\"proportion\": \"misleading\"})\n",
    "  crhRatios = df[df[CURRENT_LABEL] == CURRENTLY_RATED_HELPFUL][clusterCol].value_counts(normalize=True).to_frame().reset_index(drop=False).rename(columns={\"proportion\": \"crh\"})\n",
    "  aboveRatios = df[df[\"above\"]][clusterCol].value_counts(normalize=True).to_frame().reset_index(drop=False).rename(columns={\"proportion\": \"above\"})\n",
    "  merged = proposedRatios.merge(misleadingRatios, how=\"outer\").merge(crhRatios, how=\"outer\").merge(aboveRatios, how=\"outer\").fillna(0)\n",
    "  merged = merged.sort_values(\"crh\", ascending=False)\n",
    "  fig, ax = plt.subplots(1, 1)\n",
    "  fig.set_figwidth(40)\n",
    "  fig.set_figheight(6)\n",
    "  # Map cluster ids to their top keywords for readable x-axis labels.\n",
    "  merged[clusterCol] = [topWords[str(clusterId)] for clusterId in merged[clusterCol]]\n",
    "  merged = merged[[clusterCol, \"proposed\", \"misleading\", \"crh\", \"above\"]].rename(columns={\n",
    "    \"above\": \"Predicted CRH Note Distribution\",\n",
    "    \"crh\": \"CRH Note Distribution\",\n",
    "    \"misleading\": \"Misleading Note Distribution\",\n",
    "    \"proposed\": \"Proposed Note Distribution\"\n",
    "  })\n",
    "  merged.plot(x=clusterCol, kind=\"bar\", ax=ax, rot=0)\n",
    "  ax.set_xlabel(\"Keywords\")\n",
    "  ax.set_ylabel(\"Rate\")\n",
    "  ax.grid(axis=\"y\", zorder=-1)\n",
    "  ax.set_axisbelow(True)\n",
    "  ax.legend(loc='upper right', ncol=4, fontsize=20)\n",
    "\n",
    "# Render one chart per clustering granularity.\n",
    "def plot_crh_rates_with_pred(df, col, topWords=topWords):\n",
    "  plot_crh_rates_single_with_pred(df, topWords[\"notes\"], col, \"notes_cluster_id\")\n",
    "  plot_crh_rates_single_with_pred(df, topWords[\"tweets\"], col, \"tweets_cluster_id\")\n",
    "  plot_crh_rates_single_with_pred(df, topWords[\"joint\"], col, \"joint_cluster_id\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "19ac8cb0-d0c1-48ba-ac77-579a68b0756c",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_crh_rates_with_pred(holdoutSignalDF, \"crh_crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "781e454c-0523-430d-8c02-aca5bdeb5fa4",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_crh_rates_with_pred(holdoutSignalDF, \"helpfulAddressesClaim_crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "31918231-42a9-40ce-9a07-4b704cfe9b0e",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_crh_rates_with_pred(holdoutSignalDF, \"helpfulClear_crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe5488d5-8a41-4774-a43f-d721bc96b134",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_crh_rates_with_pred(holdoutSignalDF, \"notHelpfulOpinionSpeculation_crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "27c774c5-a921-4c1e-997e-35cbd7fa47cb",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_crh_rates_with_pred(holdoutSignalDF, \"helpfulImportantContext_crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ff97de6e-5d59-4255-b146-577bae40bbf1",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_crh_rates_with_pred(holdoutSignalDF, \"intercept_crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9d006f86-9b6d-4a98-adec-599e61fd0a83",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_crh_rates_with_pred(holdoutSignalDF, \"notHelpfulNoteNotNeeded_crh_pred\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "37b98c2f-8c63-4de3-87df-d4d72d91e37d",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Define Training Helpers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d27773a6-3fae-432c-b98d-11a13982cfc7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define a helper to prepare loss weights\n",
    "def make_loss_weights(lossMask, objectiveWeights, numBatches):\n",
    "  # Produce a per-(row, objective) weight matrix: each objective's weights sum to its\n",
    "  # objectiveWeights entry (times numBatches), split evenly over its labeled rows.\n",
    "  # Compute weight to assign to each instance of training data for a particular objective\n",
    "  assert objectiveWeights.shape[0] == lossMask.shape[1]\n",
    "  # NOTE(review): an objective column with zero labeled rows produces an inf weight here.\n",
    "  instanceWeights = lossMask.sum(axis=0) ** -1\n",
    "  assert objectiveWeights.shape[0] == instanceWeights.shape[0]\n",
    "  assert np.abs(objectiveWeights.sum().item() - 1) < 1e-5\n",
    "  adjustedInstanceWeight = instanceWeights * objectiveWeights\n",
    "  # Compute weight to assign to each prediction loss\n",
    "  predictionLoss = lossMask * adjustedInstanceWeight\n",
    "  assert predictionLoss.shape == lossMask.shape\n",
    "  # Total weight sums to 1; per-objective totals match objectiveWeights.\n",
    "  assert np.abs(predictionLoss.sum().item() - 1) < 1e-5\n",
    "  assert ((predictionLoss.sum(axis=0) - objectiveWeights).abs() < 1e-5).all().item()\n",
    "  # Apply batch scaling\n",
    "  return numBatches * predictionLoss \n",
    "\n",
    "# Smoke test on a toy mask / weight vector.\n",
    "make_loss_weights(\n",
    "  torch.tensor([\n",
    "    [1, 1, 1, 1],\n",
    "    [1, 1, 0, 0],\n",
    "    [1, 0, 0, 0],\n",
    "  ], dtype=torch.float32),\n",
    "  torch.tensor([0.2, 0.2, 0.5, .1], dtype=torch.float32),\n",
    "  10,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7dd690c4-e330-447b-ac6d-eab295cdd6e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to compute loss\n",
    "def multihead_loss(logits, lossWeights, labels, posWeight=None):\n",
    "  # Unreduced BCE-with-logits loss per element, scaled by lossWeights and an optional\n",
    "  # positive-class weight; returns a matrix shaped like logits.\n",
    "  # NOTE(review): train_model passes mseMultiplier as this argument — presumably\n",
    "  # intentional reuse of the slot, but the naming disagrees; confirm semantics.\n",
    "  # Validate sizes match\n",
    "  assert logits.shape == lossWeights.shape\n",
    "  assert logits.shape == labels.shape\n",
    "  # Compute positive class weights\n",
    "  if posWeight is not None:\n",
    "    # Elements with label == 1 get weight posWeight; all other elements get weight 1.\n",
    "    classWeights = (labels == 1)\n",
    "    classWeights = classWeights * posWeight\n",
    "    classWeights[classWeights == 0] = 1\n",
    "  else:\n",
    "    classWeights = 1\n",
    "  # Compute loss of each prediction\n",
    "  return (nn.BCEWithLogitsLoss(reduction=\"none\")(logits, labels) * lossWeights * classWeights)\n",
    "\n",
    "# Spot-check: posWeight=None and posWeight=1 should agree; posWeight=10 upweights positives.\n",
    "print(\n",
    "  multihead_loss(\n",
    "    torch.arange(-3, 5, dtype=torch.float32).reshape(2, 4),\n",
    "    torch.tensor([1, 0, 1, 0, 0, 1, 0, 1], dtype=torch.float32).reshape(2, 4),\n",
    "    torch.concat([torch.ones(1, 4, dtype=torch.float32), torch.zeros(1, 4, dtype=torch.float32)], axis=0),\n",
    "    None\n",
    "  )\n",
    ")\n",
    "print(\n",
    "  multihead_loss(\n",
    "    torch.arange(-3, 5, dtype=torch.float32).reshape(2, 4),\n",
    "    torch.tensor([1, 0, 1, 0, 0, 1, 0, 1], dtype=torch.float32).reshape(2, 4),\n",
    "    torch.concat([torch.ones(1, 4, dtype=torch.float32), torch.zeros(1, 4, dtype=torch.float32)], axis=0),\n",
    "    1\n",
    "  )\n",
    ")\n",
    "print(\n",
    "  multihead_loss(\n",
    "    torch.arange(-3, 5, dtype=torch.float32).reshape(2, 4),\n",
    "    torch.tensor([1, 0, 1, 0, 0, 1, 0, 1], dtype=torch.float32).reshape(2, 4),\n",
    "    torch.concat([torch.ones(1, 4, dtype=torch.float32), torch.zeros(1, 4, dtype=torch.float32)], axis=0),\n",
    "    10\n",
    "  )\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fdc380ad-59e5-4cce-b50e-8de498607633",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper for applying model\n",
    "def apply_model(model, dataset, device, gpuBatchSize, colLabels, frac=None):\n",
    "  \"\"\"Run the meta model over dataset in batches, returning CRH labels, preds, mask, and ids.\n",
    "\n",
    "  frac: optional fraction (0, 1] of rows to score on a random subsample.\n",
    "  NOTE(review): redefines the earlier apply_model; execution order determines which is live.\n",
    "  \"\"\"\n",
    "  # Configure batching\n",
    "  if device == CUDA and torch.cuda.device_count() > 1:\n",
    "    batchSize = gpuBatchSize * torch.cuda.device_count()\n",
    "  else:\n",
    "    batchSize = gpuBatchSize\n",
    "  # Prepare data: keep only the CRH column of labels/lossMask (2D slice, width 1).\n",
    "  labels, inputs, lossMask, noteIds, tweetIds = dataset\n",
    "  crhIdx = colLabels.index(\"crh\")\n",
    "  assert crhIdx == 2  # sanity-check expected column ordering of colLabels\n",
    "  labels = labels[:, crhIdx:(crhIdx+1)]\n",
    "  lossMask = lossMask[:, crhIdx:(crhIdx+1)]\n",
    "  assert labels.shape[0] == inputs.shape[0] == lossMask.shape[0] == noteIds.shape[0] == tweetIds.shape[0]\n",
    "  # FIX: identity comparison for None (PEP 8) instead of `frac != None`.\n",
    "  if frac is not None:\n",
    "    assert 0 < frac <= 1.\n",
    "    size = int(frac * inputs.shape[0])\n",
    "    indices = torch.randperm(inputs.shape[0])[:size]\n",
    "    inputs = inputs[indices]\n",
    "    labels = labels[indices]\n",
    "    lossMask = lossMask[indices]\n",
    "    noteIds = noteIds[indices]\n",
    "    tweetIds = tweetIds[indices]\n",
    "  # Process chunks\n",
    "  start = 0\n",
    "  assert not model.training  # model must already be in eval mode\n",
    "  preds = []\n",
    "  progress = tqdm(range(int(np.ceil(inputs.shape[0] / batchSize))))\n",
    "  while start < inputs.shape[0]:\n",
    "    end = start + batchSize\n",
    "    with torch.no_grad():\n",
    "      with autocast(device_type=device, dtype=torch.bfloat16):\n",
    "        preds.append(\n",
    "          model(\n",
    "            inputs[start:end].to(device),\n",
    "          ).to(CPU).detach()\n",
    "        )\n",
    "    start = end\n",
    "    progress.update(1)\n",
    "  preds = torch.concat(preds, axis=0)\n",
    "  return labels, preds, lossMask, noteIds, tweetIds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8a7a6543-b182-4621-8f35-ba660ab91c09",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper for incremental eval\n",
    "# NOTE(review): redefines the earlier eval_model; this variant scores only the CRH\n",
    "# column and also returns the raw test-set tensors for downstream analysis.\n",
    "def eval_model(model, trainDataset, testDataset, device, gpuBatchSize, colLabels):\n",
    "  # Subsample training data (1/9) for speed; score the full test set.\n",
    "  print(\"Forward pass on training data:\")\n",
    "  allTrainLabels, allTrainPreds, allTrainMask, _, _ = apply_model(model, trainDataset, device, gpuBatchSize, colLabels, frac=(1/9))\n",
    "  print(\"Forward pass on test data:\")\n",
    "  allTestLabels, allTestPreds, allTestMask, testNoteIds, testTweetIds = apply_model(model, testDataset, device, gpuBatchSize, colLabels, frac=None)\n",
    "  results = []\n",
    "  for i in range(allTrainLabels.shape[1]):\n",
    "    # Keep only rows where the loss mask marks the label as present.\n",
    "    trainLabels, trainPreds = allTrainLabels[allTrainMask[:, i] == 1, i], allTrainPreds[allTrainMask[:, i] == 1, i]\n",
    "    testLabels, testPreds = allTestLabels[allTestMask[:, i] == 1, i], allTestPreds[allTestMask[:, i] == 1, i]\n",
    "    # AUC is undefined without positives; emit sentinel -1s instead.\n",
    "    if trainLabels.sum().item() == 0 or testLabels.sum().item() == 0:\n",
    "      results.append((-1, -1, -1, -1))\n",
    "      continue\n",
    "    trainAuc = skm.roc_auc_score(trainLabels.numpy(), trainPreds.numpy())\n",
    "    testAuc = skm.roc_auc_score(testLabels.numpy(), testPreds.numpy())\n",
    "    # TPR at the ROC points nearest 1% and 5% FPR.\n",
    "    fpr, tpr, _ = skm.roc_curve(testLabels.numpy(), testPreds.numpy())\n",
    "    tprAt1 = tpr[np.argmin(np.abs(fpr - .01))]\n",
    "    tprAt5 = tpr[np.argmin(np.abs(fpr - .05))]\n",
    "    results.append((trainAuc, testAuc, tprAt1, tprAt5))\n",
    "  return results, (allTestLabels, allTestPreds, allTestMask, testNoteIds, testTweetIds)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b45493e9-cfb2-46c7-bcf0-7a010de6d7a4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helpers to save checkpointed state\n",
    "def save_checkpoint(root, epoch, batch, loss, model, optimizer, scheduler, scaler, stats):\n",
    "  checkpoint = {\n",
    "    \"epoch\": epoch,\n",
    "    \"batch\": batch,\n",
    "    \"loss\": loss,\n",
    "    \"model\": model.module.state_dict(),\n",
    "    \"optimizer\": optimizer.state_dict() if optimizer is not None else None,\n",
    "    \"scheduler\": scheduler.state_dict() if scheduler is not None else None,\n",
    "    \"scaler\": scaler.state_dict() if scaler is not None else None,\n",
    "    \"stats\": stats,\n",
    "  }\n",
    "  checkpointId = str(int(time.time()))\n",
    "  path = os.path.join(root, f\"{checkpointId}.pt\")\n",
    "  print(f\"Saving checkpoint to {path}\")\n",
    "  torch.save(checkpoint, path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8ae93e72-71b1-4511-ae05-9fd3bb2b7399",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define training loop\n",
    "def train_model(\n",
    "  model,\n",
    "  trainDataset,\n",
    "  testDataset,\n",
    "  colLabels,\n",
    "  numEpochs=3,\n",
    "  device=CUDA,\n",
    "  gpuBatchSize=32,\n",
    "  learningRate=1e-5,\n",
    "  weightDecay=0.01,\n",
    "  learningSchedule=True,\n",
    "  objectiveWeights=None,\n",
    "  posWeight=None,\n",
    "):\n",
    "  \"\"\"Train `model` on the single CRH objective, then evaluate and checkpoint it.\n",
    "\n",
    "  Creates a fresh timestamped directory under DATA_ROOT for the checkpoint and\n",
    "  returns the eval_model predictions bundle for the test dataset.\n",
    "  \"\"\"\n",
    "  gc.collect()\n",
    "  torch.cuda.empty_cache()\n",
    "  # Set up checkpoint directory\n",
    "  modelId = str(int(time.time()))\n",
    "  print(f\"Beginning training run for {modelId}\")\n",
    "  modelRoot = os.path.join(DATA_ROOT, modelId)\n",
    "  os.mkdir(modelRoot)\n",
    "  # Prepare data and batching\n",
    "  print(\"Setting up training\")\n",
    "  model = model.to(device)\n",
    "  if device == CUDA and torch.cuda.device_count() > 1:\n",
    "    model = nn.DataParallel(model)\n",
    "    batchSize = gpuBatchSize * torch.cuda.device_count()\n",
    "  else:\n",
    "    batchSize = gpuBatchSize\n",
    "  labels, inputs, lossMask, noteIds, tweetIds = trainDataset\n",
    "  # Train only on the CRH head: reduce labels/lossMask to that single column.\n",
    "  crhIdx = colLabels.index(\"crh\")\n",
    "  assert crhIdx == 2\n",
    "  labels = labels[:, crhIdx:(crhIdx+1)]\n",
    "  lossMask = lossMask[:, crhIdx:(crhIdx+1)]\n",
    "  numBatches = int(np.ceil(inputs.shape[0] / batchSize))\n",
    "  progress = tqdm(range(numBatches * numEpochs))\n",
    "  print(f\"Training data contains {inputs.shape[0]} rows to be split into {numBatches} batches\")\n",
    "  # Prepare models and data\n",
    "  # NOTE(review): the local name below shadows the module-level `optim` alias.\n",
    "  optim = torch.optim.AdamW(model.parameters(), lr=learningRate, weight_decay=weightDecay)\n",
    "  if learningSchedule:\n",
    "    scheduler = LinearLR(\n",
    "      optim,\n",
    "      start_factor=1.0,  # Start at the initial learning rate\n",
    "      end_factor=0.0,    # End at 0\n",
    "      total_iters=(numBatches * numEpochs),  # Total number of training steps      \n",
    "    )\n",
    "  else:\n",
    "    scheduler = None\n",
    "  # NOTE(review): GradScaler targets fp16; with bfloat16 autocast, loss scaling\n",
    "  # is typically unnecessary -- confirm it is intended here.\n",
    "  scaler = GradScaler()\n",
    "  if objectiveWeights is None:\n",
    "    objectiveWeights = torch.ones(labels.shape[1]) / labels.shape[1]\n",
    "  assert np.abs(objectiveWeights.sum().item() - 1) < 1e-5\n",
    "  lossWeights = make_loss_weights(lossMask, objectiveWeights, numBatches).to(device)\n",
    "  model.train()\n",
    "  for epoch in range(numEpochs):\n",
    "    gc.collect()\n",
    "    torch.cuda.empty_cache()\n",
    "    losses = []\n",
    "    # NOTE(review): `base` below is unused -- candidate for removal.\n",
    "    base = 0\n",
    "    # Shuffle rows each epoch; noteIds/tweetIds are not reordered because they\n",
    "    # are unused in the loop below.\n",
    "    randOrder = np.random.permutation(np.arange(0, inputs.shape[0]))\n",
    "    inputs = inputs[randOrder]\n",
    "    labels = labels[randOrder]\n",
    "    lossWeights = lossWeights[randOrder]\n",
    "    for batch in range(numBatches):\n",
    "      # Obtain batch\n",
    "      start = batch * batchSize\n",
    "      end = start + batchSize\n",
    "      y = labels[start:end].to(device)\n",
    "      with autocast(device_type=device, dtype=torch.bfloat16):\n",
    "        # Forward pass\n",
    "        y_hat = model(\n",
    "          inputs[start:end].to(device),\n",
    "        )\n",
    "        # Compute loss\n",
    "        loss = multihead_loss(y_hat, lossWeights[start:end], y, posWeight=posWeight).sum()\n",
    "      losses.append(loss.item())\n",
    "      # Backward pass\n",
    "      scaler.scale(loss).backward()\n",
    "      # Update weights\n",
    "      scaler.step(optim)\n",
    "      scaler.update()\n",
    "      if learningSchedule:\n",
    "        scheduler.step()\n",
    "      # Zero out gradients\n",
    "      optim.zero_grad()\n",
    "      # Update progress bar\n",
    "      progress.update(1)\n",
    "    print(f\"epoch={epoch:<3d}  loss={np.mean(losses):7.5f}\")\n",
    "  # Log loss\n",
    "  model.eval()\n",
    "  results, evalPreds = eval_model(model, trainDataset, testDataset, device, gpuBatchSize, colLabels)\n",
    "  stats = []\n",
    "  for (trainAuc, testAuc, tprAt1, tprAt5), label in zip(results, [CRH]):\n",
    "    print(f\"  epoch={epoch:<3d}  loss={np.mean(losses):7.5f}  trainAuc={trainAuc:5.3f}  testAuc={testAuc:5.3f}  tpr@0.01={tprAt1:5.3f}  tpr@0.05={tprAt5:5.3f}  ({label})\")\n",
    "    stats.append((label, trainAuc, testAuc, tprAt1, tprAt5))\n",
    "  # NOTE(review): ensure save_checkpoint handles both wrapped (DataParallel)\n",
    "  # and unwrapped models -- the branch above only wraps on multi-GPU hosts.\n",
    "  save_checkpoint(modelRoot, epoch, batch, np.mean(losses), model, optim, scheduler, scaler, stats)      \n",
    "  return evalPreds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7649f878-321d-4bf4-ab27-6bad897909e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Thin wrapper around train_model with defaults suited for hyperparameter sweeps.\n",
    "def eval_hparams(\n",
    "  model,\n",
    "  trainDataset,\n",
    "  testDataset,\n",
    "  colLabels,\n",
    "  numEpochs=5,\n",
    "  gpuBatchSize=1024,\n",
    "  learningRate=1e-5,\n",
    "  learningSchedule=True,\n",
    "  weightDecay=0.01,\n",
    "  objectiveWeights=None,\n",
    "  posWeight=None\n",
    "):\n",
    "  \"\"\"Train and evaluate `model`, forwarding every tuning knob to train_model.\"\"\"\n",
    "  hparams = dict(\n",
    "    numEpochs=numEpochs,\n",
    "    gpuBatchSize=gpuBatchSize,\n",
    "    learningRate=learningRate,\n",
    "    learningSchedule=learningSchedule,\n",
    "    weightDecay=weightDecay,\n",
    "    objectiveWeights=objectiveWeights,\n",
    "    posWeight=posWeight,\n",
    "  )\n",
    "  return train_model(model, trainDataset, testDataset, colLabels, **hparams)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "80a930a5-902b-40fb-8ace-424ea31a81c9",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Train Models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "46e6060f-f21d-4b3c-a27d-62205781f06f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define model\n",
    "class MetaModel(nn.Module):\n",
    "\n",
    "  def __init__(self, colMask):\n",
    "    super().__init__()\n",
    "    totalCols = sum(colMask)\n",
    "    print(\"total cols:\", totalCols)\n",
    "    self.colMask = colMask\n",
    "    self.scale = nn.Parameter(torch.randn(totalCols))\n",
    "    self.shift = nn.Parameter(torch.randn(totalCols))\n",
    "    self.preclassifier = nn.Linear(totalCols, totalCols**2)\n",
    "    self.classifier = nn.Linear(totalCols**2, 1)\n",
    "\n",
    "  def forward(self, inputs):\n",
    "    inputs = inputs[:, self.colMask]\n",
    "    z = (inputs - self.shift) / self.scale\n",
    "    a = nn.Tanh()(z)\n",
    "    z = self.preclassifier(a)\n",
    "    a = nn.ReLU()(z)\n",
    "    return self.classifier(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5fcba547-a0b8-4516-9037-620dbe521e96",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train a meta model on every signal except CRH itself, then profile the\n",
    "# predictions and plot predicted vs. actual CRH rates.\n",
    "tmp = MetaModel([(col not in [\"crh\"]) for col in allSignalLabels])\n",
    "tmp = eval_hparams(tmp, trainTensorsSignals, holdoutTensorsSignals, allSignalLabels, learningRate=1e-3, learningSchedule=True, numEpochs=50)\n",
    "tmp = make_dataframe(tmp, [\"crh\"], dataset, clusters)\n",
    "print(profile_signals(tmp, [\"crh\"]))\n",
    "plot_crh_rates_with_pred(tmp, \"crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9192ed9c-c566-43be-81ca-14c06938aafe",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train a meta model on four selected tag signals (addresses claim, clear,\n",
    "# opinion/speculation, important context) to predict CRH.\n",
    "tmp = MetaModel([(col in [\"helpfulAddressesClaim\", \"helpfulClear\", \"notHelpfulOpinionSpeculation\", \"helpfulImportantContext\"]) for col in allSignalLabels])\n",
    "tmp = eval_hparams(tmp, trainTensorsSignals, holdoutTensorsSignals, allSignalLabels, learningRate=1e-3, learningSchedule=True, numEpochs=50)\n",
    "tmp = make_dataframe(tmp, [\"crh\"], dataset, clusters)\n",
    "print(profile_signals(tmp, [\"crh\"]))\n",
    "plot_crh_rates_with_pred(tmp, \"crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b4af862-c8f6-4f65-8f21-7d774a34bf84",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train a meta model on three selected tag signals (addresses claim, clear,\n",
    "# opinion/speculation) to predict CRH.\n",
    "tmp = MetaModel([(col in [\"helpfulAddressesClaim\", \"helpfulClear\", \"notHelpfulOpinionSpeculation\"]) for col in allSignalLabels])\n",
    "tmp = eval_hparams(tmp, trainTensorsSignals, holdoutTensorsSignals, allSignalLabels, learningRate=1e-3, learningSchedule=True, numEpochs=50)\n",
    "tmp = make_dataframe(tmp, [\"crh\"], dataset, clusters)\n",
    "print(profile_signals(tmp, [\"crh\"]))\n",
    "plot_crh_rates_with_pred(tmp, \"crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "13b8e4a9-7df6-461d-b1b8-70c89e0f7f08",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train a meta model on two selected tag signals (addresses claim and\n",
    "# opinion/speculation) to predict CRH.\n",
    "tmp = MetaModel([(col in [\"helpfulAddressesClaim\", \"notHelpfulOpinionSpeculation\"]) for col in allSignalLabels])\n",
    "tmp = eval_hparams(tmp, trainTensorsSignals, holdoutTensorsSignals, allSignalLabels, learningRate=1e-3, learningSchedule=True, numEpochs=50)\n",
    "tmp = make_dataframe(tmp, [\"crh\"], dataset, clusters)\n",
    "print(profile_signals(tmp, [\"crh\"]))\n",
    "plot_crh_rates_with_pred(tmp, \"crh_pred\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7ee93560-041e-464e-8071-62f967c2e5ef",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "# Summarize Results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ccd95bc3-5055-4416-a731-8e1e619e4327",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Validate that holdout results align with tuning data: profile every signal\n",
    "# on the holdout set and inspect the direct CRH row.\n",
    "tmp = profile_signals(holdoutSignalDF, allSignalLabels).sort_values(\"KL (Notes)\")\n",
    "tmp[tmp[\"Signal\"] == \"crh\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2518205-e260-4151-a7be-e9f3811b50eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot holdout CRH rates against the direct crh_pred signal\n",
    "plot_crh_rates_with_pred(holdoutSignalDF, \"crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "54735855-6b97-4cca-bd59-cdff2ca5bb6f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Retrain the final CRH meta model on the two selected tag signals and keep\n",
    "# the holdout predictions for downstream review.\n",
    "tmp = MetaModel([(col in [\"helpfulAddressesClaim\", \"notHelpfulOpinionSpeculation\"]) for col in allSignalLabels])\n",
    "tmp = eval_hparams(tmp, trainTensorsSignals, holdoutTensorsSignals, allSignalLabels, learningRate=1e-3, learningSchedule=True, numEpochs=50)\n",
    "holdoutResultDf = make_dataframe(tmp, [\"crh\"], dataset, clusters)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8b9066bc-921d-46f5-b93a-870ef13851ab",
   "metadata": {},
   "outputs": [],
   "source": [
    "# View KL divergence for the retrained CRH meta model on the holdout set\n",
    "profile_signals(holdoutResultDf, [\"crh\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "183dd684-94ab-4c8e-ac97-fafaa2039f81",
   "metadata": {},
   "outputs": [],
   "source": [
    "# View plots of holdout CRH rates against the meta model predictions\n",
    "plot_crh_rates_with_pred(holdoutResultDf, \"crh_pred\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bd43b87e-2872-43a7-97e6-a8a684c8b7ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate review samples: join meta-model predictions with note/tweet text\n",
    "# and the direct CRH prediction for side-by-side comparison.\n",
    "tmp = holdoutResultDf[[NOTE_ID, TWEET_ID, \"crh_label\", CURRENT_LABEL, \"crh_pred\"]].merge(enDataset[[NOTE_ID, TWEET_ID, NOTE_TEXT_FINAL, TWEET_TEXT_FINAL, INTERCEPT]])\n",
    "tmp = tmp.rename(columns={\"crh_pred\": \"helpful_claim_and_opinion\"})\n",
    "reviewSample = tmp.merge(holdoutSignalDF[[NOTE_ID, TWEET_ID, \"crh_pred\"]].rename(columns={\"crh_pred\": \"helpful_direct\"}))\n",
    "# NOTE(review): the literal \"intercept\" below presumably equals the INTERCEPT\n",
    "# constant selected above -- confirm and prefer the constant if so.\n",
    "reviewSample = reviewSample[[NOTE_ID, TWEET_ID, \"crh_label\", \"helpful_claim_and_opinion\", \"helpful_direct\", NOTE_TEXT_FINAL, TWEET_TEXT_FINAL, \"intercept\", CURRENT_LABEL]]\n",
    "reviewSample"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2f772264-f030-4647-99b3-149cf052bc5b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to confirm key stats\n",
    "def show_key_stats(scores, labels):\n",
    "  \"\"\"Compute (auc, tpr@fpr<=0.01, tpr@fpr<=0.05) for scores vs. binary labels.\n",
    "\n",
    "  Rows with NA labels are dropped.  Returns (nan, nan, nan) when fewer than\n",
    "  two classes remain, since AUC is undefined in that case.\n",
    "  \"\"\"\n",
    "  # Select non-NA rows\n",
    "  mask = labels.notna()\n",
    "  scores = scores[mask]\n",
    "  labels = labels[mask]\n",
    "  # AUC is undefined with a single class; bail out instead of raising.\n",
    "  if labels.nunique() < 2:\n",
    "    return (float(\"nan\"), float(\"nan\"), float(\"nan\"))\n",
    "  # Generate curves\n",
    "  auc = skm.roc_auc_score(labels, scores)\n",
    "  fpr, tpr, _ = skm.roc_curve(labels, scores)\n",
    "  # Best TPR achievable without exceeding the FPR budget.  Nearest-point\n",
    "  # selection could pick an operating point above the budget, or return 0\n",
    "  # for a perfect classifier (argmin lands on the initial fpr=0 point).\n",
    "  tpr01 = tpr[fpr <= 0.01].max()\n",
    "  tpr05 = tpr[fpr <= 0.05].max()\n",
    "  return (auc, tpr01, tpr05)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c986f409-c2b6-4649-aa72-11984759b6ab",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Recap key ranking stats for the direct CRH prediction\n",
    "show_key_stats(reviewSample[\"helpful_direct\"], reviewSample[\"crh_label\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2901cf7a-d3f0-48bd-9494-761b04df3932",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Recap key ranking stats for the claim-and-opinion meta prediction\n",
    "show_key_stats(reviewSample[\"helpful_claim_and_opinion\"], reviewSample[\"crh_label\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3f1ca87e-454e-4383-ad78-908576be3a18",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save review sample to disk as parquet\n",
    "tmp = os.path.join(DATA_ROOT, \"review_sample.parquet\")\n",
    "print(tmp)\n",
    "reviewSample.to_parquet(tmp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "66280935-029c-43e7-b2cb-9beadf083869",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build and save a review sample for the note-not-needed (NNN) signal\n",
    "nnnReviewSample = holdoutSignalDF[[NOTE_ID, TWEET_ID, \"notHelpfulNoteNotNeeded_label\", \"notHelpfulNoteNotNeeded_pred\"]].merge(\n",
    "  enDataset[[NOTE_ID, TWEET_ID, NOTE_TEXT_FINAL, TWEET_TEXT_FINAL]])\n",
    "tmp = os.path.join(DATA_ROOT, \"nnn_review_sample.parquet\")\n",
    "print(tmp)\n",
    "nnnReviewSample.to_parquet(tmp)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a6168d2d-d21b-4695-88df-d652cd8d4ec5",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "# Export Model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ed6e96ec-f0e5-4ed4-a6e5-539d8274d773",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Load and Combine Models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "605e174f-4555-4182-8c19-f760dc4d19b1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load multi-head model from checkpoint\n",
    "# NOTE(review): weights_only=False unpickles arbitrary objects -- only load\n",
    "# trusted checkpoint files.\n",
    "tmp = torch.load(os.path.expanduser(\"~/workspace/datasets/helpfulness/1753341209/1753352848.pt\"), weights_only=False)\n",
    "multiHeadModel = MultiHeadMLPAllSignals()\n",
    "multiHeadModel.load_state_dict(tmp[\"model\"])\n",
    "multiHeadModel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2debb4bb-479f-4a6a-888e-18ef9844a7b6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load CRH meta model from checkpoint, rebuilding it with the same\n",
    "# two-signal column mask used during training.\n",
    "tmp = torch.load(os.path.expanduser(\"~/workspace/datasets/helpfulness/1753377611/1753377685.pt\"), weights_only=False)\n",
    "crhModel = MetaModel([(col in [\"helpfulAddressesClaim\", \"notHelpfulOpinionSpeculation\"]) for col in allSignalLabels])\n",
    "crhModel.load_state_dict(tmp[\"model\"])\n",
    "crhModel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "63c6913f-972c-4260-a20e-a5ff836eaa09",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define composite model\n",
    "class CompositeModel(nn.Module):\n",
    "  \"\"\"Fuse the multi-head text model with the CRH meta model into one module.\n",
    "\n",
    "  forward() returns every head output concatenated along dim 1, with the meta\n",
    "  model's claims-and-opinion logit appended as the final column.\n",
    "  \"\"\"\n",
    "\n",
    "  def __init__(self, multiHeadModel, crhModel):\n",
    "    super().__init__()\n",
    "    # Set multiHeadModel parameters\n",
    "    self.roberta = multiHeadModel.roberta\n",
    "    self.interceptPredictor = multiHeadModel.interceptPredictor\n",
    "    self.factorPredictor = multiHeadModel.factorPredictor\n",
    "    self.crhClassifier = multiHeadModel.crhClassifier\n",
    "    self.crnhClassifier = multiHeadModel.crnhClassifier\n",
    "    self.relevanceClassifier = multiHeadModel.relevanceClassifier\n",
    "    self.classificationClassifier = multiHeadModel.classificationClassifier\n",
    "    self.helpfulTagClassifier = multiHeadModel.helpfulTagClassifier\n",
    "    self.notHelpfulTagClassifier = multiHeadModel.notHelpfulTagClassifier\n",
    "    # Set crhModel parameters\n",
    "    self.colMask = crhModel.colMask\n",
    "    self.scale = crhModel.scale\n",
    "    self.shift = crhModel.shift\n",
    "    self.preclassifier = crhModel.preclassifier\n",
    "    self.classifier = crhModel.classifier\n",
    "    \n",
    "  def forward(self, inputIds, attentionMask):\n",
    "    # Apply multiHeadModel: embed with the first-token representation\n",
    "    embedding = self.roberta(\n",
    "      input_ids=inputIds,\n",
    "      attention_mask=attentionMask,\n",
    "    ).last_hidden_state[:, 0]  # batch, token, dimension\n",
    "    # NOTE(review): this head ordering must match the column order that the\n",
    "    # meta model's colMask was built against (allSignalLabels) -- confirm.\n",
    "    heads = torch.concat([\n",
    "      self.interceptPredictor(embedding),\n",
    "      self.factorPredictor(embedding),\n",
    "      self.crhClassifier(embedding),\n",
    "      self.crnhClassifier(embedding),\n",
    "      self.classificationClassifier(embedding),\n",
    "      self.relevanceClassifier(embedding),\n",
    "      self.notHelpfulTagClassifier(embedding),\n",
    "      self.helpfulTagClassifier(embedding),\n",
    "    ], axis=1)\n",
    "    # Apply crhModel: same normalize/tanh/relu pipeline as MetaModel.forward\n",
    "    z = (heads[:, self.colMask] - self.shift) / self.scale\n",
    "    a = nn.Tanh()(z)\n",
    "    z = self.preclassifier(a)\n",
    "    a = nn.ReLU()(z)\n",
    "    return torch.concat([heads, self.classifier(a)], axis=1)\n",
    "\n",
    "compositeModel = CompositeModel(multiHeadModel, crhModel).to(CUDA)\n",
    "compositeModel.eval()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "43a2031c-5ab8-4b13-832e-21e42a265f79",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Validate Combined Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "887cffe5-dea5-420e-90d8-e46743021298",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to apply composite model\n",
    "def apply_composite_model(model, dataset, device, gpuBatchSize, colLabels):\n",
    "  \"\"\"Batch the composite model over `dataset` and return a prediction DataFrame.\n",
    "\n",
    "  The frame holds note/tweet IDs plus one column per label in colLabels and a\n",
    "  final \"claimsAndOpinion\" column from the meta head.\n",
    "  \"\"\"\n",
    "  # Scale the per-GPU batch size across all visible devices.\n",
    "  nGpus = torch.cuda.device_count()\n",
    "  if device == CUDA and nGpus > 1:\n",
    "    batchSize = gpuBatchSize * nGpus\n",
    "  else:\n",
    "    batchSize = gpuBatchSize\n",
    "  # Unpack and sanity-check that every tensor has the same number of rows.\n",
    "  inputIds, attentionMask, labels, lossMask, noteIds, tweetIds = dataset\n",
    "  rowCounts = {t.shape[0] for t in (inputIds, attentionMask, labels, lossMask, noteIds, tweetIds)}\n",
    "  assert len(rowCounts) == 1\n",
    "  total = inputIds.shape[0]\n",
    "  assert not model.training\n",
    "  # Forward passes in inference mode, accumulating CPU-side predictions.\n",
    "  preds = []\n",
    "  progress = tqdm(range(int(np.ceil(total / batchSize))))\n",
    "  for batchStart in range(0, total, batchSize):\n",
    "    batchEnd = batchStart + batchSize\n",
    "    with torch.no_grad():\n",
    "      with autocast(device_type=device, dtype=torch.bfloat16):\n",
    "        batchPreds = model(\n",
    "          inputIds[batchStart:batchEnd].to(device),\n",
    "          attentionMask[batchStart:batchEnd].to(device),\n",
    "        ).to(CPU).detach()\n",
    "    preds.append(batchPreds)\n",
    "    progress.update(1)\n",
    "  preds = torch.concat(preds, axis=0)\n",
    "  # Assemble the output frame keyed by note/tweet ID.\n",
    "  df = pd.DataFrame({\n",
    "    NOTE_ID: noteIds.cpu().numpy(),\n",
    "    TWEET_ID: tweetIds.cpu().numpy(),\n",
    "  })\n",
    "  df[colLabels + [\"claimsAndOpinion\"]] = preds.to(torch.float32).cpu().numpy()\n",
    "  return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9e5fa0ac-c796-4ccc-bdae-f1a31c703cd4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Apply the composite model to the full test tensors for validation\n",
    "validation = apply_composite_model(compositeModel, testTensors, CUDA, 32, allSignalLabels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "86db74f7-6e48-4108-917a-c03b371de71b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Confirm that validation passes: composite-model columns should match the\n",
    "# reference predictions from the standalone models after merging on IDs.\n",
    "holdoutResultDf[[NOTE_ID, TWEET_ID, \"crh_pred\"]].rename(columns={\"crh_pred\": \"claims_and_opinion_ref\"}).merge(\n",
    "  holdoutSignalDF[[NOTE_ID, TWEET_ID, \"relevance_pred\", \"notHelpfulSpamHarassmentOrAbuse_pred\", \"crh_pred\"]].rename(columns={\n",
    "    \"relevance_pred\": \"relevance_ref\",\n",
    "    \"notHelpfulSpamHarassmentOrAbuse_pred\": \"notHelpfulSpamHarassmentOrAbuse_ref\",\n",
    "    \"crh_pred\": \"crh_ref\"})).merge(\n",
    "  validation[[NOTE_ID, TWEET_ID, \"claimsAndOpinion\", \"relevance\", \"notHelpfulSpamHarassmentOrAbuse\", \"crh\"]]\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7da7e30d-0d6a-46d0-bd8b-c15d1cfb55bd",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Generate Trace"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "677f7d25-4bbc-474c-873e-2f7b48cb0acc",
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# Apply model to a few rows to sanity-check outputs and latency\n",
    "compositeModel.eval()\n",
    "compositeModel.to(CUDA)(testTensors[0][:3].to(CUDA), testTensors[1][:3].to(CUDA))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "32640ad4-bca5-49f8-b668-d36d382aa56b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper for applying model\n",
    "def get_jit_trace(model, tensors):\n",
    "  \"\"\"Trace the model on CPU with a single example and return the TorchScript module.\"\"\"\n",
    "  # Prepare data\n",
    "  model = model.to(CPU)\n",
    "  inputIds, attentionMask, labels, lossMask, noteIds, tweetIds = tensors\n",
    "  assert inputIds.shape[0] == attentionMask.shape[0] == labels.shape[0] == lossMask.shape[0] == noteIds.shape[0] == tweetIds.shape[0]\n",
    "  assert not model.training\n",
    "  with torch.no_grad():\n",
    "    # Trace with a batch of one.  NOTE(review): confirm the traced graph\n",
    "    # generalizes to other batch sizes before serving.\n",
    "    return torch.jit.trace(model, (inputIds[:1], attentionMask[:1]))\n",
    "\n",
    "torch.jit.save(get_jit_trace(compositeModel, testTensors), os.path.join(DATA_ROOT, \"composite_model.jit\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d3d7232-60cc-44d8-a7b5-2576ef10dfa7",
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# Validate model loaded from disk\n",
    "def validate_jit_model(path, testTensors):\n",
    "  \"\"\"Load the saved TorchScript model and run it on three rows as a smoke test.\"\"\"\n",
    "  model = torch.jit.load(path)\n",
    "  assert not model.training\n",
    "  return model(testTensors[0][:3].to(CPU), testTensors[1][:3].to(CPU))\n",
    "\n",
    "validate_jit_model(os.path.join(DATA_ROOT, \"composite_model.jit\"), testTensors)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7b263bb9-fbfd-49cb-ad25-55cfe42f58c0",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "## Pack Tarball"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "23da3bd8-6c7e-4ab0-bc45-a923752e84ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to create a tarball from a list of pairs\n",
    "def pack_tarball(data: list[tuple[str, bytes]]) -> bytes:\n",
    "  tarBytes = io.BytesIO()\n",
    "  with tarfile.open(fileobj=tarBytes, mode='w') as tar:\n",
    "    for name, content in data:\n",
    "      print(name)\n",
    "      # Encode content if it's a string\n",
    "      assert isinstance(content, bytes)\n",
    "      # Create TarInfo object\n",
    "      info = tarfile.TarInfo(name=name)\n",
    "      info.size = len(content)\n",
    "      # Add file to tar\n",
    "      tar.addfile(info, io.BytesIO(content))\n",
    "  \n",
    "  # Reset the stream position to the beginning\n",
    "  tarBytes.seek(0)\n",
    "  return tarBytes.getvalue()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ab69caf7-8112-411e-990b-167c7f48febd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define helper to prepare test data\n",
    "def prepare_test_dataset(dataset, tensors, model, colLabels, size=50):\n",
    "  \"\"\"Score the first `size` rows on CPU and join predictions with raw inputs.\n",
    "\n",
    "  Returns a DataFrame with IDs, raw text/URL columns and selected prediction\n",
    "  columns, for use as bundled regression-test data.\n",
    "  \"\"\"\n",
    "  # Select test samples\n",
    "  inputIds, attentionMask, labels, lossMask, noteIds, tweetIds = tensors\n",
    "  preds = model(inputIds[:size].to(CPU), attentionMask[:size].to(CPU))\n",
    "  result = pd.DataFrame({\n",
    "    NOTE_ID: noteIds[:size].numpy(),\n",
    "    TWEET_ID: tweetIds[:size].numpy(),\n",
    "  })\n",
    "  result[colLabels + [\"claimsAndOpinion\"]] = preds.detach().to(CPU).numpy()\n",
    "  result = result[[NOTE_ID, TWEET_ID, RELEVANCE, \"notHelpfulSpamHarassmentOrAbuse\", CRH, \"claimsAndOpinion\"]]\n",
    "  # Merge with raw inputs.  Note that any synthetic note/tweet pairs will be dropped\n",
    "  # because they don't occur in the dataset\n",
    "  result = dataset[[NOTE_ID, TWEET_ID, NOTE_TEXT, TWEET_TEXT, TWEET_SHORTEN_URLS, TWEET_EXPANDED_URLS]].merge(result)\n",
    "  assert len(result) > 0\n",
    "  return result\n",
    "\n",
    "prepare_test_dataset(dataset, testTensors, compositeModel.to(CPU), allSignalLabels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d71a84d5-f241-4a57-938f-5e9720bc4d86",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Return a tarball containing all modeling resources\n",
    "def create_tarball(testData, colLabels):\n",
    "  \"\"\"Bundle tokenizer files, the JIT model, labels and test data into one tarball.\n",
    "\n",
    "  Layout: tokenizer/* , model/model.jit , model/labels.txt , test_data.parquet.\n",
    "  \"\"\"\n",
    "  # List of {path, resource} pairs\n",
    "  pairs = []\n",
    "  # Add tokenizer resources\n",
    "  tokenizerDir = os.path.join(MODEL_ROOT, DISTILROBERTA_BASE_MODEL, TOKENIZER_DIR)\n",
    "  for fileName in os.listdir(tokenizerDir):\n",
    "    if fileName.startswith(\".\"):\n",
    "      continue\n",
    "    with open(os.path.join(tokenizerDir, fileName), \"rb\") as handle:\n",
    "      resource = handle.read()\n",
    "    pairs.append((f\"tokenizer/{fileName}\", resource))\n",
    "  # Add jit model\n",
    "  with open(os.path.join(DATA_ROOT, \"composite_model.jit\"), \"rb\") as handle:\n",
    "    jitModel = handle.read()\n",
    "  pairs.append((\"model/model.jit\", jitModel))\n",
    "  # Add labels, one per line, in the same order as the model's output columns\n",
    "  labels = b\"\".join(f\"{label}\\n\".encode(\"utf-8\") for label in colLabels + [\"claimsAndOpinion\"])\n",
    "  pairs.append((\"model/labels.txt\", labels))\n",
    "  # Add test data serialized as parquet bytes\n",
    "  buf = io.BytesIO()\n",
    "  testData.to_parquet(buf)\n",
    "  buf.seek(0)\n",
    "  pairs.append((\"test_data.parquet\", buf.getvalue()))\n",
    "  return pack_tarball(pairs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1aade477-8ef3-4e75-8bb8-956707ceb364",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create and store tarball; the SHA-256 digest is printed for provenance\n",
    "tarball = create_tarball(prepare_test_dataset(dataset, testTensors, compositeModel.to(CPU), allSignalLabels), allSignalLabels)\n",
    "print(hashlib.sha256(tarball).hexdigest())\n",
    "with open(os.path.join(DATA_ROOT, \"composite_model.tar\"), \"wb\") as handle:\n",
    "  handle.write(tarball)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ef28afd8-fd04-40c6-af33-7a5617ddb503",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Display the final output column ordering (model heads + composite head)\n",
    "allSignalLabels + [\"claimsAndOpinion\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c5173faf-83ca-441a-89a8-ea225eedc614",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
