{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the raw data into a NumPy ndarray\n",
    "\n",
    "# NOTE(review): assumes higgs/data.csv is header-less and all-numeric;\n",
    "# float32 halves memory relative to the default float64.\n",
    "data = np.loadtxt('higgs/data.csv', delimiter=',', dtype=np.float32)\n",
    "print('Raw data:', data.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split the raw data to features, labels and weights\n",
    "\n",
    "# Column layout: last column = label, second-to-last = event weight,\n",
    "# columns 1..-3 = features. Column 0 is dropped\n",
    "# (presumably an event id — TODO confirm against the CSV spec).\n",
    "features, labels, weights = data[:, 1:-2], data[:, -1], data[:, -2]\n",
    "print('Features', features.shape)\n",
    "print('Labels', labels.shape)\n",
    "print('Weights', weights.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fill missing data\n",
    "\n",
    "# -999 is the sentinel for undefined values in this dataset; replace it\n",
    "# with 0 so it does not distort the network inputs. Idempotent on re-run.\n",
    "features[features == -999] = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the signal and background distributions for each feature\n",
    "\n",
    "signal_features, signal_weights = features[labels == 1], weights[labels == 1]\n",
    "background_features, background_weights = features[labels == 0], weights[labels == 0]\n",
    "print(signal_features.shape, background_features.shape)\n",
    "\n",
    "plt.figure(figsize=(20, 20))\n",
    "for column in range(30):\n",
    "    plt.subplot(6, 5, column + 1)\n",
    "    # Overlay weighted, normalized step histograms of both classes\n",
    "    for class_label, class_features, class_weights, color in (\n",
    "            ('s', signal_features, signal_weights, 'r'),\n",
    "            ('b', background_features, background_weights, 'b')):\n",
    "        plt.hist(class_features[:, column], bins=50, weights=class_weights, density=True, color=color, histtype='step', label=class_label)\n",
    "    plt.title(str(column))\n",
    "    plt.yticks([])\n",
    "    plt.legend()\n",
    "\n",
    "plt.tight_layout()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Normalize features\n",
    "\n",
    "# Per-column scale factors (same constants as before, one per feature)\n",
    "# applied in a single vectorized division instead of 30 copy-pasted lines.\n",
    "FEATURE_SCALES = np.array([\n",
    "    300, 200, 250, 300, 8, 1000, 10, 5, 150, 500,\n",
    "    5, 2, 1, 100, 3, 3, 100, 3, 3, 200,\n",
    "    3, 500, 3, 250, 4, 3, 150, 5, 3, 300,\n",
    "], dtype=np.float32)\n",
    "\n",
    "# In-place division keeps the float32 dtype of `features`.\n",
    "# NOTE(review): not idempotent — re-running this cell rescales again\n",
    "# (same caveat as the original per-column divisions).\n",
    "features /= FEATURE_SCALES"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shuffle (permute) the examples\n",
    "\n",
    "# Seed the generator so the shuffle — and therefore the train/validation\n",
    "# split in the next cell — is reproducible across kernel restarts.\n",
    "rng = np.random.default_rng(42)\n",
    "indices = rng.permutation(len(features))\n",
    "print(indices)\n",
    "\n",
    "features, labels, weights = features[indices], labels[indices], weights[indices]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split the data into training and validation sets\n",
    "\n",
    "TRAIN_FRACTION = 0.5\n",
    "number_train_examples = int(len(features) * TRAIN_FRACTION)\n",
    "print('number of training examples', number_train_examples)\n",
    "print('number of validation examples', len(features) - number_train_examples)\n",
    "\n",
    "train_features = features[:number_train_examples]\n",
    "train_labels = labels[:number_train_examples]\n",
    "train_weights = weights[:number_train_examples]\n",
    "\n",
    "validation_features = features[number_train_examples:]\n",
    "validation_labels = labels[number_train_examples:]\n",
    "validation_weights = weights[number_train_examples:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Check if GPU CUDA is available\n",
    "DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "\n",
    "# Convert the features and labels to pytorch tensors and transfer them to device.\n",
    "# The weights stay as NumPy arrays — they are only used for evaluation/plotting\n",
    "# below, never inside the torch training loop.\n",
    "# NOTE(review): not idempotent — re-running fails because torch.from_numpy\n",
    "# cannot take a tensor.\n",
    "train_features, train_labels = torch.from_numpy(train_features).to(DEVICE), torch.from_numpy(train_labels).to(DEVICE)\n",
    "validation_features, validation_labels = torch.from_numpy(validation_features).to(DEVICE), torch.from_numpy(validation_labels).to(DEVICE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build model\n",
    "\n",
    "# Fully-connected classifier: 30 input features -> 1 output logit.\n",
    "# No final sigmoid here: training uses binary_cross_entropy_with_logits,\n",
    "# and the prediction cell applies torch.sigmoid explicitly.\n",
    "model = nn.Sequential(\n",
    "    nn.Linear(30, 64),\n",
    "    nn.ReLU(),\n",
    "    nn.Linear(64, 128),\n",
    "    nn.ReLU(),\n",
    "    nn.Linear(128, 64),\n",
    "    nn.ReLU(),\n",
    "    nn.Linear(64, 1),\n",
    ").to(DEVICE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize optimizer\n",
    "\n",
    "# Adam over all model parameters with its standard 1e-3 learning rate\n",
    "optimizer = optim.Adam(model.parameters(), lr=1e-3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train\n",
    "\n",
    "BATCH_SIZE = 1000\n",
    "NUM_EPOCHS = 50\n",
    "\n",
    "train_losses, validation_losses = [], []\n",
    "min_validation_loss = float('inf')\n",
    "\n",
    "for epoch in range(NUM_EPOCHS):\n",
    "    # 1. One pass over the training set, one optimizer step per mini-batch\n",
    "    for i in range(0, len(train_features), BATCH_SIZE):\n",
    "        # (1) Get mini-batch data; local names avoid clobbering the\n",
    "        #     notebook-global `features`/`labels` from earlier cells\n",
    "        batch_features = train_features[i: i + BATCH_SIZE]\n",
    "        batch_labels = train_labels[i: i + BATCH_SIZE]\n",
    "        # (2) Forward\n",
    "        predictions = model(batch_features).view(-1)\n",
    "        # (3) Calculate loss\n",
    "        loss = F.binary_cross_entropy_with_logits(predictions, batch_labels)\n",
    "        # (4) Zero gradient accumulation\n",
    "        optimizer.zero_grad()\n",
    "        # (5) Backpropagation\n",
    "        loss.backward()\n",
    "        # (6) Update model parameters\n",
    "        optimizer.step()\n",
    "\n",
    "    with torch.no_grad():\n",
    "        # 2. Calculate training loss over the full training set\n",
    "        predictions = model(train_features).view(-1)\n",
    "        train_loss = F.binary_cross_entropy_with_logits(predictions, train_labels).item()\n",
    "        train_losses.append(train_loss)\n",
    "\n",
    "        # 3. Calculate validation loss\n",
    "        predictions = model(validation_features).view(-1)\n",
    "        validation_loss = F.binary_cross_entropy_with_logits(predictions, validation_labels).item()\n",
    "        validation_losses.append(validation_loss)\n",
    "\n",
    "        # 4. Save the best model so far (whole module, matching the\n",
    "        #    torch.load in the prediction cell)\n",
    "        if validation_loss < min_validation_loss:\n",
    "            min_validation_loss = validation_loss\n",
    "            torch.save(model, 'best_model.torch')\n",
    "\n",
    "    print(epoch, train_loss, validation_loss)\n",
    "\n",
    "# Plot the losses\n",
    "plt.plot(train_losses, 'b', label='train')\n",
    "plt.plot(validation_losses, 'r', label='validation')\n",
    "plt.yscale('log')\n",
    "plt.legend()\n",
    "plt.grid(which='both')\n",
    "plt.tight_layout()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Make predictions on the validation set\n",
    "\n",
    "# Load the best model (lowest validation loss) saved during training.\n",
    "# NOTE(review): torch.load unpickles an entire module — only safe because\n",
    "# the checkpoint was produced by this notebook; never load untrusted files.\n",
    "model = torch.load('best_model.torch').to(DEVICE)\n",
    "\n",
    "with torch.no_grad():\n",
    "    # Sigmoid turns the raw logits into scores in [0, 1]\n",
    "    scores = torch.sigmoid(model(validation_features)).view(-1)\n",
    "\n",
    "# Move results back to NumPy for the analysis cells below.\n",
    "# NOTE(review): rebinds the notebook-global `labels`/`weights` names.\n",
    "scores = scores.cpu().numpy()\n",
    "labels = validation_labels.cpu().numpy()\n",
    "weights = validation_weights\n",
    "\n",
    "print(scores.shape)\n",
    "print(scores)\n",
    "print(labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot score distribution\n",
    "\n",
    "signal_scores, signal_weights = scores[labels == 1], weights[labels == 1]\n",
    "background_scores, background_weights = scores[labels == 0], weights[labels == 0]\n",
    "\n",
    "plt.hist(signal_scores, bins=50, weights=signal_weights, density=False, histtype='step', color='r', label='s')\n",
    "plt.hist(background_scores, bins=50, weights=background_weights, density=False, histtype='step', color='b', label='b')\n",
    "plt.xlim(0, 1)\n",
    "# Clip the y-axis so the background peak near 0 does not dwarf the signal\n",
    "plt.ylim(0, 1000)\n",
    "plt.legend()\n",
    "plt.grid()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build table [threshold, s, b, Z]\n",
    "\n",
    "# Sort the predictions by score descendingly\n",
    "indices = np.argsort(scores)[::-1]\n",
    "scores, labels, weights = scores[indices], labels[indices], weights[indices]\n",
    "\n",
    "# Cumulative signal (s) and background (b) weight selected at each threshold,\n",
    "# prepended with the empty selection (threshold 1: s = b = 0).\n",
    "# Vectorized cumsum (float64 accumulation) replaces the per-row Python loop.\n",
    "s = np.concatenate([[0.0], np.cumsum(np.where(labels == 1, weights, 0), dtype=np.float64)])\n",
    "b = np.concatenate([[0.0], np.cumsum(np.where(labels == 0, weights, 0), dtype=np.float64)])\n",
    "thresholds = np.concatenate([[1.0], scores])\n",
    "\n",
    "# Approximate median significance with regularization term B_REG;\n",
    "# the formula evaluates to 0 at s = 0, matching the empty-selection row.\n",
    "B_REG = 10\n",
    "Z = np.sqrt(2 * ((s + b + B_REG) * np.log(1 + s / (b + B_REG)) - s))\n",
    "\n",
    "table = np.stack([thresholds, s, b, Z], axis=1)\n",
    "np.savetxt('table.txt', table, fmt='%8.3f')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Summary plots: selection yields, ROC curve, and significance vs. threshold\n",
    "plt.figure(figsize=(8, 6))\n",
    "\n",
    "# Panel 1: selected signal weight as the threshold varies\n",
    "plt.subplot(2, 2, 1)\n",
    "plt.plot(table[:, 0], table[:, 1])\n",
    "plt.xlim(0, 1)\n",
    "plt.ylim(bottom=0)\n",
    "plt.grid()\n",
    "plt.xlabel('Score threshold')\n",
    "plt.ylabel('Number of signal events')\n",
    "\n",
    "# Panel 2: selected background weight as the threshold varies\n",
    "plt.subplot(2, 2, 2)\n",
    "plt.plot(table[:, 0], table[:, 2])\n",
    "plt.xlim(0, 1)\n",
    "plt.ylim(bottom=0)\n",
    "plt.grid()\n",
    "plt.xlabel('Score threshold')\n",
    "plt.ylabel('Number of background events')\n",
    "\n",
    "# Panel 3: ROC curve — efficiencies are yields divided by the totals\n",
    "# in the table's last row (threshold 0: everything selected)\n",
    "plt.subplot(2, 2, 3)\n",
    "plt.plot(table[:, 2] / table[-1, 2], table[:, 1] / table[-1, 1])\n",
    "plt.xlim(0, 1)\n",
    "plt.ylim(0, 1)\n",
    "plt.grid()\n",
    "plt.xlabel('Background efficiency (false positive rate)')\n",
    "plt.ylabel('Signal efficiency (true positive rate)')\n",
    "\n",
    "# Panel 4: significance Z vs. threshold (its peak is the working point)\n",
    "plt.subplot(2, 2, 4)\n",
    "plt.plot(table[:, 0], table[:, 3])\n",
    "plt.xlim(0, 1)\n",
    "plt.ylim(bottom=0)\n",
    "plt.grid()\n",
    "plt.xlabel('Score threshold')\n",
    "plt.ylabel('Significance')\n",
    "\n",
    "plt.tight_layout()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.2"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
