{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# Install training dependencies into this kernel's environment (%pip, unlike !pip, targets the running kernel).\n",
     "# NOTE(review): versions are unpinned — consider pinning for reproducibility.\n",
     "%pip install torch transformers pandas datasets accelerate scikit-learn mlflow tensorboard ray[all]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "import IPython\n",
     "\n",
     "# do_shutdown(True) shuts the kernel down and restarts it automatically,\n",
     "# so the packages installed in the previous cell become importable.\n",
     "IPython.Application.instance().kernel.do_shutdown(True)  # automatically restarts kernel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-07-06T12:10:56.065286Z",
     "iopub.status.busy": "2022-07-06T12:10:56.064981Z",
     "iopub.status.idle": "2022-07-06T12:28:38.790606Z",
     "shell.execute_reply": "2022-07-06T12:28:38.790034Z",
     "shell.execute_reply.started": "2022-07-06T12:10:56.065256Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-07-06 12:11:01,206\tINFO trainer.py:243 -- Trainer logs will be logged in: /home/emr-notebook/ray_results/train_2022-07-06_12-11-01\n",
      "2022-07-06 12:11:08,796\tINFO trainer.py:249 -- Run results will be logged in: /home/emr-notebook/ray_results/train_2022-07-06_12-11-01/run_001\n",
      "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13483, ip=172.31.19.159)\u001b[0m 2022-07-06 12:11:08,790\tINFO torch.py:347 -- Setting up process group for: env:// [rank=0, world_size=1]\n",
      "100%|██████████| 2/2 [00:00<00:00, 763.78it/s].19.159)\u001b[0m \n",
      "Running tokenizer on dataset:   0%|          | 0/6 [00:00<?, ?ba/s]\n",
      "Running tokenizer on dataset:  33%|███▎      | 2/6 [00:00<00:00, 11.91ba/s]\n",
      "Running tokenizer on dataset:  67%|██████▋   | 4/6 [00:00<00:00, 12.69ba/s]\n",
      "Running tokenizer on dataset: 100%|██████████| 6/6 [00:00<00:00, 13.85ba/s]\n",
      "Running tokenizer on dataset: 100%|██████████| 1/1 [00:00<00:00, 38.16ba/s]\n",
      "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13483, ip=172.31.19.159)\u001b[0m /tmp/ray/session_2022-07-06_02-59-12_340846_29218/runtime_resources/pip/cd84599724d6bddefa1f21463b30c7087483d3c0/virtualenv/lib/python3.7/site-packages/transformers/optimization.py:310: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
      "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13483, ip=172.31.19.159)\u001b[0m   FutureWarning,\n",
      "  0%|          | 0/100 [00:00<?, ?it/s]=172.31.19.159)\u001b[0m \n",
      "  1%|          | 1/100 [00:11<18:41, 11.33s/it]19.159)\u001b[0m \n",
      "  2%|▏         | 2/100 [00:22<18:03, 11.05s/it]19.159)\u001b[0m \n",
      "  3%|▎         | 3/100 [00:32<17:15, 10.68s/it]19.159)\u001b[0m \n",
      "  4%|▍         | 4/100 [00:42<16:52, 10.54s/it]19.159)\u001b[0m \n",
      "  5%|▌         | 5/100 [00:52<16:24, 10.37s/it]19.159)\u001b[0m \n",
      "  6%|▌         | 6/100 [01:03<16:14, 10.36s/it]19.159)\u001b[0m \n",
      "  7%|▋         | 7/100 [01:13<15:55, 10.28s/it]19.159)\u001b[0m \n",
      "  8%|▊         | 8/100 [01:23<15:40, 10.22s/it]19.159)\u001b[0m \n",
      "  9%|▉         | 9/100 [01:33<15:24, 10.16s/it]19.159)\u001b[0m \n",
      " 10%|█         | 10/100 [01:43<15:14, 10.16s/it]9.159)\u001b[0m \n",
      " 11%|█         | 11/100 [01:53<15:04, 10.17s/it]9.159)\u001b[0m \n",
      " 12%|█▏        | 12/100 [02:03<14:55, 10.17s/it]9.159)\u001b[0m \n",
      " 13%|█▎        | 13/100 [02:14<14:45, 10.18s/it]9.159)\u001b[0m \n",
      " 14%|█▍        | 14/100 [02:24<14:36, 10.19s/it]9.159)\u001b[0m \n",
      " 15%|█▌        | 15/100 [02:34<14:25, 10.18s/it]9.159)\u001b[0m \n",
      " 16%|█▌        | 16/100 [02:44<14:11, 10.14s/it]9.159)\u001b[0m \n",
      " 17%|█▋        | 17/100 [02:54<14:00, 10.13s/it]9.159)\u001b[0m \n",
      " 18%|█▊        | 18/100 [03:04<13:52, 10.15s/it]9.159)\u001b[0m \n",
      " 19%|█▉        | 19/100 [03:14<13:39, 10.11s/it]9.159)\u001b[0m \n",
      " 20%|██        | 20/100 [03:24<13:27, 10.09s/it]9.159)\u001b[0m \n",
      " 21%|██        | 21/100 [03:34<13:15, 10.07s/it]9.159)\u001b[0m \n",
      " 22%|██▏       | 22/100 [03:44<13:05, 10.07s/it]9.159)\u001b[0m \n",
      " 23%|██▎       | 23/100 [03:55<12:55, 10.07s/it]9.159)\u001b[0m \n",
      " 24%|██▍       | 24/100 [04:05<12:44, 10.06s/it]9.159)\u001b[0m \n",
      " 25%|██▌       | 25/100 [04:15<12:34, 10.07s/it]9.159)\u001b[0m \n",
      " 26%|██▌       | 26/100 [04:25<12:25, 10.07s/it]9.159)\u001b[0m \n",
      " 27%|██▋       | 27/100 [04:35<12:14, 10.06s/it]9.159)\u001b[0m \n",
      " 28%|██▊       | 28/100 [04:45<12:05, 10.07s/it]9.159)\u001b[0m \n",
      " 29%|██▉       | 29/100 [04:55<11:54, 10.07s/it]9.159)\u001b[0m \n",
      " 30%|███       | 30/100 [05:05<11:44, 10.07s/it]9.159)\u001b[0m \n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m [2022-07-06 12:16:23,113 C 18274 18274] (raylet) node_manager.cc:170: This node has beem marked as dead.\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m *** StackTrace Information ***\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     ray::SpdLogMessage::Flush()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     ray::RayLog::~RayLog()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     std::_Function_handler<>::_M_invoke()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     std::_Function_handler<>::_M_invoke()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     std::_Function_handler<>::_M_invoke()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     ray::rpc::ClientCallImpl<>::OnReplyReceived()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     std::_Function_handler<>::_M_invoke()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     EventTracker::RecordExecution()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     std::_Function_handler<>::_M_invoke()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     boost::asio::detail::completion_handler<>::do_complete()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     boost::asio::detail::scheduler::do_run_one()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     boost::asio::detail::scheduler::run()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     boost::asio::io_context::run()\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     main\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m     __libc_start_main\n",
      "\u001b[2m\u001b[33m(raylet, ip=172.31.29.141)\u001b[0m \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,750 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,750 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,750 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,750 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,750 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,750 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,750 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,750 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,750 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,751 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,752 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,753 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,753 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,753 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,753 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,753 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,753 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,753 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,753 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      "\u001b[2m\u001b[36m(pid=gcs_server)\u001b[0m [2022-07-06 12:16:23,753 E 29692 29692] (gcs_server) gcs_server.cc:283: Failed to get the resource load: GrpcUnavailable: RPC Error message: Socket closed; RPC Error details: \n",
      " 31%|███       | 31/100 [05:15<11:34, 10.06s/it]9.159)\u001b[0m \n",
      " 32%|███▏      | 32/100 [05:25<11:23, 10.05s/it]9.159)\u001b[0m \n",
      " 33%|███▎      | 33/100 [05:35<11:12, 10.04s/it]9.159)\u001b[0m \n",
      " 34%|███▍      | 34/100 [05:45<11:02, 10.04s/it]9.159)\u001b[0m \n",
      " 35%|███▌      | 35/100 [05:55<10:53, 10.06s/it]9.159)\u001b[0m \n",
      " 36%|███▌      | 36/100 [06:05<10:44, 10.06s/it]9.159)\u001b[0m \n",
      " 37%|███▋      | 37/100 [06:15<10:33, 10.06s/it]9.159)\u001b[0m \n",
      " 38%|███▊      | 38/100 [06:25<10:22, 10.04s/it]9.159)\u001b[0m \n",
      " 39%|███▉      | 39/100 [06:35<10:12, 10.04s/it]9.159)\u001b[0m \n",
      " 40%|████      | 40/100 [06:45<10:02, 10.03s/it]9.159)\u001b[0m \n",
      " 41%|████      | 41/100 [06:55<09:51, 10.03s/it]9.159)\u001b[0m \n",
      " 42%|████▏     | 42/100 [07:05<09:41, 10.03s/it]9.159)\u001b[0m \n",
      " 43%|████▎     | 43/100 [07:16<09:32, 10.04s/it]9.159)\u001b[0m \n",
      " 44%|████▍     | 44/100 [07:26<09:21, 10.03s/it]9.159)\u001b[0m \n",
      " 45%|████▌     | 45/100 [07:36<09:13, 10.07s/it]9.159)\u001b[0m \n",
      " 46%|████▌     | 46/100 [07:46<09:05, 10.11s/it]9.159)\u001b[0m \n",
      " 47%|████▋     | 47/100 [07:56<08:54, 10.09s/it]9.159)\u001b[0m \n",
      " 48%|████▊     | 48/100 [08:06<08:43, 10.07s/it]9.159)\u001b[0m \n",
      " 49%|████▉     | 49/100 [08:16<08:33, 10.07s/it]9.159)\u001b[0m \n",
      " 50%|█████     | 50/100 [08:26<08:25, 10.11s/it]9.159)\u001b[0m \n",
      " 51%|█████     | 51/100 [08:36<08:15, 10.11s/it]9.159)\u001b[0m \n",
      " 52%|█████▏    | 52/100 [08:46<08:04, 10.10s/it]9.159)\u001b[0m \n",
      " 53%|█████▎    | 53/100 [08:56<07:53, 10.08s/it]9.159)\u001b[0m \n",
      " 54%|█████▍    | 54/100 [09:07<07:44, 10.09s/it]9.159)\u001b[0m \n",
      " 55%|█████▌    | 55/100 [09:17<07:34, 10.09s/it]9.159)\u001b[0m \n",
      " 56%|█████▌    | 56/100 [09:27<07:25, 10.12s/it]9.159)\u001b[0m \n",
      " 57%|█████▋    | 57/100 [09:37<07:14, 10.11s/it]9.159)\u001b[0m \n",
      " 58%|█████▊    | 58/100 [09:47<07:06, 10.14s/it]9.159)\u001b[0m \n",
      " 59%|█████▉    | 59/100 [09:57<06:55, 10.13s/it]9.159)\u001b[0m \n",
      " 60%|██████    | 60/100 [10:07<06:45, 10.13s/it]9.159)\u001b[0m \n",
      " 61%|██████    | 61/100 [10:18<06:34, 10.12s/it]9.159)\u001b[0m \n",
      " 62%|██████▏   | 62/100 [10:28<06:24, 10.12s/it]9.159)\u001b[0m \n",
      " 63%|██████▎   | 63/100 [10:38<06:14, 10.11s/it]9.159)\u001b[0m \n",
      " 64%|██████▍   | 64/100 [10:48<06:03, 10.11s/it]9.159)\u001b[0m \n",
      " 65%|██████▌   | 65/100 [10:58<05:54, 10.12s/it]9.159)\u001b[0m \n",
      " 66%|██████▌   | 66/100 [11:08<05:44, 10.12s/it]9.159)\u001b[0m \n",
      " 67%|██████▋   | 67/100 [11:18<05:33, 10.11s/it]9.159)\u001b[0m \n",
      " 68%|██████▊   | 68/100 [11:28<05:23, 10.11s/it]9.159)\u001b[0m \n",
      " 69%|██████▉   | 69/100 [11:38<05:13, 10.11s/it]9.159)\u001b[0m \n",
      " 70%|███████   | 70/100 [11:48<05:02, 10.09s/it]9.159)\u001b[0m \n",
      " 71%|███████   | 71/100 [11:59<04:52, 10.08s/it]9.159)\u001b[0m \n",
      " 72%|███████▏  | 72/100 [12:09<04:41, 10.06s/it]9.159)\u001b[0m \n",
      " 73%|███████▎  | 73/100 [12:19<04:31, 10.07s/it]9.159)\u001b[0m \n",
      " 74%|███████▍  | 74/100 [12:29<04:22, 10.08s/it]9.159)\u001b[0m \n",
      " 75%|███████▌  | 75/100 [12:39<04:11, 10.07s/it]9.159)\u001b[0m \n",
      " 76%|███████▌  | 76/100 [12:49<04:01, 10.08s/it]9.159)\u001b[0m \n",
      " 77%|███████▋  | 77/100 [12:59<03:51, 10.09s/it]9.159)\u001b[0m \n",
      " 78%|███████▊  | 78/100 [13:09<03:42, 10.10s/it]9.159)\u001b[0m \n",
      " 79%|███████▉  | 79/100 [13:19<03:32, 10.11s/it]9.159)\u001b[0m \n",
      " 80%|████████  | 80/100 [13:29<03:22, 10.11s/it]9.159)\u001b[0m \n",
      " 81%|████████  | 81/100 [13:39<03:11, 10.10s/it]9.159)\u001b[0m \n",
      " 82%|████████▏ | 82/100 [13:50<03:01, 10.11s/it]9.159)\u001b[0m \n",
      " 83%|████████▎ | 83/100 [14:00<02:51, 10.10s/it]9.159)\u001b[0m \n",
      " 84%|████████▍ | 84/100 [14:10<02:41, 10.10s/it]9.159)\u001b[0m \n",
      " 85%|████████▌ | 85/100 [14:20<02:31, 10.10s/it]9.159)\u001b[0m \n",
      " 86%|████████▌ | 86/100 [14:30<02:21, 10.10s/it]9.159)\u001b[0m \n",
      " 87%|████████▋ | 87/100 [14:40<02:11, 10.10s/it]9.159)\u001b[0m \n",
      " 88%|████████▊ | 88/100 [14:50<02:01, 10.10s/it]9.159)\u001b[0m \n",
      " 89%|████████▉ | 89/100 [15:00<01:51, 10.11s/it]9.159)\u001b[0m \n",
      " 90%|█████████ | 90/100 [15:10<01:41, 10.12s/it]9.159)\u001b[0m \n",
      " 91%|█████████ | 91/100 [15:21<01:31, 10.16s/it]9.159)\u001b[0m \n",
      " 92%|█████████▏| 92/100 [15:31<01:21, 10.16s/it]9.159)\u001b[0m \n",
      " 93%|█████████▎| 93/100 [15:41<01:10, 10.14s/it]9.159)\u001b[0m \n",
      " 94%|█████████▍| 94/100 [15:51<01:00, 10.13s/it]9.159)\u001b[0m \n",
      " 95%|█████████▌| 95/100 [16:01<00:50, 10.13s/it]9.159)\u001b[0m \n",
      " 96%|█████████▌| 96/100 [16:11<00:40, 10.12s/it]9.159)\u001b[0m \n",
      " 97%|█████████▋| 97/100 [16:21<00:30, 10.13s/it]9.159)\u001b[0m \n",
      " 98%|█████████▊| 98/100 [16:32<00:20, 10.13s/it]9.159)\u001b[0m \n",
      " 99%|█████████▉| 99/100 [16:42<00:10, 10.17s/it]9.159)\u001b[0m \n",
      "100%|██████████| 100/100 [16:52<00:00, 10.15s/it].159)\u001b[0m \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[2m\u001b[36m(BaseWorkerMixin pid=13483, ip=172.31.19.159)\u001b[0m epoch 0: {'accuracy': 0.6351791530944625}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 100/100 [17:24<00:00, 10.44s/it].159)\u001b[0m \n"
     ]
    }
   ],
   "source": [
    "import ray\n",
    "\n",
    "ray.shutdown()\n",
    "\n",
     "# Finetuning a 🤗 Transformers model for sequence classification.\n",
    "import argparse\n",
    "import logging\n",
    "import math\n",
    "import os\n",
    "os.environ[\"TOKENIZERS_PARALLELISM\"] = \"true\"\n",
    "\n",
    "from typing import Dict, Any\n",
    "import random\n",
    "import torch\n",
    "\n",
    "#import mlflow\n",
    "#mlflow.set_tracking_uri('http://172.31.28.127:5001')\n",
    "\n",
    "import datasets\n",
    "import ray\n",
    "import transformers\n",
    "from accelerate import Accelerator\n",
    "from datasets import load_dataset, load_metric\n",
    "from ray.train import Trainer\n",
    "from torch.utils.data.dataloader import DataLoader\n",
    "from tqdm.auto import tqdm\n",
    "from transformers import (\n",
    "    AdamW,\n",
    "    AutoConfig,\n",
    "    AutoModelForSequenceClassification,\n",
    "    AutoTokenizer,\n",
    "    DataCollatorWithPadding,\n",
    "    PretrainedConfig,\n",
    "    SchedulerType,\n",
    "    default_data_collator,\n",
    "    get_scheduler,\n",
    "    set_seed,\n",
    ")\n",
    "from transformers.utils.versions import require_version\n",
    "\n",
    "logging.basicConfig(level=logging.ERROR)\n",
    "logger = logging.getLogger(__name__)\n",
    "\n",
    "\n",
    "def parse_args():\n",
    "    parser = argparse.ArgumentParser(\n",
    "        description=\"Finetune a transformers model on a text classification task\"\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"-f\",\n",
    "        type=str,\n",
    "        default=None,\n",
    "        help=\"Ignore this!\",\n",
    "    )    \n",
    "    parser.add_argument(\n",
    "        \"--train_file\",\n",
    "        type=str,\n",
    "        default=\"data/train/part-algo-1-womens_clothing_ecommerce_reviews.csv\",\n",
    "        help=\"A csv or a json file containing the training data.\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--validation_file\",\n",
    "        type=str,\n",
    "        default=\"data/validation/part-algo-1-womens_clothing_ecommerce_reviews.csv\",\n",
    "        help=\"A csv or a json file containing the validation data.\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--max_length\",\n",
    "        type=int,\n",
    "        default=64,\n",
    "        help=(\n",
    "            \"The maximum total input sequence length after tokenization. \"\n",
    "            \"Sequences longer than this will be truncated, sequences shorter \"\n",
     "            \"will be padded if `--pad_to_max_length` is passed.\"\n",
    "        ),\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--pad_to_max_length\",\n",
    "        action=\"store_true\",\n",
    "        help=\"If passed, pad all samples to `max_length`. Otherwise, dynamic \"\n",
    "        \"padding is used.\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--model_name_or_path\",\n",
    "        type=str,\n",
    "        help=\"Path to pretrained model or model identifier from \"\n",
    "        \"huggingface.co/models.\",\n",
    "        default=\"roberta-base\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--use_slow_tokenizer\",\n",
    "        action=\"store_true\",\n",
    "        help=\"If passed, will use a slow tokenizer (not backed by the 🤗 \"\n",
    "        \"Tokenizers library).\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--per_device_train_batch_size\",\n",
    "        type=int,\n",
    "        default=32,\n",
    "        help=\"Batch size (per device) for the training dataloader.\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--per_device_eval_batch_size\",\n",
    "        type=int,\n",
    "        default=8,\n",
    "        help=\"Batch size (per device) for the evaluation dataloader.\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--learning_rate\",\n",
    "        type=float,\n",
    "        default=5e-5,\n",
    "        help=\"Initial learning rate (after the potential warmup period) to use.\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\"\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--num_train_epochs\",\n",
    "        type=int,\n",
    "        default=3,\n",
    "        help=\"Total number of training epochs to perform.\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--max_train_steps\",\n",
    "        type=int,\n",
    "        default=100,\n",
    "        help=\"Total number of training steps to perform. If provided, \"\n",
    "        \"overrides num_train_epochs.\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--gradient_accumulation_steps\",\n",
    "        type=int,\n",
    "        default=1,\n",
    "        help=\"Number of updates steps to accumulate before performing a \"\n",
    "        \"backward/update pass.\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--lr_scheduler_type\",\n",
    "        type=SchedulerType,\n",
    "        default=\"linear\",\n",
    "        help=\"The scheduler type to use.\",\n",
    "        choices=[\n",
    "            \"linear\",\n",
    "            \"cosine\",\n",
    "            \"cosine_with_restarts\",\n",
    "            \"polynomial\",\n",
    "            \"constant\",\n",
    "            \"constant_with_warmup\",\n",
    "        ],\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--num_warmup_steps\",\n",
    "        type=int,\n",
    "        default=0,\n",
    "        help=\"Number of steps for the warmup in the lr scheduler.\",\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--output_dir\", type=str, default=None, help=\"Where to store the final model.\"\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--seed\", type=int, default=None, help=\"A seed for reproducible training.\"\n",
    "    )\n",
    "\n",
    "    # Ray arguments.\n",
    "    parser.add_argument(\n",
    "        \"--start_local\", action=\"store_true\", help=\"Starts Ray on local machine.\"\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--address\", \n",
    "        type=str, \n",
    "        default=\"ray://localhost:10001\", \n",
    "        help=\"Ray address to connect to.\"\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--num_workers\", \n",
    "        type=int, \n",
    "        default=1, \n",
    "        help=\"Number of workers to use.\"\n",
    "    )\n",
    "    parser.add_argument(\n",
    "        \"--use_gpu\", action=\"store_true\", help=\"If training should be done on GPUs.\"\n",
    "    )\n",
    "\n",
    "    args = parser.parse_args()\n",
    "\n",
    "    # Sanity checks\n",
    "    if (\n",
    "#        args.task_name is None\n",
    "        args.train_file is None\n",
    "        and args.validation_file is None\n",
    "    ):\n",
    "        raise ValueError(\"Need a training/validation file.\")\n",
    "    else:\n",
    "        if args.train_file is not None:\n",
    "            extension = args.train_file.split(\".\")[-1]\n",
    "            assert extension in [\n",
    "                \"csv\",\n",
    "                \"json\",\n",
    "            ], \"`train_file` should be a csv or a json file.\"\n",
    "        if args.validation_file is not None:\n",
    "            extension = args.validation_file.split(\".\")[-1]\n",
    "            assert extension in [\n",
    "                \"csv\",\n",
    "                \"json\",\n",
    "            ], \"`validation_file` should be a csv or a json file.\"\n",
    "\n",
    "    if args.output_dir is not None:\n",
    "        os.makedirs(args.output_dir, exist_ok=True)\n",
    "\n",
    "    return args\n",
    "\n",
    "\n",
    "def train_func(config: Dict[str, Any]):\n",
    "    \"\"\"Per-worker training loop executed by Ray Train (or locally).\n",
    "\n",
    "    Expects ``config[\"args\"]`` to hold the parsed CLI namespace. Loads the\n",
    "    CSV/JSON train/validation files, tokenizes the 'review_body' column,\n",
    "    fine-tunes the model with HF Accelerate, reports accuracy after each\n",
    "    epoch, and saves the final model to ``args.output_dir`` if set.\n",
    "    \"\"\"\n",
    "    args = config[\"args\"]\n",
    "    # Initialize the accelerator. We will let the accelerator handle device\n",
    "    # placement for us in this example.\n",
    "    accelerator = Accelerator()\n",
    "    # Make one log on every process with the configuration for debugging.\n",
    "    logging.basicConfig(\n",
    "        format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n",
    "        datefmt=\"%m/%d/%Y %H:%M:%S\",\n",
    "        level=logging.ERROR,\n",
    "    )\n",
    "    logger.info(accelerator.state)\n",
    "\n",
    "    # Keep every process at error-only verbosity (the original code set the\n",
    "    # same level on both main and non-main processes, so the branches are\n",
    "    # collapsed here).\n",
    "    logger.setLevel(logging.ERROR)\n",
    "    datasets.utils.logging.set_verbosity_error()\n",
    "    transformers.utils.logging.set_verbosity_error()\n",
    "\n",
    "    # If passed along, set the training seed now.\n",
    "    if args.seed is not None:\n",
    "        set_seed(args.seed)\n",
    "\n",
    "    # Load the dataset from local csv or json files. The script uses the\n",
    "    # column called 'sentiment' as the label and 'review_body' as the input\n",
    "    # sentence (see preprocess_function below). In distributed training,\n",
    "    # load_dataset guarantees that only one local process downloads the data.\n",
    "    data_files = {}\n",
    "    if args.train_file is not None:\n",
    "        data_files[\"train\"] = args.train_file\n",
    "    if args.validation_file is not None:\n",
    "        data_files[\"validation\"] = args.validation_file\n",
    "    # BUG FIX: was `args.valid_file`, which is not a defined argument (the\n",
    "    # flag is --validation_file) and raised AttributeError whenever only a\n",
    "    # validation file was supplied.\n",
    "    extension = (\n",
    "        args.train_file if args.train_file is not None else args.validation_file\n",
    "    ).split(\".\")[-1]\n",
    "\n",
    "    raw_datasets = load_dataset(extension, data_files=data_files)\n",
    "\n",
    "    # Derive the label set from the training split.\n",
    "    label_list = raw_datasets[\"train\"].unique(\"sentiment\")\n",
    "    label_list.sort()  # Let's sort it for determinism\n",
    "    num_labels = len(label_list)\n",
    "\n",
    "    # Load pretrained model and tokenizer.\n",
    "    #\n",
    "    # In distributed training, the .from_pretrained methods guarantee that\n",
    "    # only one local process can concurrently download model & vocab.\n",
    "    config = AutoConfig.from_pretrained(\n",
    "        args.model_name_or_path, num_labels=num_labels,\n",
    "    )\n",
    "    tokenizer = AutoTokenizer.from_pretrained(\n",
    "        args.model_name_or_path, use_fast=not args.use_slow_tokenizer\n",
    "    )\n",
    "    model = AutoModelForSequenceClassification.from_pretrained(\n",
    "        args.model_name_or_path,\n",
    "        config=config,\n",
    "    )\n",
    "\n",
    "    # Preprocessing: single-sentence classification on 'review_body'.\n",
    "    sentence1_key, sentence2_key = \"review_body\", None\n",
    "\n",
    "    # Fix the label order on the model config so the id<->label mappings are\n",
    "    # deterministic across runs. `label_to_id` is always a dict here, so the\n",
    "    # previous `is not None` guard was dead code and has been removed.\n",
    "    label_to_id = {v: i for i, v in enumerate(label_list)}\n",
    "    model.config.label2id = label_to_id\n",
    "    model.config.id2label = {id: label for label, id in config.label2id.items()}\n",
    "\n",
    "    padding = \"max_length\" if args.pad_to_max_length else False\n",
    "\n",
    "    def preprocess_function(examples):\n",
    "        # Tokenize the texts\n",
    "        texts = (\n",
    "            (examples[sentence1_key],)\n",
    "            if sentence2_key is None\n",
    "            else (examples[sentence1_key], examples[sentence2_key])\n",
    "        )\n",
    "        result = tokenizer(\n",
    "            *texts, padding=padding, max_length=args.max_length, truncation=True\n",
    "        )\n",
    "\n",
    "        if \"sentiment\" in examples:\n",
    "            # Map labels to IDs under the 'labels' key the model expects.\n",
    "            result[\"labels\"] = [\n",
    "                label_to_id[l] for l in examples[\"sentiment\"]  # noqa:E741\n",
    "            ]\n",
    "\n",
    "        return result\n",
    "\n",
    "    processed_datasets = raw_datasets.map(\n",
    "        preprocess_function,\n",
    "        batched=True,\n",
    "        remove_columns=raw_datasets[\"train\"].column_names,\n",
    "        desc=\"Running tokenizer on dataset\",\n",
    "    )\n",
    "\n",
    "    train_dataset = processed_datasets[\"train\"]\n",
    "    eval_dataset = processed_datasets[\"validation\"]\n",
    "\n",
    "    # Log a few random samples from the training set:\n",
    "    for index in random.sample(range(len(train_dataset)), 3):\n",
    "        logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n",
    "\n",
    "    # DataLoaders creation:\n",
    "    if args.pad_to_max_length:\n",
    "        # If padding was already done to max length, we use the default data\n",
    "        # collator that will just convert everything to tensors.\n",
    "        data_collator = default_data_collator\n",
    "    else:\n",
    "        # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for\n",
    "        # us (by padding to the maximum length of the samples passed). When\n",
    "        # using mixed precision, we add `pad_to_multiple_of=8` to pad all\n",
    "        # tensors to multiple of 8s, which will enable the use of Tensor\n",
    "        # Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\n",
    "        data_collator = DataCollatorWithPadding(\n",
    "            tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)\n",
    "        )\n",
    "\n",
    "    train_dataloader = DataLoader(\n",
    "        train_dataset,\n",
    "        shuffle=True,\n",
    "        collate_fn=data_collator,\n",
    "        batch_size=args.per_device_train_batch_size,\n",
    "    )\n",
    "    eval_dataloader = DataLoader(\n",
    "        eval_dataset,\n",
    "        collate_fn=data_collator,\n",
    "        batch_size=args.per_device_eval_batch_size,\n",
    "    )\n",
    "\n",
    "    # Optimizer\n",
    "    # Split weights in two groups, one with weight decay and the other not.\n",
    "    no_decay = [\"bias\", \"LayerNorm.weight\"]\n",
    "    optimizer_grouped_parameters = [\n",
    "        {\n",
    "            \"params\": [\n",
    "                p\n",
    "                for n, p in model.named_parameters()\n",
    "                if not any(nd in n for nd in no_decay)\n",
    "            ],\n",
    "            \"weight_decay\": args.weight_decay,\n",
    "        },\n",
    "        {\n",
    "            \"params\": [\n",
    "                p\n",
    "                for n, p in model.named_parameters()\n",
    "                if any(nd in n for nd in no_decay)\n",
    "            ],\n",
    "            \"weight_decay\": 0.0,\n",
    "        },\n",
    "    ]\n",
    "    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n",
    "\n",
    "    # Prepare everything with our `accelerator`.\n",
    "    model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n",
    "        model, optimizer, train_dataloader, eval_dataloader\n",
    "    )\n",
    "\n",
    "    # Note -> the training dataloader needs to be prepared before we grab\n",
    "    # its length below (because its length will be shorter in multiprocess).\n",
    "\n",
    "    # Scheduler and math around the number of training steps.\n",
    "    num_update_steps_per_epoch = math.ceil(\n",
    "        len(train_dataloader) / args.gradient_accumulation_steps\n",
    "    )\n",
    "    if args.max_train_steps is None:\n",
    "        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n",
    "    else:\n",
    "        args.num_train_epochs = math.ceil(\n",
    "            args.max_train_steps / num_update_steps_per_epoch\n",
    "        )\n",
    "\n",
    "    lr_scheduler = get_scheduler(\n",
    "        name=args.lr_scheduler_type,\n",
    "        optimizer=optimizer,\n",
    "        num_warmup_steps=args.num_warmup_steps,\n",
    "        num_training_steps=args.max_train_steps,\n",
    "    )\n",
    "\n",
    "    # Get the metric function.\n",
    "    metric = load_metric(\"accuracy\")\n",
    "\n",
    "    # Train!\n",
    "    total_batch_size = (\n",
    "        args.per_device_train_batch_size\n",
    "        * accelerator.num_processes\n",
    "        * args.gradient_accumulation_steps\n",
    "    )\n",
    "\n",
    "    logger.info(\"***** Running training *****\")\n",
    "    logger.info(f\"  Num examples = {len(train_dataset)}\")\n",
    "    logger.info(f\"  Num epochs = {args.num_train_epochs}\")\n",
    "    logger.info(\n",
    "        f\"  Instantaneous batch size per device =\"\n",
    "        f\" {args.per_device_train_batch_size}\"\n",
    "    )\n",
    "    logger.info(\n",
    "        f\"  Total train batch size (w. parallel, distributed & accumulation) \"\n",
    "        f\"= {total_batch_size}\"\n",
    "    )\n",
    "    logger.info(f\"  Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n",
    "    logger.info(f\"  Total optimization steps = {args.max_train_steps}\")\n",
    "    # Only show the progress bar once on each machine.\n",
    "    progress_bar = tqdm(\n",
    "        range(args.max_train_steps), disable=not accelerator.is_local_main_process\n",
    "    )\n",
    "    completed_steps = 0\n",
    "\n",
    "    for epoch in range(args.num_train_epochs):\n",
    "        model.train()\n",
    "        for step, batch in enumerate(train_dataloader):\n",
    "            outputs = model(**batch)\n",
    "            loss = outputs.loss\n",
    "            loss = loss / args.gradient_accumulation_steps\n",
    "            accelerator.backward(loss)\n",
    "            # Step the optimizer every `gradient_accumulation_steps` batches\n",
    "            # and on the last batch of the epoch.\n",
    "            if (\n",
    "                step % args.gradient_accumulation_steps == 0\n",
    "                or step == len(train_dataloader) - 1\n",
    "            ):\n",
    "                optimizer.step()\n",
    "                lr_scheduler.step()\n",
    "                optimizer.zero_grad()\n",
    "                progress_bar.update(1)\n",
    "                completed_steps += 1\n",
    "\n",
    "            if completed_steps >= args.max_train_steps:\n",
    "                break\n",
    "\n",
    "        model.eval()\n",
    "        for step, batch in enumerate(eval_dataloader):\n",
    "            outputs = model(**batch)\n",
    "            predictions = outputs.logits.argmax(dim=-1)\n",
    "            # Gather predictions/labels across processes before scoring.\n",
    "            metric.add_batch(\n",
    "                predictions=accelerator.gather(predictions),\n",
    "                references=accelerator.gather(batch[\"labels\"]),\n",
    "            )\n",
    "\n",
    "        eval_metric = metric.compute()\n",
    "        logger.info(f\"epoch {epoch}: {eval_metric}\")\n",
    "        print(f\"epoch {epoch}: {eval_metric}\")\n",
    "\n",
    "    # Save the fine-tuned model: all processes sync first, and the\n",
    "    # accelerator-aware save function handles main-process-only writes.\n",
    "    if args.output_dir is not None:\n",
    "        accelerator.wait_for_everyone()\n",
    "        unwrapped_model = accelerator.unwrap_model(model)\n",
    "        unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)\n",
    "\n",
    "\n",
    "def main():\n",
    "    \"\"\"Parse CLI args and launch training, either locally or on Ray.\n",
    "\n",
    "    Because --address has a non-empty default, the Ray branch is taken\n",
    "    unless the flag is explicitly cleared; --start_local instead spins up\n",
    "    a local Ray runtime.\n",
    "    \"\"\"\n",
    "    args = parse_args()\n",
    "    config = {\"args\": args}\n",
    "    # This notebook targets a CPU-only cluster, so GPU training is forced\n",
    "    # off regardless of the --use_gpu flag.\n",
    "    args.use_gpu = False\n",
    "\n",
    "    if args.start_local or args.address or args.num_workers > 1 or args.use_gpu:\n",
    "        if args.start_local:\n",
    "            # Start a local Ray runtime.\n",
    "            ray.init(num_cpus=args.num_workers)\n",
    "        else:\n",
    "            # Connect to a Ray cluster for distributed training.\n",
    "            # BUG FIX: honor --address instead of a hard-coded URL (the\n",
    "            # default is still ray://localhost:10001, so behavior is\n",
    "            # unchanged when the flag is omitted). Also removed the\n",
    "            # duplicate \"scikit-learn\" entry from the pip list.\n",
    "            ray.init(\n",
    "                address=args.address,\n",
    "                runtime_env={\n",
    "                    \"pip\": [\n",
    "                        \"torch\",\n",
    "                        \"transformers\",\n",
    "                        \"pandas\",\n",
    "                        \"datasets\",\n",
    "                        \"accelerate\",\n",
    "                        \"scikit-learn\",\n",
    "                        \"mlflow\",\n",
    "                        \"tensorboard\",\n",
    "                    ]\n",
    "                },\n",
    "            )\n",
    "\n",
    "        # Run train_func on the cluster with one process per worker.\n",
    "        trainer = Trainer(\n",
    "            \"torch\",\n",
    "            num_workers=args.num_workers,\n",
    "            use_gpu=args.use_gpu,\n",
    "            resources_per_worker={\"CPU\": 4, \"GPU\": 0},\n",
    "        )\n",
    "        trainer.start()\n",
    "        trainer.run(train_func, config)\n",
    "    else:\n",
    "        # Run training locally.\n",
    "        train_func(config)\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
