{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\miniconda3\\envs\\pytorch-gpu-1.12.1\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "from tqdm import tqdm\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "import math\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "\n",
    "\n",
    "# Load the training table; the CSV carries a stale pandas index column.\n",
    "train_data = pd.read_csv(\"train_data_mix.csv\").drop(columns='Unnamed: 0')\n",
    "# First three columns are the regression targets; the rest are features.\n",
    "features = train_data.iloc[:, 3:].values\n",
    "target = train_data.iloc[:, :3].values\n",
    "\n",
    "# Hold out 1% of the rows for validation; fixed seed for reproducibility.\n",
    "train_features, val_features, train_target, val_target = train_test_split(\n",
    "    features, target, test_size=0.01, random_state=42\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def precise(predict, target):\n",
    "    \"\"\"Weighted relative-error score for a single sample.\n",
    "\n",
    "    predict / target are length-3 sequences: (forward, comment, like).\n",
    "    Denominators are smoothed (+5 for forward, +3 for the others) to avoid\n",
    "    division by zero; the forward deviation is weighted twice the other two.\n",
    "    \"\"\"\n",
    "    d_f = abs(predict[0] - target[0]) / (target[0] + 5)\n",
    "    d_c = abs(predict[1] - target[1]) / (target[1] + 3)\n",
    "    d_l = abs(predict[2] - target[2]) / (target[2] + 3)\n",
    "\n",
    "    return 1 - 0.5 * d_f - 0.25 * d_c - 0.25 * d_l\n",
    "\n",
    "def sgn(x):\n",
    "    # Step function: 1 when x is positive, else 0.\n",
    "    if x > 0:\n",
    "        return 1\n",
    "    else:\n",
    "        return 0 \n",
    "\n",
    "def total_precise(predicts, targets):\n",
    "    \"\"\"Interaction-weighted hit rate over all samples.\n",
    "\n",
    "    A sample counts as a hit when precise() > 0.8; each sample is weighted\n",
    "    by its total interaction count plus one, with the count capped at 100.\n",
    "    \"\"\"\n",
    "    total_p = 0\n",
    "    total_c = 0\n",
    "    for predict,target in zip(predicts,targets):\n",
    "        count = min(100, sum(target))\n",
    "        p = precise(predict, target)\n",
    "        total_c += (count + 1)\n",
    "        total_p += (count + 1)*sgn(p-0.8)\n",
    "    return total_p/total_c\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precise:  0.26159521435692923\n"
     ]
    }
   ],
   "source": [
    "from sklearn.linear_model import LinearRegression\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "# rf_regressor = RandomForestRegressor(n_estimators=100, random_state=42)\n",
    "# rf_regressor.fit(train_features, train_target)\n",
    "# y_pred = rf_regressor.predict(val_features)\n",
    "# y_pred\n",
    "def round_and_none_neg(n):\n",
    "    \"\"\"Round n to the nearest integer, clamping negatives to 0.\"\"\"\n",
    "    if n < 0:\n",
    "        return 0\n",
    "    else:\n",
    "        return round(n)\n",
    "def convert_to_int(predicts):\n",
    "    \"\"\"Apply round_and_none_neg element-wise to a 2-D prediction array.\"\"\"\n",
    "    result = []\n",
    "    for predict in predicts:\n",
    "        t = []\n",
    "        for item in predict:\n",
    "            t.append(round_and_none_neg(item))\n",
    "        result.append(t)\n",
    "    return result\n",
    "\n",
    "# Baseline: ordinary least squares on the raw features.\n",
    "linear = LinearRegression()\n",
    "linear.fit(train_features, train_target)\n",
    "linear_pred = linear.predict(val_features)\n",
    "\n",
    "linear_pred = convert_to_int(linear_pred)\n",
    "print(\"precise: \",total_precise(linear_pred, val_target))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precise:  0.26159521435692923\n"
     ]
    }
   ],
   "source": [
    "from sklearn.ensemble import RandomForestRegressor\n",
    "\n",
    "# Fit a random forest and score it on the validation split.\n",
    "rf_regressor = RandomForestRegressor(n_jobs=-1)\n",
    "rf_regressor.fit(train_features, train_target)\n",
    "# BUG FIX: this previously called linear.predict(val_features), so the\n",
    "# printed score was the linear model's (identical output to the cell\n",
    "# above), never the forest's.\n",
    "rf_regressor_pred = rf_regressor.predict(val_features)\n",
    "\n",
    "rf_regressor_pred = convert_to_int(rf_regressor_pred)\n",
    "print(\"precise: \",total_precise(rf_regressor_pred, val_target))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "True\n"
     ]
    }
   ],
   "source": [
    "print(torch.cuda.is_available())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Model(nn.Module):\n",
    "    \"\"\"Small fully connected regressor: input -> 64 -> 32 -> 16 -> output.\"\"\"\n",
    "\n",
    "    def __init__(self, input_output_size: list) -> None:\n",
    "        # input_output_size = [n_input_features, n_output_targets]\n",
    "        super(Model, self).__init__()\n",
    "\n",
    "        self.fc1 = nn.Linear(in_features=input_output_size[0], out_features=64)\n",
    "        self.fc2 = nn.Linear(in_features=64, out_features=32)\n",
    "        self.fc3 = nn.Linear(in_features=32, out_features=16)\n",
    "        self.fc4 = nn.Linear(in_features=16, out_features=input_output_size[1])\n",
    "\n",
    "    def forward(self, input):\n",
    "        x = torch.relu(self.fc1(input))\n",
    "        x = torch.relu(self.fc2(x))\n",
    "        x = torch.relu(self.fc3(x))\n",
    "        x = torch.relu(self.fc4(x))# final ReLU keeps the predictions non-negative\n",
    "        return x\n",
    "\n",
    "\n",
    "class Loss(nn.Module):\n",
    "    \"\"\"Sum of absolute errors over the three target columns.\"\"\"\n",
    "\n",
    "    def __init__(self) -> None:\n",
    "        super(Loss, self).__init__()\n",
    "\n",
    "    def forward(self, predicts, targets):\n",
    "        # Columns: forward, comment, like.\n",
    "        dev_forward = torch.abs(predicts[:, 0] - targets[:, 0])\n",
    "        dev_comment = torch.abs(predicts[:, 1] - targets[:, 1])\n",
    "        dev_like = torch.abs(predicts[:, 2] - targets[:, 2])\n",
    "        return torch.sum(dev_comment)+torch.sum(dev_forward)+torch.sum(dev_like)       \n",
    "        # Alternative kept for reference: optimize the competition's\n",
    "        # precision metric directly instead of raw L1 error.\n",
    "        # dev_forward = torch.abs(predicts[:, 0] - targets[:, 0]) / (targets[:, 0] + 5)\n",
    "        # dev_comment = torch.abs(predicts[:, 1] - targets[:, 1]) / (targets[:, 1] + 3)\n",
    "        # dev_like = torch.abs(predicts[:, 2] - targets[:, 2]) / (targets[:, 2] + 3)\n",
    "\n",
    "        # # Per-sample precision.\n",
    "        # precision = 1 - 0.5 * dev_forward - 0.25 * dev_comment - 0.25 * dev_like\n",
    "        # average_precision = torch.mean(precision)\n",
    "        # return -average_precision\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/3 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [1/3], Step [100], Loss: 1386.0\n",
      "Epoch [1/3], Step [200], Loss: 1275.0\n",
      "Epoch [1/3], Step [300], Loss: 5513.0\n",
      "Epoch [1/3], Step [400], Loss: 2016.0\n",
      "Epoch [1/3], Step [500], Loss: 1008.0\n",
      "Epoch [1/3], Step [600], Loss: 604.0\n",
      "Epoch [1/3], Step [700], Loss: 890.0\n",
      "Epoch [1/3], Step [800], Loss: 1158.0\n",
      "Epoch [1/3], Step [900], Loss: 3036.0\n",
      "Epoch [1/3], Step [1000], Loss: 713.0\n",
      "Epoch [1/3], Step [1100], Loss: 676.0\n",
      "Epoch [1/3], Step [1200], Loss: 944.0\n",
      "Epoch [1/3], Step [1300], Loss: 2499.0\n",
      "Epoch [1/3], Step [1400], Loss: 621.0\n",
      "Epoch [1/3], Step [1500], Loss: 3524.0\n",
      "Epoch [1/3], Step [1600], Loss: 862.0\n",
      "Epoch [1/3], Step [1700], Loss: 4462.0\n",
      "Epoch [1/3], Step [1800], Loss: 777.0\n",
      "Epoch [1/3], Step [1900], Loss: 2057.0\n",
      "Epoch [1/3], Step [2000], Loss: 523.0\n",
      "Epoch [1/3], Step [2100], Loss: 1200.0\n",
      "Epoch [1/3], Step [2200], Loss: 1690.0\n",
      "Epoch [1/3], Step [2300], Loss: 1736.0\n",
      "Epoch [1/3], Step [2400], Loss: 1778.0\n",
      "Epoch [1/3], Step [2500], Loss: 1231.0\n",
      "Epoch [1/3], Step [2600], Loss: 3400.0\n",
      "Epoch [1/3], Step [2700], Loss: 4077.0\n",
      "Epoch [1/3], Step [2800], Loss: 1663.0\n",
      "Epoch [1/3], Step [2900], Loss: 2530.0\n",
      "Epoch [1/3], Step [3000], Loss: 2065.0\n",
      "Epoch [1/3], Step [3100], Loss: 2729.0\n",
      "Epoch [1/3], Step [3200], Loss: 1863.0\n",
      "Epoch [1/3], Step [3300], Loss: 1009.0\n",
      "Epoch [1/3], Step [3400], Loss: 814.0\n",
      "Epoch [1/3], Step [3500], Loss: 1175.0\n",
      "Epoch [1/3], Step [3600], Loss: 2838.0\n",
      "Epoch [1/3], Step [3700], Loss: 1002.0\n",
      "Epoch [1/3], Step [3800], Loss: 1126.0\n",
      "Epoch [1/3], Step [3900], Loss: 1238.0\n",
      "Epoch [1/3], Step [4000], Loss: 1082.0\n",
      "Epoch [1/3], Step [4100], Loss: 938.0\n",
      "Epoch [1/3], Step [4200], Loss: 862.0\n",
      "Epoch [1/3], Step [4300], Loss: 1186.0\n",
      "Epoch [1/3], Step [4400], Loss: 3137.0\n",
      "Epoch [1/3], Step [4500], Loss: 1207.0\n",
      "Epoch [1/3], Step [4600], Loss: 2238.0\n",
      "Epoch [1/3], Step [4700], Loss: 826.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 33%|███▎      | 1/3 [00:54<01:48, 54.24s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [2/3], Step [100], Loss: 771.0\n",
      "Epoch [2/3], Step [200], Loss: 620.0\n",
      "Epoch [2/3], Step [300], Loss: 1831.0\n",
      "Epoch [2/3], Step [400], Loss: 1554.0\n",
      "Epoch [2/3], Step [500], Loss: 3484.0\n",
      "Epoch [2/3], Step [600], Loss: 1126.0\n",
      "Epoch [2/3], Step [700], Loss: 783.0\n",
      "Epoch [2/3], Step [800], Loss: 2327.0\n",
      "Epoch [2/3], Step [900], Loss: 11307.0\n",
      "Epoch [2/3], Step [1000], Loss: 935.0\n",
      "Epoch [2/3], Step [1100], Loss: 958.0\n",
      "Epoch [2/3], Step [1200], Loss: 1266.0\n",
      "Epoch [2/3], Step [1300], Loss: 791.0\n",
      "Epoch [2/3], Step [1400], Loss: 4396.0\n",
      "Epoch [2/3], Step [1500], Loss: 517.0\n",
      "Epoch [2/3], Step [1600], Loss: 962.0\n",
      "Epoch [2/3], Step [1700], Loss: 1183.0\n",
      "Epoch [2/3], Step [1800], Loss: 614.0\n",
      "Epoch [2/3], Step [1900], Loss: 1457.0\n",
      "Epoch [2/3], Step [2000], Loss: 3822.0\n",
      "Epoch [2/3], Step [2100], Loss: 773.0\n",
      "Epoch [2/3], Step [2200], Loss: 882.0\n",
      "Epoch [2/3], Step [2300], Loss: 1356.0\n",
      "Epoch [2/3], Step [2400], Loss: 768.0\n",
      "Epoch [2/3], Step [2500], Loss: 983.0\n",
      "Epoch [2/3], Step [2600], Loss: 914.0\n",
      "Epoch [2/3], Step [2700], Loss: 1629.0\n",
      "Epoch [2/3], Step [2800], Loss: 1117.0\n",
      "Epoch [2/3], Step [2900], Loss: 1529.0\n",
      "Epoch [2/3], Step [3000], Loss: 2409.0\n",
      "Epoch [2/3], Step [3100], Loss: 1254.0\n",
      "Epoch [2/3], Step [3200], Loss: 1303.0\n",
      "Epoch [2/3], Step [3300], Loss: 1218.0\n",
      "Epoch [2/3], Step [3400], Loss: 4149.0\n",
      "Epoch [2/3], Step [3500], Loss: 2131.0\n",
      "Epoch [2/3], Step [3600], Loss: 1470.0\n",
      "Epoch [2/3], Step [3700], Loss: 1059.0\n",
      "Epoch [2/3], Step [3800], Loss: 1065.0\n",
      "Epoch [2/3], Step [3900], Loss: 1058.0\n",
      "Epoch [2/3], Step [4000], Loss: 4837.0\n",
      "Epoch [2/3], Step [4100], Loss: 968.0\n",
      "Epoch [2/3], Step [4200], Loss: 3524.0\n",
      "Epoch [2/3], Step [4300], Loss: 700.0\n",
      "Epoch [2/3], Step [4400], Loss: 920.0\n",
      "Epoch [2/3], Step [4500], Loss: 1358.0\n",
      "Epoch [2/3], Step [4600], Loss: 808.0\n",
      "Epoch [2/3], Step [4700], Loss: 1255.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 67%|██████▋   | 2/3 [01:28<00:42, 42.35s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [3/3], Step [100], Loss: 1239.0\n",
      "Epoch [3/3], Step [200], Loss: 4038.0\n",
      "Epoch [3/3], Step [300], Loss: 1654.0\n",
      "Epoch [3/3], Step [400], Loss: 1218.0\n",
      "Epoch [3/3], Step [500], Loss: 1434.0\n",
      "Epoch [3/3], Step [600], Loss: 1227.0\n",
      "Epoch [3/3], Step [700], Loss: 1006.0\n",
      "Epoch [3/3], Step [800], Loss: 981.0\n",
      "Epoch [3/3], Step [900], Loss: 1398.0\n",
      "Epoch [3/3], Step [1000], Loss: 2216.0\n",
      "Epoch [3/3], Step [1100], Loss: 971.0\n",
      "Epoch [3/3], Step [1200], Loss: 1294.0\n",
      "Epoch [3/3], Step [1300], Loss: 1145.0\n",
      "Epoch [3/3], Step [1400], Loss: 949.0\n",
      "Epoch [3/3], Step [1500], Loss: 1501.0\n",
      "Epoch [3/3], Step [1600], Loss: 1274.0\n",
      "Epoch [3/3], Step [1700], Loss: 1128.0\n",
      "Epoch [3/3], Step [1800], Loss: 884.0\n",
      "Epoch [3/3], Step [1900], Loss: 1624.0\n",
      "Epoch [3/3], Step [2000], Loss: 733.0\n",
      "Epoch [3/3], Step [2100], Loss: 2404.0\n",
      "Epoch [3/3], Step [2200], Loss: 1208.0\n",
      "Epoch [3/3], Step [2300], Loss: 1254.0\n",
      "Epoch [3/3], Step [2400], Loss: 809.0\n",
      "Epoch [3/3], Step [2500], Loss: 1142.0\n",
      "Epoch [3/3], Step [2600], Loss: 1382.0\n",
      "Epoch [3/3], Step [2700], Loss: 972.0\n",
      "Epoch [3/3], Step [2800], Loss: 1656.0\n",
      "Epoch [3/3], Step [2900], Loss: 2318.0\n",
      "Epoch [3/3], Step [3000], Loss: 1273.0\n",
      "Epoch [3/3], Step [3100], Loss: 1868.0\n",
      "Epoch [3/3], Step [3200], Loss: 1926.0\n",
      "Epoch [3/3], Step [3300], Loss: 3827.0\n",
      "Epoch [3/3], Step [3400], Loss: 1577.0\n",
      "Epoch [3/3], Step [3500], Loss: 9481.0\n",
      "Epoch [3/3], Step [3600], Loss: 1943.0\n",
      "Epoch [3/3], Step [3700], Loss: 770.0\n",
      "Epoch [3/3], Step [3800], Loss: 980.0\n",
      "Epoch [3/3], Step [3900], Loss: 1038.0\n",
      "Epoch [3/3], Step [4000], Loss: 969.0\n",
      "Epoch [3/3], Step [4100], Loss: 1446.0\n",
      "Epoch [3/3], Step [4200], Loss: 731.0\n",
      "Epoch [3/3], Step [4300], Loss: 1342.0\n",
      "Epoch [3/3], Step [4400], Loss: 1023.0\n",
      "Epoch [3/3], Step [4500], Loss: 743.0\n",
      "Epoch [3/3], Step [4600], Loss: 1243.0\n",
      "Epoch [3/3], Step [4700], Loss: 919.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 3/3 [02:01<00:00, 40.52s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Valid precision: -1491.041748046875\n"
     ]
    }
   ],
   "source": [
    "input_size = features.shape[1]\n",
    "output_size = target.shape[1]\n",
    "\n",
    "loss = Loss().to(device)\n",
    "model = Model([input_size,output_size]).to(device)\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "# All tensors are moved to the device up front, so batches need no transfer.\n",
    "train_features_tensor = torch.tensor(train_features, dtype=torch.float32).to(device)\n",
    "train_labels_tensor = torch.tensor(train_target, dtype=torch.float32).to(device)\n",
    "train_dataset = TensorDataset(train_features_tensor, train_labels_tensor)\n",
    "train_loader = DataLoader(dataset=train_dataset, batch_size=256, shuffle=True)\n",
    "\n",
    "validate_features_tensor = torch.tensor(val_features, dtype=torch.float32).to(device)\n",
    "validate_labels_tensor = torch.tensor(val_target, dtype=torch.float32).to(device)\n",
    "validate_dataset = TensorDataset(validate_features_tensor, validate_labels_tensor)\n",
    "validate_loader = DataLoader(dataset=validate_dataset, batch_size=256, shuffle=False)\n",
    "\n",
    "num_epochs = 3\n",
    "for epoch in tqdm(range(num_epochs)):\n",
    "    for i, (inputs, labels) in enumerate(train_loader):\n",
    "        # Clear accumulated gradients.\n",
    "        optimizer.zero_grad()\n",
    "        # Forward pass.\n",
    "        outputs = model(inputs)\n",
    "        l = loss(outputs, labels)\n",
    "        # Backward pass.\n",
    "        l.backward()\n",
    "        # Update weights.\n",
    "        optimizer.step()\n",
    "\n",
    "        if (i+1) % 100 == 0:\n",
    "            print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}], Loss: {l.item()}')\n",
    "\n",
    "# NOTE: a duplicate sgn() definition that used to live here was removed; it\n",
    "# silently shadowed the identical function defined in an earlier cell.\n",
    "torch.save(model, 'model.pth')\n",
    "\n",
    "model.eval()  # no dropout/batch-norm here, but eval mode is still good practice\n",
    "with torch.no_grad():\n",
    "    p_sum = 0\n",
    "    for inputs, labels in validate_loader:\n",
    "        outputs = model(inputs)\n",
    "        # Negated L1 batch loss -- a proxy score, NOT the precise() metric,\n",
    "        # which is why the printed 'precision' is a large negative number.\n",
    "        p = -loss(outputs, labels)\n",
    "        p_sum += p\n",
    "\n",
    "    print(f'Valid precision: {p_sum/len(validate_loader)}')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "predict_data = pd.read_csv(\"predict_data.csv\").drop(columns='Unnamed: 0').values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "177923"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): torch.load unpickles arbitrary objects -- only load model\n",
    "# files from trusted sources.\n",
    "model = torch.load(\"model.pth\")\n",
    "predict_features_tensor = torch.tensor(predict_data, dtype=torch.float32).to(device)\n",
    "predict_dataset = TensorDataset(predict_features_tensor)\n",
    "predict_loader = DataLoader(dataset=predict_dataset, batch_size=1024, shuffle=False)\n",
    "\n",
    "# Run batched inference without tracking gradients.\n",
    "predict_result = []\n",
    "with torch.no_grad():\n",
    "    for i, (inputs,) in enumerate(predict_loader):\n",
    "        output = model(inputs)\n",
    "        predict_result.extend(output)\n",
    "# Convert each prediction tensor to a plain Python list.\n",
    "predict_result = [item.tolist() for item in predict_result]\n",
    "len(predict_result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "177923"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Linear-model predictions for the prediction set (row-count sanity check).\n",
    "linear_pred = linear.predict(predict_data)\n",
    "len(linear_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "177923"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Random-forest predictions for the prediction set (row-count sanity check).\n",
    "rf_regressor_pred = rf_regressor.predict(predict_data)\n",
    "len(rf_regressor_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 177923/177923 [00:12<00:00, 13755.87it/s]\n"
     ]
    }
   ],
   "source": [
    "# Choose which model's predictions to export.\n",
    "input_predict = rf_regressor_pred\n",
    "# Raw posts for the prediction period; tab-separated, no header row.\n",
    "weibo_predict_data = pd.read_csv(\"weibo_predict_data.txt\", sep=\"\\t\", header=None, names=[\"uid\", \"mid\", \"time\", \"content\"])\n",
    "file_out = []\n",
    "\n",
    "# NOTE(review): indexing input_predict by the iterrows() index assumes the\n",
    "# rows of predict_data align 1:1 with weibo_predict_data -- verify upstream.\n",
    "for index, row in tqdm(weibo_predict_data.iterrows(), total=len(weibo_predict_data)):\n",
    "    file_out.append(\"{0}\\t{1}\\t{2},{3},{4}\\n\".format(row[\"uid\"],row[\"mid\"],round_and_none_neg(input_predict[index][0]),round_and_none_neg(input_predict[index][1]),round_and_none_neg(input_predict[index][2])))\n",
    "\n",
    "# Write one 'uid<TAB>mid<TAB>forward,comment,like' line per post.\n",
    "with open(\"answer_rf_mix_data.txt\", \"w\", encoding=\"utf-8\") as f:\n",
    "    f.writelines(file_out)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch2",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
