{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入相关的库\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "from sklearn.preprocessing import StandardScaler\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the raw train/test CSVs.\n",
    "# NOTE(review): hardcoded absolute Windows paths — this only runs on the\n",
    "# author's machine; consider a configurable DATA_DIR instead.\n",
    "data_train = pd.read_csv(r\"G:\\Course Materials\\数据挖掘\\HW3 分类模型\\forest_train_data.csv\")\n",
    "data_test = pd.read_csv(r\"G:\\Course Materials\\数据挖掘\\HW3 分类模型\\forest_test_data.csv\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Unnamed: 0 is continuity fearure\n",
      "Elevation is continuity fearure\n",
      "Aspect is continuity fearure\n",
      "Slope is continuity fearure\n",
      "Horizontal_Distance_To_Hydrology is continuity fearure\n",
      "Vertical_Distance_To_Hydrology is continuity fearure\n",
      "Horizontal_Distance_To_Roadways is continuity fearure\n",
      "Hillshade_9am is continuity fearure\n",
      "Hillshade_Noon is continuity fearure\n",
      "Hillshade_3pm is continuity fearure\n",
      "Horizontal_Distance_To_Fire_Points is continuity fearure\n",
      "Wilderness_Area1 is discrete fearure\n",
      "Wilderness_Area2 is discrete fearure\n",
      "Wilderness_Area3 is discrete fearure\n",
      "Wilderness_Area4 is discrete fearure\n",
      "Soil_Type1 is discrete fearure\n",
      "Soil_Type2 is discrete fearure\n",
      "Soil_Type3 is discrete fearure\n",
      "Soil_Type4 is discrete fearure\n",
      "Soil_Type5 is discrete fearure\n",
      "Soil_Type6 is discrete fearure\n",
      "Soil_Type7 is discrete fearure\n",
      "Soil_Type8 is discrete fearure\n",
      "Soil_Type9 is discrete fearure\n",
      "Soil_Type10 is discrete fearure\n",
      "Soil_Type11 is discrete fearure\n",
      "Soil_Type12 is discrete fearure\n",
      "Soil_Type13 is discrete fearure\n",
      "Soil_Type14 is discrete fearure\n",
      "Soil_Type15 is discrete fearure\n",
      "Soil_Type16 is discrete fearure\n",
      "Soil_Type17 is discrete fearure\n",
      "Soil_Type18 is discrete fearure\n",
      "Soil_Type19 is discrete fearure\n",
      "Soil_Type20 is discrete fearure\n",
      "Soil_Type21 is discrete fearure\n",
      "Soil_Type22 is discrete fearure\n",
      "Soil_Type23 is discrete fearure\n",
      "Soil_Type24 is discrete fearure\n",
      "Soil_Type25 is discrete fearure\n",
      "Soil_Type26 is discrete fearure\n",
      "Soil_Type27 is discrete fearure\n",
      "Soil_Type28 is discrete fearure\n",
      "Soil_Type29 is discrete fearure\n",
      "Soil_Type30 is discrete fearure\n",
      "Soil_Type31 is discrete fearure\n",
      "Soil_Type32 is discrete fearure\n",
      "Soil_Type33 is discrete fearure\n",
      "Soil_Type34 is discrete fearure\n",
      "Soil_Type35 is discrete fearure\n",
      "Soil_Type36 is discrete fearure\n",
      "Soil_Type37 is discrete fearure\n",
      "Soil_Type38 is discrete fearure\n",
      "Soil_Type39 is discrete fearure\n",
      "Soil_Type40 is discrete fearure\n",
      "Cover_Type is discrete fearure\n",
      "continuity count: 11,\n",
      " discrete count: 45,\n",
      " all count: 56\n"
     ]
    }
   ],
   "source": [
    "# Classify each column as continuous (>10 distinct values) or discrete,\n",
    "# then report the counts. The 10-distinct-value cutoff is a heuristic.\n",
    "continuity_count = 0\n",
    "discrete_count = 0\n",
    "\n",
    "for column_name in data_train.columns:\n",
    "    # nunique() is the idiomatic distinct-value count\n",
    "    # (was value_counts().count()).\n",
    "    if data_train[column_name].nunique() > 10:\n",
    "        print(f'{column_name} is continuity feature')  # typo 'fearure' fixed\n",
    "        continuity_count += 1\n",
    "    else:\n",
    "        print(f'{column_name} is discrete feature')\n",
    "        discrete_count += 1\n",
    "\n",
    "print(f'continuity count: {continuity_count},\\n discrete count: {discrete_count},\\n all count: {len(data_train.columns)}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-shot descriptive summary of every column, exported for offline inspection.\n",
    "# NOTE: pandas_summary is a third-party package (pip install pandas-summary).\n",
    "from pandas_summary import DataFrameSummary\n",
    "data_s = DataFrameSummary(data_train)\n",
    "data_s.summary().to_csv('summary.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Index(['Unnamed: 0', 'Elevation', 'Aspect', 'Slope',\n",
      "       'Horizontal_Distance_To_Hydrology', 'Vertical_Distance_To_Hydrology',\n",
      "       'Horizontal_Distance_To_Roadways', 'Hillshade_9am', 'Hillshade_Noon',\n",
      "       'Hillshade_3pm', 'Horizontal_Distance_To_Fire_Points',\n",
      "       'Wilderness_Area1', 'Wilderness_Area2', 'Wilderness_Area3',\n",
      "       'Wilderness_Area4', 'Soil_Type1', 'Soil_Type2', 'Soil_Type3',\n",
      "       'Soil_Type4', 'Soil_Type5', 'Soil_Type6', 'Soil_Type7', 'Soil_Type8',\n",
      "       'Soil_Type9', 'Soil_Type10', 'Soil_Type11', 'Soil_Type12',\n",
      "       'Soil_Type13', 'Soil_Type14', 'Soil_Type15', 'Soil_Type16',\n",
      "       'Soil_Type17', 'Soil_Type18', 'Soil_Type19', 'Soil_Type20',\n",
      "       'Soil_Type21', 'Soil_Type22', 'Soil_Type23', 'Soil_Type24',\n",
      "       'Soil_Type25', 'Soil_Type26', 'Soil_Type27', 'Soil_Type28',\n",
      "       'Soil_Type29', 'Soil_Type30', 'Soil_Type31', 'Soil_Type32',\n",
      "       'Soil_Type33', 'Soil_Type34', 'Soil_Type35', 'Soil_Type36',\n",
      "       'Soil_Type37', 'Soil_Type38', 'Soil_Type39', 'Soil_Type40',\n",
      "       'Cover_Type'],\n",
      "      dtype='object') <class 'pandas.core.frame.DataFrame'>\n",
      "Elevation\n"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Quick look at the 10 continuous features (columns 1-10): histogram + KDE.\n",
    "for i in range(1, 11):\n",
    "    col = data_train.columns[i]\n",
    "    sns.histplot(data_train.iloc[:, i], bins=30, kde=True, color='blue', alpha=0.5)\n",
    "    plt.title(f'{col} Histogram')  # was f'{...}Histogram' (missing space)\n",
    "    plt.xlabel(col)\n",
    "    plt.ylabel('Frequency')\n",
    "    plt.show()\n",
    "    plt.close()  # avoid accumulating open figures across loop iterations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "def data_split(data):\n",
    "    train = data[data['ch'] == 'train']\n",
    "    test = data[data['ch'] == 'test']\n",
    "\n",
    "    X_train = train.iloc[:, :-2]\n",
    "    y_train = train.loc[:,['Cover_Type']]\n",
    "    X_test = test.iloc[:, :-2]\n",
    "    y_test = test.loc[:,['Cover_Type']]\n",
    "\n",
    "    tensor_dict = {}\n",
    "    for data ,name in zip([X_train,X_test,y_train,y_test],['X_train','X_test','y_train','y_test']):\n",
    "        if data.shape[1] > 1:\n",
    "            tensor_dict[f'{name}_tensor'] = torch.tensor(data.to_numpy(),dtype=torch.float32)\n",
    "        else:\n",
    "            tensor_dict[f'{name}_tensor'] = torch.tensor(data.to_numpy().reshape(-1,1),dtype=torch.float32).view(-1,1)\n",
    "    \n",
    "    train_X = tensor_dict['X_train_tensor']\n",
    "    train_y = tensor_dict['y_train_tensor']\n",
    "    test_X = tensor_dict['X_test_tensor']\n",
    "    test_y = tensor_dict['y_test_tensor']\n",
    "\n",
    "    return train_X, train_y, test_X, test_y\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Merge train and test, standardize the continuous columns, then split\n",
    "# back into train/test tensors via data_split.\n",
    "# NOTE(review): the scalers are fit on train+test combined, which leaks test\n",
    "# statistics into training — confirm this is acceptable for the assignment.\n",
    "data_train['ch'] = 'train'\n",
    "data_test['ch'] = 'test'\n",
    "data = pd.concat([data_train, data_test], ignore_index=True)\n",
    "data = data.drop(columns=['Unnamed: 0'])\n",
    "stand_var = {}\n",
    "for column_name, column_data in data.items():\n",
    "    # A scaler object is created for every column, but only columns with\n",
    "    # >10 distinct values (the continuous ones) are actually fit/transformed;\n",
    "    # the remaining dict entries stay unfitted.\n",
    "    stand_var[f'{column_name}_scaler'] = StandardScaler()\n",
    "    if data[column_name].value_counts().count() > 10:\n",
    "        data[column_name]  = stand_var[f'{column_name}_scaler'].fit_transform(data[[column_name]])\n",
    "\n",
    "X_train, y_train, X_test, y_test = data_split(data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_data(model, optimizer, criterion, \n",
    "               X_train, y_train, X_test, y_test,\n",
    "               batch_size = 32, epoches = 100):\n",
    "    train_losses = []\n",
    "    test_losses = []\n",
    "    grad_list = []\n",
    "    \n",
    "    train_dataset = TensorDataset(X_train, y_train)\n",
    "    train_loader = DataLoader(train_dataset, batch_size = batch_size, shuffle = True)\n",
    "\n",
    "    for rpoch in range(epoches):\n",
    "        model.train()\n",
    "        loss_process = 0\n",
    "        for batch_X, batch_y in train_loader:\n",
    "            optimizer.zero_grad()\n",
    "            batch_x = model(batch_X)\n",
    "            loss = criterion(batch_x, batch_y)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            loss_process += loss.item()\n",
    "        avg_loss = loss_process / len(train_loader)\n",
    "        train_losses.append(avg_loss)\n",
    "        \n",
    "        for param in model.parameters():\n",
    "            if param.grad is not None:\n",
    "                grad_list.append(param.grad.numpy())\n",
    "        \n",
    "        model.eval()\n",
    "        with torch.no_grad():\n",
    "            test_x = model(X_test)\n",
    "            test_loss = criterion(test_x, y_test)\n",
    "            test_losses.append(test_loss.item())\n",
    "    \n",
    "    return train_losses, test_losses, grad_list "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "根据上述的简单操作可以对数据集进行以下描述：\n",
    "数据集：\n",
    "1、数据集的第1列为编号数据，可以删除；第2列到第11列为连续型特征，第12列到第55列为离散型特征，最后一列为标识数据。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
