{
 "cells": [
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# This Python 3 environment comes with many helpful analytics libraries installed\n",
    "# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n",
    "# For example, here's several helpful packages to load\n",
    "\n",
    "import numpy as np # linear algebra\n",
    "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
    "\n",
    "# Input data files are available in the read-only \"../input/\" directory\n",
    "# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n",
    "\n",
    "import os\n",
    "for dirname, _, filenames in os.walk('/kaggle/input'):\n",
    "    for filename in filenames:\n",
    "        print(os.path.join(dirname, filename))\n",
    "\n",
    "# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n",
    "# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session"
   ],
   "id": "4dfb179210e2e123"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# Load the competition data. `sample_submission` only documents the required\n",
    "# output format (the old name `data_upgrade` was misleading; it is never used\n",
    "# for modelling).\n",
    "sample_submission = pd.read_csv(\"/kaggle/input/playground-series-s5e7/sample_submission.csv\")\n",
    "data_train = pd.read_csv(\"/kaggle/input/playground-series-s5e7/train.csv\")\n",
    "data_test = pd.read_csv(\"/kaggle/input/playground-series-s5e7/test.csv\")\n",
    "# Last expression -> rich HTML display instead of a plain-text print.\n",
    "data_train.head(10)"
   ],
   "id": "1205482d0cd6c6c0"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Visualize the class balance of the target column.\n",
    "value_counts = data_train[\"Personality\"].value_counts()\n",
    "\n",
    "# Explicit figure/axes interface; one color per class (the target is binary,\n",
    "# so the old third color was never used).\n",
    "fig, ax = plt.subplots(figsize=(8, 6))\n",
    "ax.pie(value_counts,\n",
    "       labels=value_counts.index,\n",
    "       autopct='%1.1f%%',\n",
    "       startangle=90,\n",
    "       colors=['#ff9999', '#66b3ff'])\n",
    "# Old title said 'Category' but this plots the Personality column.\n",
    "ax.set_title('Personality class distribution', fontsize=15)\n",
    "ax.axis('equal')  # keep the pie circular\n",
    "plt.show()\n"
   ],
   "id": "2496345c2bcf0bde"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "def fill_with_group_mode(df, df2, group_col, fill_cols):\n",
    "    \"\"\"Fill missing values in `fill_cols` for a train and a test frame.\n",
    "\n",
    "    Parameters:\n",
    "    df        : training frame; NaNs are filled with the per-`group_col` mode.\n",
    "    df2       : test frame; it has no `group_col` (the target), so its NaNs\n",
    "                are filled with the overall train mode of each column.\n",
    "    group_col : column of `df` used for grouping.\n",
    "    fill_cols : list of column names to impute.\n",
    "\n",
    "    Bug fixed: the old code filled `df2` with a Series indexed by `df`'s rows,\n",
    "    so fillna aligned train rows to test rows purely by index position and\n",
    "    produced arbitrary imputations for the test set.\n",
    "    \"\"\"\n",
    "    for col in fill_cols:\n",
    "        # Per-group mode, broadcast back onto df's own rows.\n",
    "        group_modes = df.groupby(group_col)[col].transform(\n",
    "            lambda x: x.mode()[0] if not x.mode().empty else np.nan)\n",
    "        df[col] = df[col].fillna(group_modes)\n",
    "        # Test rows cannot be grouped by the target: use the global train mode.\n",
    "        overall = df[col].mode()\n",
    "        if not overall.empty:\n",
    "            df2[col] = df2[col].fillna(overall.iloc[0])\n",
    "    return df, df2\n",
    "\n",
    "# Impute every column except the target (last column).\n",
    "others = list(data_train.columns)[:-1]\n",
    "X_train, X_test = fill_with_group_mode(data_train, data_test, 'Personality', others)\n",
    "print(data_train.head(10))"
   ],
   "id": "5a8e68fddddafc82"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "from sklearn.preprocessing import LabelEncoder\n",
    "\n",
    "# Encode the target labels as integers (alphabetical: Extrovert=0, Introvert=1).\n",
    "le = LabelEncoder()\n",
    "y_train = le.fit_transform(data_train['Personality'])\n",
    "\n",
    "# Drop the target from the feature frame. The old code first made a pointless\n",
    "# full-column self-copy and then dropped with inplace=True, mutating a frame\n",
    "# that aliases data_train; a plain drop() returns a new frame instead.\n",
    "X_train = X_train.drop(columns='Personality')\n",
    "print(X_train.head())"
   ],
   "id": "133bd3dbb1700a10"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "from sklearn.preprocessing import OneHotEncoder\n",
    "\n",
    "# One-hot encode the two binary categorical columns; fit on the train set only.\n",
    "# `sparse_output` replaces the `sparse` argument removed in sklearn 1.4.\n",
    "ohe = OneHotEncoder(handle_unknown='ignore', sparse_output=False)\n",
    "category_column = [\"Stage_fear\", \"Drained_after_socializing\"]\n",
    "\n",
    "ohe.fit(X_train[category_column])\n",
    "train_encoded = ohe.transform(X_train[category_column])\n",
    "test_encoded = ohe.transform(X_test[category_column])\n",
    "\n",
    "# Column names straight from the fitted encoder, so each feature keeps its own\n",
    "# category set (the old code reused categories_[0] for both columns).\n",
    "columns = ohe.get_feature_names_out(category_column)\n",
    "\n",
    "# Keep the remaining columns in a deterministic order; list(set(...)) made the\n",
    "# order depend on string hashing and hence non-reproducible across runs.\n",
    "# NOTE(review): 'id' is still among the features fed to the model -- it should\n",
    "# probably be dropped here; kept for now to preserve behaviour.\n",
    "numeric_cols = [c for c in X_train.columns if c not in category_column]\n",
    "X_train = X_train[numeric_cols]\n",
    "X_test = X_test[numeric_cols]\n",
    "\n",
    "train_ohe = pd.DataFrame(train_encoded, columns=columns, index=X_train.index)\n",
    "test_ohe = pd.DataFrame(test_encoded, columns=columns, index=X_test.index)\n",
    "\n",
    "# Merge the encoded columns back into the feature frames.\n",
    "X_train = pd.concat([X_train, train_ohe], axis=1)\n",
    "X_test = pd.concat([X_test, test_ohe], axis=1)\n",
    "print(X_train.head())"
   ],
   "id": "43df24ae061cd3bc"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "from sklearn.preprocessing import StandardScaler\n",
    "\n",
    "# Standardize features: fit on the train set only, then apply the same\n",
    "# mean/std parameters to the test set (no test-set leakage into the scaler).\n",
    "scaler = StandardScaler()\n",
    "scaler.fit(X_train)\n",
    "\n",
    "X_train = scaler.transform(X_train)\n",
    "X_test = scaler.transform(X_test)\n",
    "# Show a small sample instead of dumping the entire array into the output.\n",
    "print(X_train[:5])"
   ],
   "id": "f529ce4601564716"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.metrics import accuracy_score, classification_report\n",
    "\n",
    "# Distance-weighted KNN: closer neighbours get a larger voting weight.\n",
    "knn_weighted = KNeighborsClassifier(\n",
    "    n_neighbors=5,\n",
    "    weights='distance',\n",
    "    metric='minkowski',  # with p=2 this is Euclidean distance; p=1 is Manhattan\n",
    "    p=2,\n",
    ")\n",
    "knn_weighted.fit(X_train, y_train)\n",
    "\n",
    "# NOTE: this is TRAINING accuracy. With weights='distance' every point is its\n",
    "# own zero-distance neighbour, so this is ~1.0 by construction and says nothing\n",
    "# about generalization -- use a held-out split or cross-validation to tune k.\n",
    "y_pred = knn_weighted.predict(X_train)\n",
    "print(f\"Train accuracy (optimistic): {accuracy_score(y_train, y_pred):.4f}\")\n",
    "\n",
    "print(\"\\nClassification report (train):\")\n",
    "print(classification_report(y_train, y_pred, target_names=le.classes_))"
   ],
   "id": "9b46f21fd746212"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# Predict on the test set, map integer labels back to the original strings,\n",
    "# and write the submission file in the required (id, Personality) format.\n",
    "test_pred = knn_weighted.predict(X_test)\n",
    "result = pd.DataFrame({\"id\": data_test[\"id\"]})\n",
    "result[\"Personality\"] = le.inverse_transform(test_pred)\n",
    "result.to_csv(\"/kaggle/working/submission.csv\", index=False)\n",
    "print(result)"
   ],
   "id": "cf2cbcb054f2e6be"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# (Removed dead code) This cell held a fully commented-out exploratory scatter\n",
    "# plot of Time_spent_Alone vs Friends_circle_size. Commented-out scratch code\n",
    "# is dropped from the final narrative; recover it from version history if the\n",
    "# visualization is still wanted."
   ],
   "id": "1e02ab3a398804a3"
  }
 ],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 5
}
