{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from pyspark.sql import Row,functions\n",
    "from pyspark.ml.linalg import Vector,Vectors\n",
    "from pyspark.ml.evaluation import MulticlassClassificationEvaluator\n",
    "from pyspark.ml import Pipeline\n",
    "from pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer,HashingTF, Tokenizer\n",
    "from pyspark.ml.classification import LogisticRegression,LogisticRegressionModel,BinaryLogisticRegressionSummary, LogisticRegression\n",
    "from pyspark.sql import SparkSession\n",
    "spark = SparkSession.builder.master(\"local\").appName(\"Word Count\").getOrCreate()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
    "data=spark.sparkContext.textFile(\"LoanStats3a.csv\")"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+---------+----------+-----+--------------+----------+------------------+----------+-----------+\n",
      "|int_rate|loan_amnt|      term|grade|home_ownership|annual_inc|           purpose|emp_length|loan_status|\n",
      "+--------+---------+----------+-----+--------------+----------+------------------+----------+-----------+\n",
      "|  10.65%|     5000| 36 months|    B|          RENT|   24000.0|       credit_card| 10+ years| Fully Paid|\n",
      "|  15.27%|     2500| 60 months|    C|          RENT|   30000.0|               car|  < 1 year|Charged Off|\n",
      "|  15.96%|     2400| 36 months|    C|          RENT|   12252.0|    small_business| 10+ years| Fully Paid|\n",
      "|  13.49%|    10000| 36 months|    C|          RENT|   49200.0|             other| 10+ years| Fully Paid|\n",
      "|  12.69%|     3000| 60 months|    B|          RENT|   80000.0|             other|    1 year| Fully Paid|\n",
      "|   7.90%|     5000| 36 months|    A|          RENT|   36000.0|           wedding|   3 years| Fully Paid|\n",
      "|  15.96%|     7000| 60 months|    C|          RENT|   47004.0|debt_consolidation|   8 years| Fully Paid|\n",
      "|  18.64%|     3000| 36 months|    E|          RENT|   48000.0|               car|   9 years| Fully Paid|\n",
      "|  21.28%|     5600| 60 months|    F|           OWN|   40000.0|    small_business|   4 years|Charged Off|\n",
      "|  12.69%|     5375| 60 months|    B|          RENT|   15000.0|             other|  < 1 year|Charged Off|\n",
      "|  14.65%|     6500| 60 months|    C|           OWN|   72000.0|debt_consolidation|   5 years| Fully Paid|\n",
      "|  12.69%|    12000| 36 months|    B|           OWN|   75000.0|debt_consolidation| 10+ years| Fully Paid|\n",
      "|  13.49%|     9000| 36 months|    C|          RENT|   30000.0|debt_consolidation|  < 1 year|Charged Off|\n",
      "|   9.91%|     3000| 36 months|    B|          RENT|   15000.0|       credit_card|   3 years| Fully Paid|\n",
      "|  10.65%|    10000| 36 months|    B|          RENT|  100000.0|             other|   3 years|Charged Off|\n",
      "|  16.29%|     1000| 36 months|    D|          RENT|   28000.0|debt_consolidation|  < 1 year| Fully Paid|\n",
      "|  15.27%|    10000| 36 months|    C|          RENT|   42000.0|  home_improvement|   4 years| Fully Paid|\n",
      "|   6.03%|     3600| 36 months|    A|      MORTGAGE|  110000.0|    major_purchase| 10+ years| Fully Paid|\n",
      "|  11.71%|     6000| 36 months|    B|      MORTGAGE|   84000.0|           medical|    1 year| Fully Paid|\n",
      "|   6.03%|     9200| 36 months|    A|          RENT|  77385.19|debt_consolidation|   6 years| Fully Paid|\n",
      "+--------+---------+----------+-----+--------------+----------+------------------+----------+-----------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark.sql.functions import regexp_replace\n",
    "\n",
    "# 创建SparkSession\n",
    "spark = SparkSession.builder.getOrCreate()\n",
    "\n",
    "# 读取CSV文件\n",
    "df = spark.read.csv('LoanStats3a.csv', header=True, inferSchema=True)\n",
    "\n",
    "# 保留指定的列\n",
    "selected_columns = ['int_rate', 'loan_amnt', 'term', 'grade', 'home_ownership', 'annual_inc', 'purpose','emp_length','loan_status']\n",
    "df = df.select(selected_columns)\n",
    "# 显示处理后的数据\n",
    "df.show()\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|int_rate|loan_amnt|term|grade|home_ownership|annual_inc|           purpose|emp_length|loan_status|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|  10.65%|     5000|  36|    B|          RENT|   24000.0|       credit_card| 10+ years| Fully Paid|\n",
      "|  15.27%|     2500|  60|    C|          RENT|   30000.0|               car|  < 1 year|Charged Off|\n",
      "|  15.96%|     2400|  36|    C|          RENT|   12252.0|    small_business| 10+ years| Fully Paid|\n",
      "|  13.49%|    10000|  36|    C|          RENT|   49200.0|             other| 10+ years| Fully Paid|\n",
      "|  12.69%|     3000|  60|    B|          RENT|   80000.0|             other|    1 year| Fully Paid|\n",
      "|   7.90%|     5000|  36|    A|          RENT|   36000.0|           wedding|   3 years| Fully Paid|\n",
      "|  15.96%|     7000|  60|    C|          RENT|   47004.0|debt_consolidation|   8 years| Fully Paid|\n",
      "|  18.64%|     3000|  36|    E|          RENT|   48000.0|               car|   9 years| Fully Paid|\n",
      "|  21.28%|     5600|  60|    F|           OWN|   40000.0|    small_business|   4 years|Charged Off|\n",
      "|  12.69%|     5375|  60|    B|          RENT|   15000.0|             other|  < 1 year|Charged Off|\n",
      "|  14.65%|     6500|  60|    C|           OWN|   72000.0|debt_consolidation|   5 years| Fully Paid|\n",
      "|  12.69%|    12000|  36|    B|           OWN|   75000.0|debt_consolidation| 10+ years| Fully Paid|\n",
      "|  13.49%|     9000|  36|    C|          RENT|   30000.0|debt_consolidation|  < 1 year|Charged Off|\n",
      "|   9.91%|     3000|  36|    B|          RENT|   15000.0|       credit_card|   3 years| Fully Paid|\n",
      "|  10.65%|    10000|  36|    B|          RENT|  100000.0|             other|   3 years|Charged Off|\n",
      "|  16.29%|     1000|  36|    D|          RENT|   28000.0|debt_consolidation|  < 1 year| Fully Paid|\n",
      "|  15.27%|    10000|  36|    C|          RENT|   42000.0|  home_improvement|   4 years| Fully Paid|\n",
      "|   6.03%|     3600|  36|    A|      MORTGAGE|  110000.0|    major_purchase| 10+ years| Fully Paid|\n",
      "|  11.71%|     6000|  36|    B|      MORTGAGE|   84000.0|           medical|    1 year| Fully Paid|\n",
      "|   6.03%|     9200|  36|    A|          RENT|  77385.19|debt_consolidation|   6 years| Fully Paid|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 删除\"term\"列中的\"months\"\n",
    "df = df.withColumn(\"term\", regexp_replace(\"term\", \" months\", \"\"))\n",
    "df.show()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|int_rate|loan_amnt|term|grade|home_ownership|annual_inc|           purpose|emp_length|loan_status|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|  10.65%|     5000|  36|    B|          RENT|   24000.0|       credit_card|        10| Fully Paid|\n",
      "|  15.27%|     2500|  60|    C|          RENT|   30000.0|               car|         0|Charged Off|\n",
      "|  15.96%|     2400|  36|    C|          RENT|   12252.0|    small_business|        10| Fully Paid|\n",
      "|  13.49%|    10000|  36|    C|          RENT|   49200.0|             other|        10| Fully Paid|\n",
      "|  12.69%|     3000|  60|    B|          RENT|   80000.0|             other|         1| Fully Paid|\n",
      "|   7.90%|     5000|  36|    A|          RENT|   36000.0|           wedding|         3| Fully Paid|\n",
      "|  15.96%|     7000|  60|    C|          RENT|   47004.0|debt_consolidation|         8| Fully Paid|\n",
      "|  18.64%|     3000|  36|    E|          RENT|   48000.0|               car|         9| Fully Paid|\n",
      "|  21.28%|     5600|  60|    F|           OWN|   40000.0|    small_business|         4|Charged Off|\n",
      "|  12.69%|     5375|  60|    B|          RENT|   15000.0|             other|         0|Charged Off|\n",
      "|  14.65%|     6500|  60|    C|           OWN|   72000.0|debt_consolidation|         5| Fully Paid|\n",
      "|  12.69%|    12000|  36|    B|           OWN|   75000.0|debt_consolidation|        10| Fully Paid|\n",
      "|  13.49%|     9000|  36|    C|          RENT|   30000.0|debt_consolidation|         0|Charged Off|\n",
      "|   9.91%|     3000|  36|    B|          RENT|   15000.0|       credit_card|         3| Fully Paid|\n",
      "|  10.65%|    10000|  36|    B|          RENT|  100000.0|             other|         3|Charged Off|\n",
      "|  16.29%|     1000|  36|    D|          RENT|   28000.0|debt_consolidation|         0| Fully Paid|\n",
      "|  15.27%|    10000|  36|    C|          RENT|   42000.0|  home_improvement|         4| Fully Paid|\n",
      "|   6.03%|     3600|  36|    A|      MORTGAGE|  110000.0|    major_purchase|        10| Fully Paid|\n",
      "|  11.71%|     6000|  36|    B|      MORTGAGE|   84000.0|           medical|         1| Fully Paid|\n",
      "|   6.03%|     9200|  36|    A|          RENT|  77385.19|debt_consolidation|         6| Fully Paid|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark.sql.functions import regexp_replace, when\n",
    "\n",
    "\n",
    "# 使用 when 和 regexp_replace 函数对 emp_length 列进行编码\n",
    "df = df.withColumn('emp_length',\n",
    "                   when(df['emp_length'] == '10+ years', 10)\n",
    "                   .when(df['emp_length'] == '< 1 year', 0)\n",
    "                   .when(df['emp_length'] == '1 year', 1)\n",
    "                   .when(df['emp_length'] == '2 years', 2)\n",
    "                   .when(df['emp_length'] == '3 years', 3)\n",
    "                   .when(df['emp_length'] == '4 years', 4)\n",
    "                   .when(df['emp_length'] == '5 years', 5)\n",
    "                   .when(df['emp_length'] == '6 years', 6)\n",
    "                   .when(df['emp_length'] == '7 years', 7)\n",
    "                   .when(df['emp_length'] == '8 years', 8)\n",
    "                   .when(df['emp_length'] == '9 years', 9)\n",
    "                   .otherwise(None))\n",
    "\n",
    "# 将 emp_length 列的数据类型转换为整型\n",
    "df = df.withColumn('emp_length', df['emp_length'].cast('integer'))\n",
    "\n",
    "# 显示处理后的结果\n",
    "df.show()\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+---------+----+-----+--------------+----------+-------+----------+-----------+\n",
      "|int_rate|loan_amnt|term|grade|home_ownership|annual_inc|purpose|emp_length|loan_status|\n",
      "+--------+---------+----+-----+--------------+----------+-------+----------+-----------+\n",
      "|3       |3        |3   |3    |3             |7         |3      |1115      |3          |\n",
      "+--------+---------+----+-----+--------------+----------+-------+----------+-----------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql.functions import isnan, when, count\n",
    "# 检查每行是否存在缺失值\n",
    "missing_values = df.select([count(when(isnan(c) | df[c].isNull(), c)).alias(c) for c in df.columns])\n",
    "# 显示每行缺失值的统计结果\n",
    "missing_values.show(truncate=False)"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|int_rate|loan_amnt|term|grade|home_ownership|annual_inc|           purpose|emp_length|loan_status|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|  10.65%|     5000|  36|    B|          RENT|   24000.0|       credit_card|        10| Fully Paid|\n",
      "|  15.27%|     2500|  60|    C|          RENT|   30000.0|               car|         0|Charged Off|\n",
      "|  15.96%|     2400|  36|    C|          RENT|   12252.0|    small_business|        10| Fully Paid|\n",
      "|  13.49%|    10000|  36|    C|          RENT|   49200.0|             other|        10| Fully Paid|\n",
      "|  12.69%|     3000|  60|    B|          RENT|   80000.0|             other|         1| Fully Paid|\n",
      "|   7.90%|     5000|  36|    A|          RENT|   36000.0|           wedding|         3| Fully Paid|\n",
      "|  15.96%|     7000|  60|    C|          RENT|   47004.0|debt_consolidation|         8| Fully Paid|\n",
      "|  18.64%|     3000|  36|    E|          RENT|   48000.0|               car|         9| Fully Paid|\n",
      "|  21.28%|     5600|  60|    F|           OWN|   40000.0|    small_business|         4|Charged Off|\n",
      "|  12.69%|     5375|  60|    B|          RENT|   15000.0|             other|         0|Charged Off|\n",
      "|  14.65%|     6500|  60|    C|           OWN|   72000.0|debt_consolidation|         5| Fully Paid|\n",
      "|  12.69%|    12000|  36|    B|           OWN|   75000.0|debt_consolidation|        10| Fully Paid|\n",
      "|  13.49%|     9000|  36|    C|          RENT|   30000.0|debt_consolidation|         0|Charged Off|\n",
      "|   9.91%|     3000|  36|    B|          RENT|   15000.0|       credit_card|         3| Fully Paid|\n",
      "|  10.65%|    10000|  36|    B|          RENT|  100000.0|             other|         3|Charged Off|\n",
      "|  16.29%|     1000|  36|    D|          RENT|   28000.0|debt_consolidation|         0| Fully Paid|\n",
      "|  15.27%|    10000|  36|    C|          RENT|   42000.0|  home_improvement|         4| Fully Paid|\n",
      "|   6.03%|     3600|  36|    A|      MORTGAGE|  110000.0|    major_purchase|        10| Fully Paid|\n",
      "|  11.71%|     6000|  36|    B|      MORTGAGE|   84000.0|           medical|         1| Fully Paid|\n",
      "|   6.03%|     9200|  36|    A|          RENT|  77385.19|debt_consolidation|         6| Fully Paid|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 删除含有缺失值的数据行\n",
    "df = df.dropna()\n",
    "# 显示处理后的数据\n",
    "df.show()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|int_rate|loan_amnt|term|grade|home_ownership|annual_inc|           purpose|emp_length|loan_status|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|   10.65|     5000|  36|    B|          RENT|   24000.0|       credit_card|        10| Fully Paid|\n",
      "|   15.27|     2500|  60|    C|          RENT|   30000.0|               car|         0|Charged Off|\n",
      "|   15.96|     2400|  36|    C|          RENT|   12252.0|    small_business|        10| Fully Paid|\n",
      "|   13.49|    10000|  36|    C|          RENT|   49200.0|             other|        10| Fully Paid|\n",
      "|   12.69|     3000|  60|    B|          RENT|   80000.0|             other|         1| Fully Paid|\n",
      "|     7.9|     5000|  36|    A|          RENT|   36000.0|           wedding|         3| Fully Paid|\n",
      "|   15.96|     7000|  60|    C|          RENT|   47004.0|debt_consolidation|         8| Fully Paid|\n",
      "|   18.64|     3000|  36|    E|          RENT|   48000.0|               car|         9| Fully Paid|\n",
      "|   21.28|     5600|  60|    F|           OWN|   40000.0|    small_business|         4|Charged Off|\n",
      "|   12.69|     5375|  60|    B|          RENT|   15000.0|             other|         0|Charged Off|\n",
      "|   14.65|     6500|  60|    C|           OWN|   72000.0|debt_consolidation|         5| Fully Paid|\n",
      "|   12.69|    12000|  36|    B|           OWN|   75000.0|debt_consolidation|        10| Fully Paid|\n",
      "|   13.49|     9000|  36|    C|          RENT|   30000.0|debt_consolidation|         0|Charged Off|\n",
      "|    9.91|     3000|  36|    B|          RENT|   15000.0|       credit_card|         3| Fully Paid|\n",
      "|   10.65|    10000|  36|    B|          RENT|  100000.0|             other|         3|Charged Off|\n",
      "|   16.29|     1000|  36|    D|          RENT|   28000.0|debt_consolidation|         0| Fully Paid|\n",
      "|   15.27|    10000|  36|    C|          RENT|   42000.0|  home_improvement|         4| Fully Paid|\n",
      "|    6.03|     3600|  36|    A|      MORTGAGE|  110000.0|    major_purchase|        10| Fully Paid|\n",
      "|   11.71|     6000|  36|    B|      MORTGAGE|   84000.0|           medical|         1| Fully Paid|\n",
      "|    6.03|     9200|  36|    A|          RENT|  77385.19|debt_consolidation|         6| Fully Paid|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql.functions import regexp_replace, col\n",
    "from pyspark.sql.types import FloatType\n",
    "# 删除\"int_rate\"列中的百分号\n",
    "df = df.withColumn(\"int_rate\", regexp_replace(col(\"int_rate\"), \"%\", \"\"))\n",
    "\n",
    "# 将\"int_rate\"列的数据格式设置为小数格式\n",
    "df = df.withColumn(\"int_rate\", df[\"int_rate\"].cast(FloatType()))\n",
    "\n",
    "# 显示处理后的数据\n",
    "df.show()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|int_rate|loan_amnt|term|grade|home_ownership|annual_inc|           purpose|emp_length|loan_status|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|   10.65|     5000|  36|    B|          RENT|   24000.0|       credit_card|        10| Fully Paid|\n",
      "|   15.27|     2500|  60|    C|          RENT|   30000.0|               car|         0|Charged Off|\n",
      "|   15.96|     2400|  36|    C|          RENT|   12252.0|    small_business|        10| Fully Paid|\n",
      "|   13.49|    10000|  36|    C|          RENT|   49200.0|             other|        10| Fully Paid|\n",
      "|   12.69|     3000|  60|    B|          RENT|   80000.0|             other|         1| Fully Paid|\n",
      "|     7.9|     5000|  36|    A|          RENT|   36000.0|           wedding|         3| Fully Paid|\n",
      "|   15.96|     7000|  60|    C|          RENT|   47004.0|debt_consolidation|         8| Fully Paid|\n",
      "|   18.64|     3000|  36|    E|          RENT|   48000.0|               car|         9| Fully Paid|\n",
      "|   21.28|     5600|  60|    F|           OWN|   40000.0|    small_business|         4|Charged Off|\n",
      "|   12.69|     5375|  60|    B|          RENT|   15000.0|             other|         0|Charged Off|\n",
      "|   14.65|     6500|  60|    C|           OWN|   72000.0|debt_consolidation|         5| Fully Paid|\n",
      "|   12.69|    12000|  36|    B|           OWN|   75000.0|debt_consolidation|        10| Fully Paid|\n",
      "|   13.49|     9000|  36|    C|          RENT|   30000.0|debt_consolidation|         0|Charged Off|\n",
      "|    9.91|     3000|  36|    B|          RENT|   15000.0|       credit_card|         3| Fully Paid|\n",
      "|   10.65|    10000|  36|    B|          RENT|  100000.0|             other|         3|Charged Off|\n",
      "|   16.29|     1000|  36|    D|          RENT|   28000.0|debt_consolidation|         0| Fully Paid|\n",
      "|   15.27|    10000|  36|    C|          RENT|   42000.0|  home_improvement|         4| Fully Paid|\n",
      "|    6.03|     3600|  36|    A|      MORTGAGE|  110000.0|    major_purchase|        10| Fully Paid|\n",
      "|   11.71|     6000|  36|    B|      MORTGAGE|   84000.0|           medical|         1| Fully Paid|\n",
      "|    6.03|     9200|  36|    A|          RENT|  77385.19|debt_consolidation|         6| Fully Paid|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "df.show()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "unhashable type: 'Column'",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mTypeError\u001B[0m                                 Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[10], line 16\u001B[0m\n\u001B[0;32m      5\u001B[0m grade_mapping \u001B[38;5;241m=\u001B[39m {\n\u001B[0;32m      6\u001B[0m     \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mA\u001B[39m\u001B[38;5;124m'\u001B[39m: \u001B[38;5;241m1\u001B[39m,\n\u001B[0;32m      7\u001B[0m     \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mB\u001B[39m\u001B[38;5;124m'\u001B[39m: \u001B[38;5;241m2\u001B[39m,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m     12\u001B[0m     \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mG\u001B[39m\u001B[38;5;124m'\u001B[39m: \u001B[38;5;241m7\u001B[39m\n\u001B[0;32m     13\u001B[0m }\n\u001B[0;32m     15\u001B[0m \u001B[38;5;66;03m# 将\"grade\"列的值映射为整型\u001B[39;00m\n\u001B[1;32m---> 16\u001B[0m df \u001B[38;5;241m=\u001B[39m df\u001B[38;5;241m.\u001B[39mwithColumn(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mgrade\u001B[39m\u001B[38;5;124m\"\u001B[39m, when(col(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mgrade\u001B[39m\u001B[38;5;124m\"\u001B[39m)\u001B[38;5;241m.\u001B[39misin(\u001B[38;5;28mlist\u001B[39m(grade_mapping\u001B[38;5;241m.\u001B[39mkeys())), \u001B[43mgrade_mapping\u001B[49m\u001B[43m[\u001B[49m\u001B[43mcol\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mgrade\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m)\u001B[49m\u001B[43m]\u001B[49m)\u001B[38;5;241m.\u001B[39motherwise(col(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mgrade\u001B[39m\u001B[38;5;124m\"\u001B[39m)))\n\u001B[0;32m     18\u001B[0m \u001B[38;5;66;03m# 将\"grade\"列的数据格式设置为整型\u001B[39;00m\n\u001B[0;32m     19\u001B[0m df \u001B[38;5;241m=\u001B[39m df\u001B[38;5;241m.\u001B[39mwithColumn(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mgrade\u001B[39m\u001B[38;5;124m\"\u001B[39m, df[\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mgrade\u001B[39m\u001B[38;5;124m\"\u001B[39m]\u001B[38;5;241m.\u001B[39mcast(IntegerType()))\n",
      "\u001B[1;31mTypeError\u001B[0m: unhashable type: 'Column'"
     ]
    }
   ],
   "source": [
    "from pyspark.sql.functions import when, col\n",
    "from pyspark.sql.types import IntegerType\n",
    "\n",
    "# 创建字典映射关系\n",
    "grade_mapping = {\n",
    "    'A': 1,\n",
    "    'B': 2,\n",
    "    'C': 3,\n",
    "    'D': 4,\n",
    "    'E': 5,\n",
    "    'F': 6,\n",
    "    'G': 7\n",
    "}\n",
    "\n",
    "# 将\"grade\"列的值映射为整型\n",
    "df = df.withColumn(\"grade\", when(col(\"grade\").isin(list(grade_mapping.keys())), grade_mapping[col(\"grade\")]).otherwise(col(\"grade\")))\n",
    "\n",
    "# 将\"grade\"列的数据格式设置为整型\n",
    "df = df.withColumn(\"grade\", df[\"grade\"].cast(IntegerType()))\n",
    "\n",
    "# 显示处理后的数据\n",
    "df.show()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "出现该错误是因为isin()函数在pyspark中不支持直接使用Python的集合类型作为参数。\n",
    "\n",
    "要解决这个问题，可以使用lit()函数将Python集合转换为pyspark支持的字面量类型。请更新代码如下："
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|int_rate|loan_amnt|term|grade|home_ownership|annual_inc|           purpose|emp_length|loan_status|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|   10.65|     5000|  36|    2|          RENT|   24000.0|       credit_card|        10| Fully Paid|\n",
      "|   15.27|     2500|  60|    3|          RENT|   30000.0|               car|         0|Charged Off|\n",
      "|   15.96|     2400|  36|    3|          RENT|   12252.0|    small_business|        10| Fully Paid|\n",
      "|   13.49|    10000|  36|    3|          RENT|   49200.0|             other|        10| Fully Paid|\n",
      "|   12.69|     3000|  60|    2|          RENT|   80000.0|             other|         1| Fully Paid|\n",
      "|     7.9|     5000|  36|    1|          RENT|   36000.0|           wedding|         3| Fully Paid|\n",
      "|   15.96|     7000|  60|    3|          RENT|   47004.0|debt_consolidation|         8| Fully Paid|\n",
      "|   18.64|     3000|  36|    5|          RENT|   48000.0|               car|         9| Fully Paid|\n",
      "|   21.28|     5600|  60|    6|           OWN|   40000.0|    small_business|         4|Charged Off|\n",
      "|   12.69|     5375|  60|    2|          RENT|   15000.0|             other|         0|Charged Off|\n",
      "|   14.65|     6500|  60|    3|           OWN|   72000.0|debt_consolidation|         5| Fully Paid|\n",
      "|   12.69|    12000|  36|    2|           OWN|   75000.0|debt_consolidation|        10| Fully Paid|\n",
      "|   13.49|     9000|  36|    3|          RENT|   30000.0|debt_consolidation|         0|Charged Off|\n",
      "|    9.91|     3000|  36|    2|          RENT|   15000.0|       credit_card|         3| Fully Paid|\n",
      "|   10.65|    10000|  36|    2|          RENT|  100000.0|             other|         3|Charged Off|\n",
      "|   16.29|     1000|  36|    4|          RENT|   28000.0|debt_consolidation|         0| Fully Paid|\n",
      "|   15.27|    10000|  36|    3|          RENT|   42000.0|  home_improvement|         4| Fully Paid|\n",
      "|    6.03|     3600|  36|    1|      MORTGAGE|  110000.0|    major_purchase|        10| Fully Paid|\n",
      "|   11.71|     6000|  36|    2|      MORTGAGE|   84000.0|           medical|         1| Fully Paid|\n",
      "|    6.03|     9200|  36|    1|          RENT|  77385.19|debt_consolidation|         6| Fully Paid|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql.functions import udf\n",
    "from pyspark.sql.functions import when, col\n",
    "from pyspark.sql.types import IntegerType\n",
    "# 创建SparkSession\n",
    "spark = SparkSession.builder.getOrCreate()\n",
    "# 创建字典映射关系\n",
    "grade_mapping = {\n",
    "    'A': 1,\n",
    "    'B': 2,\n",
    "    'C': 3,\n",
    "    'D': 4,\n",
    "    'E': 5,\n",
    "    'F': 6,\n",
    "    'G': 7\n",
    "}\n",
    "# 创建UDF函数进行映射转换\n",
    "mapping_udf = udf(lambda grade: grade_mapping.get(grade), IntegerType())\n",
    "# 将\"grade\"列的值映射为整型\n",
    "df = df.withColumn(\"grade\", mapping_udf(col(\"grade\")))\n",
    "# 显示处理后的数据\n",
    "df.show()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "用减法+ascii编码完成"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|int_rate|loan_amnt|term|grade|home_ownership|annual_inc|           purpose|emp_length|loan_status|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "|   10.65|     5000|  36|  -14|          RENT|   24000.0|       credit_card|        10| Fully Paid|\n",
      "|   15.27|     2500|  60|  -13|          RENT|   30000.0|               car|         0|Charged Off|\n",
      "|   15.96|     2400|  36|  -13|          RENT|   12252.0|    small_business|        10| Fully Paid|\n",
      "|   13.49|    10000|  36|  -13|          RENT|   49200.0|             other|        10| Fully Paid|\n",
      "|   12.69|     3000|  60|  -14|          RENT|   80000.0|             other|         1| Fully Paid|\n",
      "|     7.9|     5000|  36|  -15|          RENT|   36000.0|           wedding|         3| Fully Paid|\n",
      "|   15.96|     7000|  60|  -13|          RENT|   47004.0|debt_consolidation|         8| Fully Paid|\n",
      "|   18.64|     3000|  36|  -11|          RENT|   48000.0|               car|         9| Fully Paid|\n",
      "|   21.28|     5600|  60|  -10|           OWN|   40000.0|    small_business|         4|Charged Off|\n",
      "|   12.69|     5375|  60|  -14|          RENT|   15000.0|             other|         0|Charged Off|\n",
      "|   14.65|     6500|  60|  -13|           OWN|   72000.0|debt_consolidation|         5| Fully Paid|\n",
      "|   12.69|    12000|  36|  -14|           OWN|   75000.0|debt_consolidation|        10| Fully Paid|\n",
      "|   13.49|     9000|  36|  -13|          RENT|   30000.0|debt_consolidation|         0|Charged Off|\n",
      "|    9.91|     3000|  36|  -14|          RENT|   15000.0|       credit_card|         3| Fully Paid|\n",
      "|   10.65|    10000|  36|  -14|          RENT|  100000.0|             other|         3|Charged Off|\n",
      "|   16.29|     1000|  36|  -12|          RENT|   28000.0|debt_consolidation|         0| Fully Paid|\n",
      "|   15.27|    10000|  36|  -13|          RENT|   42000.0|  home_improvement|         4| Fully Paid|\n",
      "|    6.03|     3600|  36|  -15|      MORTGAGE|  110000.0|    major_purchase|        10| Fully Paid|\n",
      "|   11.71|     6000|  36|  -14|      MORTGAGE|   84000.0|           medical|         1| Fully Paid|\n",
      "|    6.03|     9200|  36|  -15|          RENT|  77385.19|debt_consolidation|         6| Fully Paid|\n",
      "+--------+---------+----+-----+--------------+----------+------------------+----------+-----------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark.sql.functions import col, expr\n",
    "from pyspark.sql.types import IntegerType\n",
    "\n",
    "# Map letter grades to integers via ASCII offset: 'A'->1, 'B'->2, ...\n",
    "# NOTE(review): the captured output shows negative grade values, so the column\n",
    "# did not hold single letters 'A'-'G' on that run - verify upstream cleaning.\n",
    "df = df.withColumn('grade', expr(\"ascii(grade) - ascii('A') + 1\"))\n",
    "\n",
    "# 将\"grade\"列的数据格式设置为整型\n",
    "df = df.withColumn(\"grade\", df[\"grade\"].cast(IntegerType()))\n",
    "# 显示处理后的数据\n",
    "df.show()\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.sql.types import IntegerType\n",
    "df = df.withColumn(\"grade\", df[\"grade\"].cast(IntegerType()))\n",
    "df.show()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.sql.functions import col, expr\n",
    "from pyspark.ml.feature import StringIndexer\n",
    "# 创建字典映射关系\n",
    "home_ownership_mapping = {\n",
    "    'MORTGAGE': 0,\n",
    "    'RENT': 1,\n",
    "    'OWN': 2\n",
    "}\n",
    "# 在 PySpark DataFrame 上进行映射操作\n",
    "df = df.withColumn('home_ownership', expr(\"CASE WHEN home_ownership = 'MORTGAGE' THEN 0 \"\n",
    "                                                      \"WHEN home_ownership = 'RENT' THEN 1 \"\n",
    "                                                      \"WHEN home_ownership = 'OWN' THEN 2 END\"))\n",
    "# # 将\"grade\"列的数据格式设置为整型\n",
    "# df = df.withColumn(\"home_ownership\", df[\"home_ownership\"].cast(IntegerType()))\n",
    "\n",
    "# 将 home_ownership 列编码为整型\n",
    "indexer = StringIndexer(inputCol='home_ownership', outputCol='home_ownership_index', handleInvalid='keep')\n",
    "df_encoded = indexer.fit(df).transform(df)\n",
    "# Show the transformed result: df_encoded holds the StringIndexer output;\n",
    "# the original showed df, which lacks the home_ownership_index column\n",
    "df_encoded.show()\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.sql.types import IntegerType\n",
    "# Cast the \"term\" column to integer\n",
    "df = df.withColumn(\"term\", df[\"term\"].cast(IntegerType()))"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.feature import StringIndexer\n",
    "from pyspark.sql.types import IntegerType\n",
    "\n",
    "# 创建 StringIndexer 转换器\n",
    "string_indexer = StringIndexer(inputCol='purpose', outputCol='purpose_index')\n",
    "string_indexer.setStringOrderType(\"frequencyDesc\")  # 设置索引顺序按频率降序\n",
    "\n",
    "# 在 PySpark DataFrame 上进行映射操作\n",
    "df = string_indexer.fit(df).transform(df)\n",
    "# (StringIndexer indices already start at 0; the original line here was a no-op self-assignment)\n",
    "\n",
    "# 将\"purpose_index\"列的数据格式设置为整型\n",
    "df = df.withColumn(\"purpose_index\", df[\"purpose_index\"].cast(IntegerType()))\n",
    "# 显示处理后的数据\n",
    "df.show()\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# 删除 'purpose' 列\n",
    "df = df.drop('purpose')\n",
    "\n",
    "# # 将 'purpose_index' 列重命名为 'purpose'\n",
    "df = df.withColumnRenamed('purpose_index', 'purpose')\n",
    "\n",
    "# 显示处理后的数据\n",
    "df.show()\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.sql.functions import when\n",
    "\n",
    "# 使用 when 和 otherwise 函数将 loan_status 映射为 label 列\n",
    "df = df.withColumn('label', when((df['loan_status'] == 'Fully Paid') | (df['loan_status'] == 'Does not meet the credit policy. Status:Fully Paid'), 1).otherwise(0))\n",
    "\n",
    "# 显示 DataFrame\n",
    "df.show()\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# 删除 loan_status 列\n",
    "df = df.drop('loan_status')\n",
    "# 显示删除列后的结果\n",
    "df.show()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.feature import MinMaxScaler\n",
    "from pyspark.ml.feature import VectorAssembler\n",
    "\n",
    "# 将要归一化的列放入一个特征向量中\n",
    "assembler = VectorAssembler(inputCols=['int_rate','loan_amnt', 'term', 'grade','annual_inc','emp_length','purpose'], outputCol='features')\n",
    "df_assembled = assembler.transform(df)\n",
    "\n",
    "# 创建 MinMaxScaler 转换器\n",
    "scaler = MinMaxScaler(inputCol='features', outputCol='scaled_features')\n",
    "\n",
    "# 拟合转换器并对数据进行归一化操作\n",
    "scaler_model = scaler.fit(df_assembled)\n",
    "df_normalized = scaler_model.transform(df_assembled)\n",
    "\n",
    "# 显示归一化后的数据\n",
    "df_normalized.show()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# 保留指定的列\n",
    "selected_columns = ['label','scaled_features']\n",
    "df_sets = df_normalized.select(selected_columns)\n",
    "# 划分数据集为训练集和测试集\n",
    "train_ratio = 0.8  # 训练集比例\n",
    "test_ratio = 1 - train_ratio  # 测试集比例\n",
    "\n",
    "# 使用 randomSplit 函数划分数据集\n",
    "train_data, test_data = df_sets.randomSplit([train_ratio, test_ratio], seed=42)\n",
    "\n",
    "# 显示划分后的训练集和测试集\n",
    "print(\"训练集样本数:\", train_data.count())\n",
    "print(\"测试集样本数:\", test_data.count())\n",
    "\n",
    "train_data.show()\n",
    "test_data.show()\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "随机森林"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark.ml.feature import VectorAssembler\n",
    "from pyspark.ml.classification import RandomForestClassifier\n",
    "from pyspark.ml import Pipeline\n",
    "\n",
    "\n",
    "# 创建特征向量列\n",
    "feature_columns = ['scaled_features']\n",
    "assembler = VectorAssembler(inputCols=feature_columns, outputCol='features')\n",
    "\n",
    "# 创建随机森林分类器\n",
    "rf = RandomForestClassifier(\n",
    "    featuresCol='features',\n",
    "    labelCol='label',\n",
    "    numTrees=10,  # 决策树的数量\n",
    "    maxDepth=5  # 决策树的最大深度\n",
    ")\n",
    "\n",
    "# 创建 Pipeline\n",
    "pipeline = Pipeline(stages=[assembler, rf])\n",
    "\n",
    "# 训练模型\n",
    "model = pipeline.fit(train_data)\n",
    "\n",
    "# 进行预测\n",
    "predictions = model.transform(test_data)\n",
    "\n",
    "# 显示预测结果\n",
    "predictions.select('features', 'label', 'prediction', 'probability').show(truncate=False)\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.evaluation import MulticlassClassificationEvaluator\n",
    "\n",
    "# 创建 MulticlassClassificationEvaluator\n",
    "evaluator = MulticlassClassificationEvaluator(labelCol='label', predictionCol='prediction', metricName='accuracy')\n",
    "\n",
    "# 计算准确率\n",
    "accuracy = evaluator.evaluate(predictions)\n",
    "\n",
    "print(f'Accuracy: {accuracy}')\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "KNN"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.feature import VectorAssembler\n",
    "from sklearn.neighbors import KNeighborsRegressor\n",
    "from pyspark.sql import functions as F\n",
    "\n",
    "# 创建特征向量\n",
    "assembler = VectorAssembler(\n",
    "    inputCols=df.columns[1:],\n",
    "    outputCol=\"features\"\n",
    ")\n",
    "transformed_df = assembler.transform(df)\n",
    "\n",
    "# 转换为 Pandas DataFrame 进行本地计算\n",
    "pandas_df = transformed_df.toPandas()\n",
    "\n",
    "# Extract the assembled feature vectors and labels for sklearn\n",
    "# (the original used every pandas column, including non-numeric vector columns)\n",
    "X = pandas_df[\"features\"].apply(lambda v: v.toArray()).tolist()\n",
    "y = pandas_df[\"label\"].values\n",
    "\n",
    "# 构建 KNN 回归模型\n",
    "k = 5  # 设置近邻数\n",
    "knn = KNeighborsRegressor(n_neighbors=k)\n",
    "\n",
    "# 进行训练\n",
    "knn.fit(X, y)\n",
    "\n",
    "# 进行预测\n",
    "predictions = knn.predict(X)\n",
    "\n",
    "# Attach per-row predictions locally: the original expr(\"array(...)\") embedded\n",
    "# the FULL prediction list into every single row, which is incorrect\n",
    "pandas_df[\"prediction\"] = predictions\n",
    "\n",
    "# Show prediction results\n",
    "print(pandas_df[[\"label\", \"prediction\"]].head(20))\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "# `predictions` here is a local numpy array from sklearn, not a Spark DataFrame,\n",
    "# so a Spark MulticlassClassificationEvaluator cannot consume it; compute the\n",
    "# accuracy locally instead (threshold the KNN regression output at 0.5).\n",
    "accuracy = float(((np.asarray(predictions) >= 0.5).astype(int) == y).mean())\n",
    "\n",
    "print(f'Accuracy: {accuracy}')\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "SVM"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark.ml.feature import VectorAssembler\n",
    "from pyspark.ml.classification import LinearSVC\n",
    "from pyspark.ml import Pipeline\n",
    "from pyspark.ml.evaluation import BinaryClassificationEvaluator\n",
    "\n",
    "# 创建特征向量列\n",
    "feature_columns = ['scaled_features']\n",
    "assembler = VectorAssembler(inputCols=feature_columns, outputCol='features')\n",
    "\n",
    "# 创建 SVM 分类器\n",
    "svm = LinearSVC(\n",
    "    featuresCol='scaled_features',\n",
    "    labelCol='label',\n",
    "    maxIter=10,  # 迭代次数\n",
    "    regParam=0.1  # 正则化参数\n",
    ")\n",
    "\n",
    "# 创建 Pipeline\n",
    "pipeline = Pipeline(stages=[assembler, svm])\n",
    "\n",
    "# 划分数据集为训练集和测试集\n",
    "train_ratio = 0.8\n",
    "test_ratio = 1 - train_ratio\n",
    "train_data, test_data = df_sets.randomSplit([train_ratio, test_ratio], seed=42)\n",
    "\n",
    "# 训练模型\n",
    "model = pipeline.fit(train_data)\n",
    "\n",
    "# 在测试集上进行预测\n",
    "predictions = model.transform(test_data)\n",
    "\n",
    "# 评估模型\n",
    "evaluator = BinaryClassificationEvaluator(\n",
    "    labelCol='label',\n",
    "    rawPredictionCol='rawPrediction',\n",
    "    metricName='areaUnderROC'\n",
    ")\n",
    "areaUnderROC = evaluator.evaluate(predictions)\n",
    "print('Area under ROC:', areaUnderROC)\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "决策树"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.feature import VectorAssembler\n",
    "from pyspark.ml.classification import DecisionTreeClassifier\n",
    "from pyspark.ml import Pipeline\n",
    "from pyspark.ml.evaluation import MulticlassClassificationEvaluator\n",
    "\n",
    "# 创建特征向量列\n",
    "feature_columns = ['scaled_features']\n",
    "assembler = VectorAssembler(inputCols=feature_columns, outputCol='features')\n",
    "\n",
    "# 创建决策树分类器\n",
    "dt = DecisionTreeClassifier(labelCol=\"label\", featuresCol=\"scaled_features\")\n",
    "\n",
    "# 创建Pipeline\n",
    "pipeline = Pipeline(stages=[assembler, dt])\n",
    "\n",
    "# 训练模型\n",
    "model = pipeline.fit(train_data)\n",
    "\n",
    "# 进行预测\n",
    "predictions = model.transform(test_data)\n",
    "\n",
    "# 评估模型性能\n",
    "evaluator = MulticlassClassificationEvaluator(labelCol=\"label\", predictionCol=\"prediction\", metricName=\"accuracy\")\n",
    "accuracy = evaluator.evaluate(predictions)\n",
    "print(\"Accuracy:\", accuracy)\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.evaluation import MulticlassClassificationEvaluator\n",
    "\n",
    "# 创建 MulticlassClassificationEvaluator\n",
    "evaluator = MulticlassClassificationEvaluator(labelCol='label', predictionCol='prediction', metricName='accuracy')\n",
    "\n",
    "# 计算准确率\n",
    "accuracy = evaluator.evaluate(predictions)\n",
    "\n",
    "print(f'Accuracy: {accuracy}')\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "Ridge Model\n",
    "在 PySpark 中，Ridge 回归模型没有内置的实现，但我们可以使用线性回归模型并应用 Ridge 正则化来实现类似的效果。\n",
    "此代码将使用 Ridge 正则化的线性回归模型训练一个模型，并将其应用于测试数据进行预测。最后，使用回归评估器计算模型的均方根误差（RMSE）作为性能指标"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.feature import VectorAssembler\n",
    "from pyspark.ml.regression import LinearRegression\n",
    "from pyspark.ml import Pipeline\n",
    "from pyspark.ml.evaluation import RegressionEvaluator\n",
    "\n",
    "# 创建特征向量列\n",
    "feature_columns = ['scaled_features']\n",
    "assembler = VectorAssembler(inputCols=feature_columns, outputCol='features')\n",
    "\n",
    "# 创建线性回归模型并应用 Ridge 正则化\n",
    "ridge = LinearRegression(labelCol=\"label\", featuresCol=\"features\", regParam=0.1)\n",
    "\n",
    "# 创建Pipeline\n",
    "pipeline = Pipeline(stages=[assembler, ridge])\n",
    "\n",
    "# 训练模型\n",
    "model = pipeline.fit(train_data)\n",
    "\n",
    "# 进行预测\n",
    "predictions = model.transform(test_data)\n",
    "\n",
    "# 评估模型性能\n",
    "evaluator = RegressionEvaluator(labelCol=\"label\", predictionCol=\"prediction\", metricName=\"rmse\")\n",
    "rmse = evaluator.evaluate(predictions)\n",
    "print(\"RMSE:\", rmse)\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "可以根据需要选择其他评估指标，例如平均绝对误差（MAE）或决定系数（R2）。只需将 metricName 参数设置为相应的指标名称即可。"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.evaluation import RegressionEvaluator\n",
    "\n",
    "# 创建回归评估器\n",
    "evaluator = RegressionEvaluator(labelCol=\"label\", predictionCol=\"prediction\", metricName=\"rmse\")\n",
    "\n",
    "# 应用模型进行预测\n",
    "predictions = model.transform(test_data)\n",
    "\n",
    "# 计算评估指标\n",
    "rmse = evaluator.evaluate(predictions)\n",
    "print(\"RMSE:\", rmse)\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "Lasso Model\n",
    "在 PySpark 中，没有直接的 Lasso 类可用于回归模型。然而，您可以使用 LinearRegression 类来实现 Lasso 回归。通过设置 elasticNetParam 参数为 1.0，您可以将 L1 正则化项的权重设置为 1，从而实现 Lasso 回归。"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.feature import VectorAssembler\n",
    "from pyspark.ml.regression import LinearRegression\n",
    "from pyspark.ml import Pipeline\n",
    "from pyspark.sql import SparkSession\n",
    "\n",
    "# 创建特征向量列\n",
    "feature_columns = ['scaled_features']\n",
    "assembler = VectorAssembler(inputCols=feature_columns, outputCol='features')\n",
    "\n",
    "# 创建 Lasso 回归模型\n",
    "lasso = LinearRegression(featuresCol=\"features\", labelCol=\"label\", maxIter=10, regParam=0.01, elasticNetParam=1.0)\n",
    "\n",
    "# 创建一个 Pipeline，将特征转换和 Lasso 模型组合在一起\n",
    "pipeline = Pipeline(stages=[assembler, lasso])\n",
    "\n",
    "# 拟合模型\n",
    "model = pipeline.fit(train_data)\n",
    "\n",
    "# 进行预测\n",
    "predictions = model.transform(test_data)\n",
    "\n",
    "# 显示预测结果\n",
    "predictions.show()\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.evaluation import RegressionEvaluator\n",
    "\n",
    "# 创建 RegressionEvaluator 对象\n",
    "evaluator = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"label\", metricName=\"rmse\")\n",
    "\n",
    "# 计算均方根误差（RMSE）\n",
    "rmse = evaluator.evaluate(predictions)\n",
    "print(\"Root Mean Squared Error (RMSE):\", rmse)\n",
    "\n",
    "# 创建 RegressionEvaluator 对象\n",
    "evaluator = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"label\", metricName=\"r2\")\n",
    "\n",
    "# 计算决定系数（R-squared）\n",
    "r2 = evaluator.evaluate(predictions)\n",
    "print(\"R-squared:\", r2)\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "Elastic NET Model\n",
    "在 PySpark 中，可以使用 LinearRegression 类来实现 Elastic Net 回归。"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.regression import LinearRegression\n",
    "from pyspark.ml.evaluation import RegressionEvaluator\n",
    "\n",
    "# 创建 LinearRegression 模型对象\n",
    "enet = LinearRegression(featuresCol=\"scaled_features\", labelCol=\"label\", maxIter=10, regParam=0.1, elasticNetParam=0.5)\n",
    "\n",
    "# 使用训练数据拟合模型\n",
    "enet_model = enet.fit(train_data)\n",
    "\n",
    "# 对测试数据进行预测\n",
    "predictions = enet_model.transform(test_data)\n",
    "\n",
    "# 创建 RegressionEvaluator 对象\n",
    "evaluator = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"label\", metricName=\"rmse\")\n",
    "\n",
    "# 计算均方根误差（RMSE）\n",
    "rmse = evaluator.evaluate(predictions)\n",
    "print(\"Root Mean Squared Error (RMSE):\", rmse)\n",
    "\n",
    "# 创建 RegressionEvaluator 对象\n",
    "evaluator = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"label\", metricName=\"r2\")\n",
    "\n",
    "# 计算决定系数（R-squared）\n",
    "r2 = evaluator.evaluate(predictions)\n",
    "print(\"R-squared:\", r2)\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "Gradient Boosting\n",
    "在 PySpark 中，可以使用 GBTRegressor 类来实现梯度提升树回归（Gradient Boosting Tree Regression）。"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.regression import GBTRegressor\n",
    "from pyspark.ml.evaluation import RegressionEvaluator\n",
    "\n",
    "# 创建 GBTRegressor 模型对象\n",
    "gbt = GBTRegressor(featuresCol=\"scaled_features\", labelCol=\"label\", maxIter=10, stepSize=0.1)\n",
    "\n",
    "# 使用训练数据拟合模型\n",
    "gbt_model = gbt.fit(train_data)\n",
    "\n",
    "# 对测试数据进行预测\n",
    "predictions = gbt_model.transform(test_data)\n",
    "\n",
    "# 创建 RegressionEvaluator 对象\n",
    "evaluator = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"label\", metricName=\"rmse\")\n",
    "\n",
    "# 计算均方根误差（RMSE）\n",
    "rmse = evaluator.evaluate(predictions)\n",
    "print(\"Root Mean Squared Error (RMSE):\", rmse)\n",
    "\n",
    "# 创建 RegressionEvaluator 对象\n",
    "evaluator = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"label\", metricName=\"r2\")\n",
    "\n",
    "# 计算决定系数（R-squared）\n",
    "r2 = evaluator.evaluate(predictions)\n",
    "print(\"R-squared:\", r2)\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "XG Boosting\n",
    "在 PySpark 中使用 XGBoost，可以考虑使用 xgboost4j 和 xgboost4j-spark 库，这两个库提供了 XGBoost 在 Spark 中的集成。\n",
    "需要确保已经正确安装了这两个库，然后可以按照以下步骤使用 XGBoost 进行训练和预测："
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 法一"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml import Pipeline\n",
    "from pyspark.ml.feature import VectorAssembler\n",
    "from pyspark.ml.evaluation import RegressionEvaluator\n",
    "from pyspark.ml.linalg import VectorUDT\n",
    "from pyspark.sql.functions import col\n",
    "from pyspark.sql.types import DoubleType\n",
    "import xgboost as xgb\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# 准备数据，将特征列转换为向量类型\n",
    "# 创建 VectorAssembler\n",
    "feature_columns=[\"scaled_features\"]\n",
    "vector_assembler = VectorAssembler(\n",
    "    inputCols=feature_columns,\n",
    "    outputCol=\"features\"\n",
    ")\n",
    "\n",
    "# Transform the normalized DataFrame (`data` is the raw textFile RDD and cannot\n",
    "# be assembled); keep the assembler output column \"features\" for the next cell\n",
    "vectorized_data = vector_assembler.transform(df_normalized).select(\"features\", \"label\")\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# 将数据转换为 XGBoost 的 DMatrix 格式：\n",
    "# 将 DataFrame 转换为 Pandas DataFrame\n",
    "pandas_data = vectorized_data.select(\"features\", col(\"label\").cast(DoubleType())).toPandas()\n",
    "\n",
    "# 创建 XGBoost 的 DMatrix\n",
    "dtrain = xgb.DMatrix(pandas_data[\"features\"].tolist(), pandas_data[\"label\"])\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "#定义 XGBoost 参数，并训练模型：\n",
    "# 定义 XGBoost 参数\n",
    "params = {\n",
    "    \"objective\": \"reg:squarederror\",\n",
    "    \"max_depth\": 5,\n",
    "    \"eta\": 0.1,\n",
    "    \"subsample\": 0.8,\n",
    "    \"colsample_bytree\": 0.8\n",
    "}\n",
    "\n",
    "# 训练 XGBoost 模型\n",
    "model = xgb.train(params, dtrain, num_boost_round=100)\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# 使用训练好的模型进行预测：\n",
    "# Assemble the test split the same way as training (test_data only has\n",
    "# label + scaled_features) and avoid clobbering the Spark DataFrame name\n",
    "test_pdf = vector_assembler.transform(test_data).select(\"features\").toPandas()\n",
    "dtest = xgb.DMatrix(test_pdf[\"features\"].tolist())\n",
    "\n",
    "# 使用训练好的模型进行预测\n",
    "predictions = model.predict(dtest)\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "法2\n",
    "一种常用的方法是使用 sparkxgb 包，它是一个为 PySpark 提供 XGBoost 支持的第三方包。\n",
    "\n",
    "首先，您需要安装 sparkxgb 包。"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from pyspark.ml.feature import VectorAssembler\n",
    "from pyspark.ml import Pipeline\n",
    "\n",
    "# 创建特征向量列\n",
    "feature_columns = ['scaled_features']\n",
    "\n",
    "# 创建特征向量列\n",
    "vectorAssembler = VectorAssembler(inputCols=feature_columns, outputCol=\"features\")\n",
    "\n",
    "# # 加载训练数据\n",
    "# train_data = spark.read.format(\"csv\").option(\"header\", \"true\").load(\"train.csv\")\n",
    "\n",
    "# 创建 Pipeline\n",
    "pipeline = Pipeline(stages=[vectorAssembler])\n",
    "\n",
    "# 对训练数据进行转换\n",
    "transformed_data = pipeline.fit(train_data).transform(train_data)\n",
    "\n",
    "# 将数据转换为 XGBoost 所需的格式\n",
    "xgboost_data = transformed_data.select(\"label\", \"scaled_features\")\n",
    "\n",
    "# 使用 sparkxgb 训练 XGBoost 模型\n",
    "from sparkxgb import XGBoostClassifier\n",
    "\n",
    "xgb = XGBoostClassifier(\n",
    "    featuresCol=\"scaled_features\",\n",
    "    labelCol=\"label\",\n",
    "    predictionCol=\"prediction\",\n",
    "    maxDepth=3,\n",
    "    numRound=100,\n",
    "    objective=\"binary:logistic\"\n",
    ")\n",
    "model = xgb.fit(xgboost_data)\n",
    "\n",
    "# # 对测试数据进行预测\n",
    "# test_data = spark.read.format(\"csv\").option(\"header\", \"true\").load(\"test.csv\")\n",
    "transformed_test_data = pipeline.fit(test_data).transform(test_data)\n",
    "predictions = model.transform(transformed_test_data)\n",
    "\n",
    "# 进行模型评估等操作\n",
    "\n",
    "# 关闭 SparkSession\n",
    "spark.stop()\n"
   ],
   "metadata": {
    "collapsed": false
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "bigdata",
   "language": "python",
   "display_name": "bigdata"
  },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.7.6"
   }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
