{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8f10c84f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# pip install pyspark"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "4fd22319",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Spencer Cheung\\.conda\\envs\\py37\\lib\\site-packages\\pyspark\\context.py:317: FutureWarning: Python 3.7 support is deprecated in Spark 3.4.\n",
      "  warnings.warn(\"Python 3.7 support is deprecated in Spark 3.4.\", FutureWarning)\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.datasets import load_iris\n",
    "import os\n",
     "os.environ['JAVA_HOME'] = r\"D:\\Develop\\Java\\jdk1.8.0_241\"  # change to your own JDK path (raw string avoids invalid \\D, \\J, \\j escapes)\n",
     "os.environ['PYSPARK_PYTHON']=\"C:\\\\Users\\\\Spencer Cheung\\\\.conda\\\\envs\\\\py37\\\\python.exe\"  # path of the Python interpreter used on this machine\n",
    "\n",
    "from pyspark.conf import SparkConf\n",
    "from pyspark.ml.feature import VectorAssembler,StandardScaler,PCA\n",
    "from pyspark.context import SparkContext\n",
    "from pyspark.sql import SparkSession\n",
    "\n",
    "sc = SparkContext('local','test')\n",
    "spark=SparkSession(sc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "142b9c1b",
   "metadata": {},
   "outputs": [],
   "source": [
    "iris=load_iris()\n",
    "X=iris['data']\n",
    "y=iris['target']\n",
    "data=pd.DataFrame(X,columns=iris.feature_names)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "26702d17",
   "metadata": {},
   "source": [
    "1.将 Pandas 数据框 data 转换为Spark数据框dataset，并将其列名设置为iris 特征名称，使得 dataset 输出为:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "746c267c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-----------------+----------------+-----------------+----------------+\n",
      "|sepal length (cm)|sepal width (cm)|petal length (cm)|petal width (cm)|\n",
      "+-----------------+----------------+-----------------+----------------+\n",
      "|              5.1|             3.5|              1.4|             0.2|\n",
      "|              4.9|             3.0|              1.4|             0.2|\n",
      "|              4.7|             3.2|              1.3|             0.2|\n",
      "|              4.6|             3.1|              1.5|             0.2|\n",
      "|              5.0|             3.6|              1.4|             0.2|\n",
      "|              5.4|             3.9|              1.7|             0.4|\n",
      "+-----------------+----------------+-----------------+----------------+\n",
      "only showing top 6 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "#由考生填写\n",
    "dataset = spark.createDataFrame(data=data)\n",
    "#由考生填写\n",
    "dataset.show(6)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1981e30b",
   "metadata": {},
   "source": [
    "2.使用 VectorAssembler 将 dataset 多列数据转化为单列的向量列'features',使得 df 输出为"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "db792870",
   "metadata": {},
   "outputs": [],
   "source": [
    "col =['sepal length (cm)','sepal width (cm)', 'petal length (cm)', 'petal width (cm)']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "2b086d5f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-----------------+\n",
      "|         features|\n",
      "+-----------------+\n",
      "|[5.1,3.5,1.4,0.2]|\n",
      "|[4.9,3.0,1.4,0.2]|\n",
      "|[4.7,3.2,1.3,0.2]|\n",
      "|[4.6,3.1,1.5,0.2]|\n",
      "|[5.0,3.6,1.4,0.2]|\n",
      "|[5.4,3.9,1.7,0.4]|\n",
      "+-----------------+\n",
      "only showing top 6 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "#由考生填写\n",
    "model_va = VectorAssembler(inputCols=col,outputCol=\"features\")\n",
    "df = model_va.transform(dataset).select(\"features\")\n",
    "#由考生填写\n",
    "df.show(6)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9f8a668b",
   "metadata": {},
   "source": [
    "3.创建一个 StandardScaler对象，命名为scaler，\n",
    "用来对特征列’features'进行标准化，\n",
    "将标准化后的结果输出到新的特征列’scaledFeatures’，\n",
    "withMean和withStd 分别为True，\n",
    "最后在 df 上进行 fit 操作。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "25b360db",
   "metadata": {},
   "outputs": [],
   "source": [
    "#由考生填写\n",
    "scaler = StandardScaler(inputCol=\"features\",outputCol=\"scaledFeatures\",withMean=True,withStd=True).fit(df)\n",
    "#由考生填写"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "3a8d18fb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-----------------+--------------------+\n",
      "|         features|      scaledFeatures|\n",
      "+-----------------+--------------------+\n",
      "|[5.1,3.5,1.4,0.2]|[-0.8976738791967...|\n",
      "|[4.9,3.0,1.4,0.2]|[-1.1392004834649...|\n",
      "|[4.7,3.2,1.3,0.2]|[-1.3807270877331...|\n",
      "|[4.6,3.1,1.5,0.2]|[-1.5014903898672...|\n",
      "|[5.0,3.6,1.4,0.2]|[-1.0184371813308...|\n",
      "|[5.4,3.9,1.7,0.4]|[-0.5353839727944...|\n",
      "+-----------------+--------------------+\n",
      "only showing top 6 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "df_scaled =scaler.transform(df)\n",
    "df_scaled.show(6)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "0f670e3d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Explained Variance Ratio [0.72962445 0.22850762 0.03668922]\n",
      "+-----------------+--------------------+--------------------+\n",
      "|         features|      scaledFeatures|         pcaFeatures|\n",
      "+-----------------+--------------------+--------------------+\n",
      "|[5.1,3.5,1.4,0.2]|[-0.8976738791967...|[2.25714117564811...|\n",
      "|[4.9,3.0,1.4,0.2]|[-1.1392004834649...|[2.07401301519962...|\n",
      "|[4.7,3.2,1.3,0.2]|[-1.3807270877331...|[2.35633511180617...|\n",
      "|[4.6,3.1,1.5,0.2]|[-1.5014903898672...|[2.29170678586969...|\n",
      "|[5.0,3.6,1.4,0.2]|[-1.0184371813308...|[2.38186270441693...|\n",
      "|[5.4,3.9,1.7,0.4]|[-0.5353839727944...|[2.06870060846769...|\n",
      "+-----------------+--------------------+--------------------+\n",
      "only showing top 6 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "ncomponents=3\n",
    "pca=PCA(k=ncomponents,inputCol ='scaledFeatures', outputCol ='pcaFeatures').fit(df_scaled)\n",
    "df_pca=pca.transform(df_scaled)\n",
    "print ('Explained Variance Ratio',pca.explainedVariance.toArray())\n",
    "df_pca.show(6)\n",
    "df_pca.rdd.collect()\n",
    "list1 = df_pca.rdd.map(lambda x:(x[2][0],x[2][1],x[2][2])).collect()\n",
    "type(list1[1][1])\n",
    "df_origin = spark.createDataFrame([(float(tup[0]), float (tup[1]),float(tup[2]))for tup in list1],['x','y','z'])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b69915f1",
   "metadata": {},
   "source": [
    "4.将Spark数据框df_origin转换为Pandas数据框df_Pandas."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "45c4aa5d",
   "metadata": {},
   "outputs": [],
   "source": [
    "#由考生填写\n",
    "df_Pandas = df_origin.toPandas()\n",
    "#由考生填写\n",
    "df_Pandas['id']= 'row'+df_Pandas.index.astype(str)\n",
    "cols = list(df_Pandas)\n",
    "cols.insert(0, cols.pop(cols.index('id')))\n",
    "df_Pandas = df_Pandas.loc[:, cols]\n",
    "df_Pandas.head()\n",
    "path='./input.csv'"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0e4ad6a5",
   "metadata": {},
   "source": [
    "5.将df_Pandas的数据保存到指定路径path下，不保存索引index。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "c32f0e1d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-----+------------------+--------------------+--------------------+\n",
      "|   id|                 x|                   y|                   z|\n",
      "+-----+------------------+--------------------+--------------------+\n",
      "| row0| 2.257141175648119|-0.47842383212490414| 0.12727962370642454|\n",
      "| row1|  2.07401301519962|  0.6718826870273567|  0.2338255167259694|\n",
      "| row2|2.3563351118061773| 0.34076642462456574|-0.04405390017016...|\n",
      "| row3| 2.291706785869694|  0.5953998626815402|-0.09098529710138725|\n",
      "| row4| 2.381862704416938| -0.6446756594005424|-0.01568564729691...|\n",
      "| row5|2.0687006084676938| -1.4842052973421647|-0.02687824979962...|\n",
      "| row6| 2.435868448963138|-0.04748511806193842|  -0.334350296509764|\n",
      "| row7|2.2253918887805524|-0.22240300191944512| 0.08839935188574966|\n",
      "| row8|2.3268453293413143|  1.1116036995379135|-0.14459246541111082|\n",
      "| row9|2.1770349052411113|  0.4674475685116715|  0.2529182675479318|\n",
      "|row10| 2.159076991691412| -1.0402058665204315| 0.26778400087288634|\n",
      "|row11| 2.318364130681806| -0.1326339989896237|-0.09344619093826612|\n",
      "|row12| 2.211043696511381|  0.7262431830141011| 0.23014024594834015|\n",
      "|row13|2.6243090161435623|  0.9582963471553219|-0.18019242300371774|\n",
      "|row14| 2.191399211809024| -1.8538465546759613| 0.47132202519164834|\n",
      "|row15|2.2546612065689424|   -2.67731522973661|-0.03042468374134...|\n",
      "|row16| 2.200216764743365| -1.4786557287482247|0.005326250642546992|\n",
      "|row17|2.1830361348214438| -0.4872061305724952| 0.04406768562731633|\n",
      "|row18| 1.892232839555378|  -1.400327566697242| 0.37309337699630096|\n",
      "|row19| 2.335544760603728| -1.1240835971653529|-0.13218762592303634|\n",
      "+-----+------------------+--------------------+--------------------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "#由考生填写\n",
     "df_Pandas.to_csv(path, index=False)\n",
    "#由考生填写\n",
     "df_loaded = spark.read.csv(path, header=True)\n",
     "df_loaded.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
