{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "e630e069",
   "metadata": {},
   "source": [
    "1. scaler = StandardScaler(inputCol=\"features\",outputCol=\"scaledFeatures\",withMean=True,withStd=True).fit(df) 不能分开写\n",
    "如下写法报错：AttributeError: 'StandardScaler' object has no attribute 'transform'\n",
    "scaler = StandardScaler(inputCol=\"features\",outputCol=\"scaledFeatures\",withMean=True,withStd=True)\n",
    "scaler.fit(df)\n",
    "\n",
    "df = va.transform(dataset).select('features') 记得select"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "8f10c84f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# %pip install pyspark  # run once if PySpark is missing (use %pip so it installs into this kernel's environment)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4fd22319",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.datasets import load_iris\n",
    "import os\n",
    "# Optional local-environment setup; point these at your own installs if needed:\n",
    "#os.environ['JAVA_HOME'] = \"D:\\Develop\\Java\\jdk1.8.0_241\"  # path to a local JDK 8 install\n",
    "#os.environ['PYSPARK_PYTHON']=\"C:\\\\Users\\\\Spencer Cheung\\\\.conda\\\\envs\\\\py37\\\\python.exe\"  # interpreter used by PySpark workers\n",
    "\n",
    "from pyspark.conf import SparkConf\n",
    "from pyspark.ml.feature import VectorAssembler,StandardScaler,PCA\n",
    "from pyspark.context import SparkContext\n",
    "from pyspark.sql import SparkSession\n",
    "\n",
    "# Start a local SparkContext and wrap it in a SparkSession for the DataFrame API.\n",
    "sc = SparkContext('local','test')\n",
    "spark=SparkSession(sc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "142b9c1b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the iris dataset; keep the raw arrays and a labelled pandas frame.\n",
    "iris = load_iris()\n",
    "X, y = iris['data'], iris['target']\n",
    "data = pd.DataFrame(X, columns=iris.feature_names)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "26702d17",
   "metadata": {},
   "source": [
    "1.将 Pandas 数据框 data 转换为Spark数据框dataset，并将其列名设置为iris 特征名称，使得 dataset 输出为:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "746c267c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -- answer (task 1): convert the pandas DataFrame to a Spark DataFrame --\n",
    "dataset = spark.createDataFrame(data=data)\n",
    "# Column names are inherited from the pandas frame (the iris feature names).\n",
    "dataset.show(6)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1981e30b",
   "metadata": {},
   "source": [
    "2.使用 VectorAssembler 将 dataset 多列数据转化为单列的向量列'features',使得 df 输出为"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "db792870",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Feature column names as produced by load_iris, in the exact order expected by VectorAssembler.\n",
    "col =['sepal length (cm)','sepal width (cm)', 'petal length (cm)', 'petal width (cm)']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2b086d5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -- answer (task 2): assemble the four feature columns into a single vector column --\n",
    "model_va = VectorAssembler(inputCols=col,outputCol=\"features\")\n",
    "# transform() appends the 'features' column; select() keeps only that column.\n",
    "df = model_va.transform(dataset).select(\"features\")\n",
    "df.show(6)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9f8a668b",
   "metadata": {},
   "source": [
    "3.创建一个 StandardScaler对象，命名为scaler，\n",
    "用来对特征列'features'进行标准化，\n",
    "将标准化后的结果输出到新的特征列'scaledFeatures'，\n",
    "withMean和withStd 分别为True，\n",
    "最后在 df 上进行 fit 操作。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "25b360db",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -- answer (task 3): standardize 'features' to zero mean / unit variance --\n",
    "# Note: fit() returns a StandardScalerModel; the StandardScaler estimator itself\n",
    "# has no transform(), so the .fit(df) call must stay chained here.\n",
    "scaler = StandardScaler(inputCol='features',outputCol='scaledFeatures',withMean=True,withStd=True).fit(df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3a8d18fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Apply the fitted scaler model; adds the 'scaledFeatures' column.\n",
    "df_scaled =scaler.transform(df)\n",
    "df_scaled.show(6)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0f670e3d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -- PCA: project the standardized features onto the top 3 principal components --\n",
    "ncomponents = 3\n",
    "pca = PCA(k=ncomponents, inputCol='scaledFeatures', outputCol='pcaFeatures').fit(df_scaled)\n",
    "df_pca = pca.transform(df_scaled)\n",
    "print('Explained Variance Ratio', pca.explainedVariance.toArray())\n",
    "df_pca.show(6)\n",
    "# Extract the 3 PCA components from the vector column (row field 2 = 'pcaFeatures')\n",
    "# and rebuild a plain Spark DataFrame with float columns x, y, z.\n",
    "# (Removed: a dead df_pca.rdd.collect() that pulled the whole dataset to the driver\n",
    "# and discarded it, and an unused type(...) expression.)\n",
    "list1 = df_pca.rdd.map(lambda x: (x[2][0], x[2][1], x[2][2])).collect()\n",
    "df_origin = spark.createDataFrame([(float(tup[0]), float(tup[1]), float(tup[2])) for tup in list1], ['x', 'y', 'z'])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b69915f1",
   "metadata": {},
   "source": [
    "4.将Spark数据框df_origin转换为Pandas数据框df_Pandas."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "45c4aa5d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -- answer (task 4): convert the Spark DataFrame back to pandas --\n",
    "df_Pandas = df_origin.toPandas()\n",
    "# Add a row-label column 'id' (row0, row1, ...) and move it to the front.\n",
    "df_Pandas['id'] = 'row' + df_Pandas.index.astype(str)\n",
    "cols = list(df_Pandas)\n",
    "cols.insert(0, cols.pop(cols.index('id')))\n",
    "df_Pandas = df_Pandas.loc[:, cols]\n",
    "path = './input.csv'  # output location used by task 5\n",
    "df_Pandas.head()  # last expression, so the preview actually renders (it was discarded before)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0e4ad6a5",
   "metadata": {},
   "source": [
    "5.将df_Pandas的数据保存到指定路径path下，不保存索引index。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c32f0e1d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -- answer (task 5): save df_Pandas to the previously specified path, without the index --\n",
    "# Fix: write to the existing `path` ('./input.csv') instead of 'data/input.csv',\n",
    "# which fails with FileNotFoundError when the data/ directory does not exist\n",
    "# and silently rebound `path` to a different location.\n",
    "df_Pandas.to_csv(path, index=False)\n",
    "# Read it back through Spark to verify the round trip (header row -> column names).\n",
    "df = spark.read.csv(path, header=True)\n",
    "df.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PySpark-2.4.5",
   "language": "python",
   "name": "pyspark-2.4.5"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
