{
 "cells": [
  {
   "cell_type": "raw",
   "id": "034c5932-139f-48ee-ad75-15fe2f6848c2",
   "metadata": {},
   "source": [
    "Spark中的数据类型：\n",
    "    RDD:数据没有结构，就是一个算子，默认情况下spark使用的数据都是RDD类型，当做数据挖掘的时候应该要把RDD转成spark的df类型方便操作,借助RDD主要实现分布式\n",
    "    DataFrame:类似于二维表的，结构化数据类型\n",
    "    DataSet：不常用"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f02b3a83-a07d-4f09-b84f-b8f1e11e4867",
   "metadata": {},
   "source": [
    "1.初始化环境"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e57e331f-cb55-4dbb-b7e2-a3ce03056fcf",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "24/06/04 14:07:21 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n",
      "Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties\n",
      "Setting default log level to \"WARN\".\n",
      "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n"
     ]
    }
   ],
   "source": [
    "from pyspark.context import SparkContext\n",
    "from pyspark.sql import SparkSession\n",
    "sc = SparkContext('local','test')\n",
    "spark = SparkSession(sc)\n",
    "# spark.stop   #停止spark环境，如果不停止去重复运行这段代码会报错：有一个spark环境在运行了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "58bf6bd6-0c79-46bb-89e3-50fd199ab413",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from pyspark.mllib.stat import Statistics  #统计分析  主要看RDD的一些统计描述，像看均值等"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "304e3a65-42a4-4aaa-97dc-4609415ae198",
   "metadata": {},
   "outputs": [],
   "source": [
    "observations = sc.parallelize(   #observations是一个RDD类型的\n",
    "[(1.0,10.0,100.0),\n",
    " (2.0,20.0,200.0),\n",
    " (3.0,30.0,300.0)   \n",
    "])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "eb470194-4799-4e67-b0da-b06f424ef709",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "                                                                                \r"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[(1.0, 10.0, 100.0)]"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = observations.take(1)  #take(n)取前n行数据，返回一个列表\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "5030aa26-b4f3-4695-95a3-e5d4af0b15eb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([  2.,  20., 200.])"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#利用pyspark.mllib.stat import Statistics库中的统计分析进行rdd的操作   一般我们最好把rdd变成pandas中的dataframe去操作\n",
    "summary = Statistics.colStats(observations)   #按列去统计\n",
    "summary.count() #3行数据\n",
    "summary.max()  #每一列的最大值\n",
    "summary.mean()  #每一列的均值"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5364e767-4a8e-429e-9876-ef7cf078e3aa",
   "metadata": {},
   "source": [
    "2.rdd与dataframe之间的转换操作"
   ]
  },
  {
   "cell_type": "raw",
   "id": "e507e676-ca39-4007-ae8a-bbf5ec295f2b",
   "metadata": {},
   "source": [
    "1.转换\n",
    "    rdd-->dataframe(pyspark)\n",
    "    rdd-->dataframe(pandas)\n",
    "    dataframe(pyspark)-->dataframe(pandas)\n",
    "2.行列操作\n",
    "    1.读取某列数据\n",
    "    2.读取某行数据（索引）\n",
    "3.pyspark读取数据\n",
    "    1.spark.read.csv('./car_data.csv',header=True) 读出来直接是pyspark的dataframe类型\n",
    "    2.sc.textFile('./car_data.csv')  这个读出来是一个RDD\n",
    "4.数据描述\n",
    "    1.RDD和dataframe的数据描述。最小值、最大值、均值等"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "adf404bb-3c1f-42c7-b809-5ec6874c0515",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+----+-----+\n",
      "|  a|   b|    c|\n",
      "+---+----+-----+\n",
      "|1.0|10.0|100.0|\n",
      "|2.0|20.0|200.0|\n",
      "|3.0|30.0|300.0|\n",
      "+---+----+-----+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# rdd-->dataframe(pyspark)   pyspark的dataframe操作有限，想办法转成pandas的dataframe\n",
    "df_ps = observations.toDF(schema=['a','b','c'])  #schema列名\n",
    "df_ps.show()  #pyspark中的dataframe显示的时候没有索引"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "75ef51fb-66c6-4a60-a181-c77c4b33aed7",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Row(a=1.0, b=10.0, c=100.0)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_ps.take(2)[0]   #查看数据前几行，这个是有返回值的，返回一个列表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "3c4c8602-bb26-4889-90b5-cdeaed642326",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[2.0, 20.0, 200.0]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 在pyspark中的pandas取值非常麻烦\n",
    "# 按列处理：select取出某一列\n",
    "df_ps_a = df_ps.select('a').collect()   #返回一个[Row(a=1.0), Row(a=2.0), Row(a=3.0)]\n",
    "df_ps_a_1 = [col[0] for col in df_ps_a]   #取出a列中的值\n",
    "df_ps_a_1\n",
    "#按行处理：take取出前n行\n",
    "df_ps_r = [ row for row in df_ps.take(5)[1] ]  #查看前n行，有返回值,并把某一行的值变成列表\n",
    "df_ps_r"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "aefe8848-98af-456e-abac-91c6198433ba",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>a</th>\n",
       "      <th>b</th>\n",
       "      <th>c</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1.0</td>\n",
       "      <td>10.0</td>\n",
       "      <td>100.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>2.0</td>\n",
       "      <td>20.0</td>\n",
       "      <td>200.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>3.0</td>\n",
       "      <td>30.0</td>\n",
       "      <td>300.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "     a     b      c\n",
       "0  1.0  10.0  100.0\n",
       "1  2.0  20.0  200.0\n",
       "2  3.0  30.0  300.0"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# rdd-->dataframe(pandas)\n",
    "df_pd = df_ps.toPandas()\n",
    "df_pd.head()  #pandas的dataframe就有索引"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "597711f4-ffe9-455d-a74d-f27d5cbfff43",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1.0"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_pd_a = df_pd['a']\n",
    "df_pd_a.values[0]  #按列取值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "47790306-fd8f-4cee-8523-79854b6802db",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<PandasArray>\n",
       "[1.0, 2.0, 3.0]\n",
       "Length: 3, dtype: float64"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_pd1 = df_pd.iloc[0]   #选择第0行   返回pandas.core.series.Series类型的数据\n",
    "# df_pd1[0] #取到第0行第一列数据   返回int类型\n",
    "df_pd.iloc[0,0] #取到第0行第一列数据\n",
    "df_pd.iloc[0:2,0] #取到第0-1行第一列数据（iloc切片不含终点）   返回类型pandas.core.series.Series\n",
    "df_pd.iloc[0:2,0:2]  #返回一个pandas.core.frame.DataFrame   取0-1行，0-1列（iloc切片不含终点）\n",
    "df_pd.iloc[[0,1]]['a']  #返回一个pandas.core.frame.DataFrame  取0-1行的a列\n",
    "df_pd.loc[0:2,'a'] #取到第0-2行第一列数据\n",
    "df_pd.loc[0:2,'a'].values #取到第0-2行第一列所有数据\n",
    "a = df_pd.loc[0:2,'a'].array #取到第0-2行第一列所有数据\n",
    "a "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cad0ce9d-31f2-48b0-8166-4a727e793563",
   "metadata": {},
   "source": [
    "3.spark读取csv文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "1ae10d75-0519-4f1c-a86b-35dd4ecbf6d8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[Row(User ID='385', Gender='Male', Age='35', AnnualSalary='20000', Purchased='0'),\n",
       " Row(User ID='681', Gender='Male', Age='40', AnnualSalary='43500', Purchased='0')]"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# spark.read.csv读出来直接是dataframe类型的\n",
    "# sc.textFile('./car_data.csv')  这个读出来是一个RDD\n",
    "df_s = spark.read.csv('./car_data.csv',header=True)    #header是否以第一行作为表头\n",
    "df_s.head(5)\n",
    "df_s.head(5)[0] #读取出来是一个pyspark.sql.types.Row\n",
    "# df_s.take(5) #这个也是拿前几行\n",
    "# 要拿里面的值\n",
    "df_s.collect()[0:2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "e7ceaec7-740b-4c25-957a-fb20a7f7c016",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-------+------+---+------------+---------+\n",
      "|User ID|Gender|Age|AnnualSalary|Purchased|\n",
      "+-------+------+---+------------+---------+\n",
      "|    385|  Male| 35|       20000|        0|\n",
      "|    681|  Male| 40|       43500|        0|\n",
      "|    353|  Male| 49|       74000|        0|\n",
      "|    895|  Male| 40|      107500|        1|\n",
      "|    661|  Male| 25|       79000|        0|\n",
      "|    846|Female| 47|       33500|        1|\n",
      "|    219|Female| 46|      132500|        1|\n",
      "|    588|  Male| 42|       64000|        0|\n",
      "|     85|Female| 30|       84500|        2|\n",
      "|    465|  Male| 41|       52000|        2|\n",
      "|    686|  Male| 42|       80000|        2|\n",
      "|    408|  Male| 47|       23000|        1|\n",
      "|    790|Female| 32|       72500|        0|\n",
      "|    116|Female| 27|       57000|        0|\n",
      "|    118|Female| 42|      108000|        1|\n",
      "|     54|Female| 33|      149000|        1|\n",
      "|     90|  Male| 35|       75000|        0|\n",
      "|    372|  Male| 35|       53000|        0|\n",
      "|    926|  Male| 46|       79000|        1|\n",
      "|     94|Female| 39|      134000|        1|\n",
      "+-------+------+---+------------+---------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "df_s.show()   #用show才能显示为dataframe形式的，用head()只能以列表形式  pyspark的dataframe没有索引"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "368a9513-1350-4b0f-afee-fb1ad9e09063",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "pyspark.sql.dataframe.DataFrame"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "type(df_s)   #pyspark.sql.dataframe.DataFrame"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "13c0531a-8de0-44eb-9c68-cd07be1d4713",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-------+-----------------+------+------------------+-----------------+------------------+\n",
      "|summary|          User ID|Gender|               Age|     AnnualSalary|         Purchased|\n",
      "+-------+-----------------+------+------------------+-----------------+------------------+\n",
      "|  count|             1000|  1000|              1000|             1000|              1000|\n",
      "|   mean|            500.5|  null|            40.106|          72689.0|             0.521|\n",
      "| stddev|288.8194360957494|  null|10.707072681429098|34488.34186685011|0.6276567158510716|\n",
      "|    min|                1|Female|                18|           100000|                 0|\n",
      "|    max|              999|  Male|                63|            99500|                 2|\n",
      "+-------+-----------------+------+------------------+-----------------+------------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "df_s.describe().show()    # 和原生的pandas的dataFrame操作类似"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "765caa8f-595d-41d8-8633-177f6e699b95",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[3. 3. 3.]\n"
     ]
    }
   ],
   "source": [
    "summary = Statistics.colStats(observations)   #传一个RDD\n",
    "print(summary.numNonzeros())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5d457e14-47e3-4898-8a3d-00ae43eecf60",
   "metadata": {},
   "source": [
    "4.相关性系数，一般我们把RDD或者pyspark中的dataframe转成pandas中的dataframe去处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "3dad284d-adf4-45e4-a9ba-214af97c2940",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>a</th>\n",
       "      <th>b</th>\n",
       "      <th>c</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>a</th>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>b</th>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>c</th>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "     a    b    c\n",
       "a  1.0  1.0  1.0\n",
       "b  1.0  1.0  1.0\n",
       "c  1.0  1.0  1.0"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cor = df_pd.corr()\n",
    "cor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "644bfddf-ff0f-4108-bee9-e70d0854ec88",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1.0"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#pyspark中求相关性系数\n",
    "from pyspark.mllib.stat import Statistics\n",
    "# seriesX = sc.parallelize([1,2,3,3,5])   #RDD类型\n",
    "# seriesY = sc.parallelize([11,22,33,33,555]) #RDD类型\n",
    "# pandas中的dataframe转成rdd:\n",
    "seriesX = sc.parallelize(df_pd['a'].values)   #RDD类型\n",
    "seriesY = sc.parallelize(df_pd['b'].values) #RDD类型\n",
    "Statistics.corr(x=seriesX,y=seriesY)    #x,y必须是rdd类型，所以在用pyspark的相关性系数时要把数据转为RDD\n",
    "# pyspark中的dataframe转成rdd:\n",
    "# df_ps.rdd\n",
    "a = [col[0] for col in df_ps.select('a').collect()]\n",
    "b = [col[0] for col in df_ps.select('b').collect()]\n",
    "seriesX = sc.parallelize(a)   #RDD类型\n",
    "seriesY = sc.parallelize(b) #RDD类型\n",
    "Statistics.corr(x=seriesX,y=seriesY)    #x,y必须是rdd类型，所以在用pyspark的相关性系数时要把数据转为RDD"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "17c9a426-0220-41e2-8bab-db7f7a3ec2b2",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
