#!/usr/bin/env python
# -*- coding:utf-8 -*-

""" 
:Description: 创建复杂结构的DataFrame
:Owner: leo_jie
:Create time: 2019/12/5
"""

from pyspark.sql import SparkSession
from pyspark.sql.types import *

# Entry point for all DataFrame operations below: a local SparkSession.
spark = (SparkSession.builder
         .master("local")
         .appName("leo_pyspark_learning")
         .getOrCreate())

# Rows as plain tuples: (name, address, age).
data = [('leo', u'北京', 23),
        ('leo2', u'上海', 24)]

# Column names are supplied directly; column types are inferred from the data.
df = spark.createDataFrame(data, ['name', 'address', 'age'])
"""
df.show()
+----+-------+---+
|name|address|age|
+----+-------+---+
| leo|   北京| 23|
|leo2|   上海| 24|
+----+-------+---+
"""

# Building a DataFrame with a flat schema is straightforward; the sections
# below demonstrate the complex column types: array, map and struct.

# --- array column ---

# Each row pairs a name with a list of addresses; the list column is
# inferred as an array of strings.
data = [('leo', [u'北京', u'上海']),
        ('leo2', [u'南京', u'上海'])]

df = spark.createDataFrame(data, ['name', 'addresses'])

"""
df.show()
+----+------------+
|name|   addresses|
+----+------------+
| leo|[北京, 上海]|
|leo2|[南京, 上海]|
+----+------------+
"""

"""
df.printSchema()
root
 |-- name: string (nullable = true)
 |-- addresses: array (nullable = true)
 |    |-- element: string (containsNull = true)
"""

# Build the same DataFrame from an RDD, this time with an explicit schema
# instead of relying on type inference.
data_rdd = spark.sparkContext.parallelize(data)
schema = StructType([StructField("name", StringType(), nullable=True),
                     StructField("addresses", ArrayType(StringType()), nullable=True)])

df = spark.createDataFrame(data_rdd, schema)

# NOTE: Spark transformations are lazy — a bare df.select(...) whose result
# is discarded does nothing.  Shown as a commented example instead; append
# .show() (as in the documented output below) to actually display it.
# df.select("name", df['addresses'][0], df['addresses'][1]).show()

"""
df.show()
+----+------------+
|name|   addresses|
+----+------------+
| leo|[北京, 上海]|
|leo2|[南京, 上海]|
+----+------------+

df.select("name", df['addresses'][0], df['addresses'][1]).show()
+----+------------+------------+
|name|addresses[0]|addresses[1]|
+----+------------+------------+
| leo|        北京|        上海|
|leo2|        南京|        上海|
+----+------------+------------+

"""

# --- map column ---

# Plain Python dicts are inferred as a MapType column.
data = [('leo', {'address': 'sh', 'age': '23'}),
        ('leo2', {'address': 'nj', 'age': '22'})]

df = spark.createDataFrame(data, ['name', 'info'])

# Map values can be addressed either as df['info']['address'] or with the
# dotted string form 'info.age'.  The original bare df.select(...) here was
# a no-op (transformations are lazy and the result was discarded), so it is
# kept as a commented example; append .show() to display it.
# df.select("name", df['info']['address'], 'info.age').show()

# Equivalently, build the DataFrame from an RDD with an explicit MapType schema.
data_rdd = spark.sparkContext.parallelize(data)
schema = StructType([StructField("name", StringType(), nullable=True),
                     StructField("info", MapType(StringType(), StringType()), nullable=True)])

df = spark.createDataFrame(data_rdd, schema)

"""
df.show()
+----+--------------------+
|name|                info|
+----+--------------------+
| leo|[age -> 23, addre...|
|leo2|[age -> 22, addre...|
+----+--------------------+
"""

"""
df.select("name", df['info']['address'], 'info.age').show()
+----+-------------+---+
|name|info[address]|age|
+----+-------------+---+
| leo|           sh| 23|
|leo2|           nj| 22|
+----+-------------+---+
"""

# --- struct column ---

# A Python tuple nested inside a row is inferred as a struct whose fields
# are auto-named _1, _2, ...
data = [(1, 'leo', ('sh', 23)),
        (2, 'leo2', ('nj', 24))]

df = spark.createDataFrame(data, ['id', 'name', 'info'])

"""
df.show()
+---+----+--------+
| id|name|    info|
+---+----+--------+
|  1| leo|[sh, 23]|
|  2|leo2|[nj, 24]|
+---+----+--------+
"""

# df.select('name', df['info'][0], df['info'][1]).show()
"""
Selecting struct fields by integer position raises an exception — a struct
is not an array, so it cannot be indexed numerically:
df.select('name', df['info'][0], df['info'][1]).show()
pyspark.sql.utils.AnalysisException: u"Field name should be String Literal, but it's 0;"
"""

"""
df.printSchema()
root
 |-- id: long (nullable = true)
 |-- name: string (nullable = true)
 |-- info: struct (nullable = true)
 |    |-- _1: string (nullable = true)
 |    |-- _2: long (nullable = true)
"""

"""
df.select('name', df['info']['_1'], df['info']['_2']).show()
+----+-------+-------+
|name|info._1|info._2|
+----+-------+-------+
| leo|     sh|     23|
|leo2|     nj|     24|
+----+-------+-------+
"""
# The shorthand createDataFrame call above cannot name the struct fields
# (they come out as _1/_2); an explicit schema assigns meaningful names.
# NOTE: the redundant re-assignment of `data` that was here duplicated the
# identical literal defined just above and has been removed.
data_rdd = spark.sparkContext.parallelize(data)
schema = StructType([StructField('id', IntegerType()),
                     StructField('name', StringType()),
                     StructField('info', StructType([
                         StructField('address', StringType(), nullable=True),
                         StructField('age', IntegerType(), nullable=True)
                     ]), nullable=True)])
df = spark.createDataFrame(data_rdd, schema)

"""
df.printSchema()

root
 |-- id: integer (nullable = true)
 |-- name: string (nullable = true)
 |-- info: struct (nullable = true)
 |    |-- address: string (nullable = true)
 |    |-- age: integer (nullable = true)
"""

"""
df.show()
+---+----+--------+
| id|name|    info|
+---+----+--------+
|  1| leo|[sh, 23]|
|  2|leo2|[nj, 24]|
+---+----+--------+
"""

"""
df.select('name', 'info.address', 'info.age').show()
+----+-------+---+
|name|address|age|
+----+-------+---+
| leo|     sh| 23|
|leo2|     nj| 24|
+----+-------+---+
"""
# That concludes the creation and use of complex-typed DataFrames in pyspark.


# Release the SparkSession and its underlying SparkContext.
spark.stop()
