#coding:utf8

import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StringType, IntegerType

if __name__ == '__main__':
    # Build the SparkSession entry point, running Spark locally on all cores.
    spark = SparkSession.builder.\
        appName("test_parque").\
        master("local[*]").\
        getOrCreate()

    # Read the CSV data with an explicit schema (avoids a schema-inference pass).
    df = spark.read.format("csv").\
        schema('id INT, name STRING, score INT').\
        option('sep', ',').\
        load("../data/input/sql/stu_score.txt")

    # Grab Column objects: lightweight expression handles, not the data itself.
    column_id = df['id']
    column_name = df['name']

    # Printing a Column shows its expression, not the column's values.
    print(column_id)
    print(column_name)

    # select: the three ways of passing columns below are equivalent.
    df.select(['id','name']).show() # list of column-name strings
    df.select('id','name').show()   # varargs of column-name strings
    df.select(column_id,column_name).show() # Column objects

    # filter accepts either a Column expression or a SQL condition string.
    df.filter(df['score']<99).show()
    df.filter("score < 99").show()

    # groupBy returns a GroupedData object (not a DataFrame) that exposes
    # aggregation methods such as count().
    df.groupBy('name').count().show()
    df.groupBy(df['name']).count().show()
    print(type(df.groupBy(df['name'])))

    # Release the Spark session's resources (was missing — the JVM/driver
    # otherwise lingers until process exit).
    spark.stop()

