# -*- coding: utf-8 -*-
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DateType, FloatType
def big_data_processing():
    """Aggregate labeled comment data from HDFS by IP and by date.

    Reads ``demo_input.csv`` (one row per comment, with a float ``label``
    where 1.0 marks a positive comment — assumption inferred from the
    SUM/COUNT arithmetic, TODO confirm) and writes two single-file CSV
    outputs, each with columns (key, count_1, count_0, count):

      * ``/out1`` — per-IP totals, ordered by total comment count descending.
      * ``/out2`` — per-date totals, ordered by date descending.

    Side effects: registers/overwrites the temp view
    ``count_provinces_table`` and overwrites both HDFS output directories.
    """
    # Note: this is a SparkSession (not a SparkContext), so name it `spark`.
    spark = SparkSession.builder.config(conf=SparkConf()).getOrCreate()

    # Explicit input schema so Spark does not have to infer types.
    schema = StructType([
        StructField("video_id", StringType(), True),
        StructField("nickname", StringType(), True),
        StructField("time", DateType(), True),
        StructField("ip", StringType(), True),
        StructField("like", IntegerType(), True),
        StructField("text", StringType(), True),
        StructField("label", FloatType(), True)
    ])

    # header is specified exactly once (the original set it redundantly via
    # both .option("header", "true") and the header=True keyword).
    data = spark.read.schema(schema).csv(
        'hdfs://master:9000/demo_input.csv', header=True)

    # createOrReplaceTempView is idempotent; createTempView raises an
    # AnalysisException if the view already exists in a reused session.
    data.createOrReplaceTempView("count_provinces_table")

    # Per-IP aggregation. Final column names are aliased directly in SQL,
    # which replaces the original chain of withColumnRenamed calls:
    #   count_1 = SUM(label)            (positive comments)
    #   count_0 = COUNT(*) - SUM(label) (remaining comments)
    #   count   = COUNT(ip)             (total comments for the IP)
    by_ip = spark.sql(
        "SELECT ip, SUM(label) AS count_1, "
        "COUNT(*)-SUM(label) AS count_0, COUNT(ip) AS count "
        "FROM count_provinces_table "
        "GROUP BY ip HAVING ip IS NOT NULL "
        "ORDER BY count DESC")
    # coalesce(1) forces a single output part file; sep ',' is the CSV
    # writer's default, so the original .option('sep', ',') was redundant.
    by_ip.coalesce(1).write.mode('overwrite').csv(
        'hdfs://master:9000/out1', header=True)

    # Per-date aggregation; grouping by the real column `time` rather than
    # the alias `date` avoids relying on spark.sql.groupByAliases.
    by_date = spark.sql(
        "SELECT time AS date, SUM(label) AS count_1, "
        "COUNT(*)-SUM(label) AS count_0, COUNT(*) AS count "
        "FROM count_provinces_table "
        "GROUP BY time "
        "ORDER BY date DESC")
    by_date.coalesce(1).write.mode('overwrite').csv(
        'hdfs://master:9000/out2', header=True)

    spark.stop()


big_data_processing()