from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql import Row
from datetime import datetime
import pyspark.sql.functions as func
# Normalize a raw date string into a datetime object.
def toDate(inputStr):
    """Parse a date string of the form "YYYY?M?D" into a datetime.

    ``?`` is whatever single separator character follows the 4-digit year
    (e.g. "2020/1/1", "2020/1/21"). Month and day may be one or two digits.

    The original slicing-based implementation assumed a one-digit month and
    produced corrupt output for two-digit months (e.g. "2020/11/5"); splitting
    on the separator handles both cases while returning identical results for
    every input the old code accepted.

    Args:
        inputStr: date string, year first, with a single-character separator.

    Returns:
        datetime: the parsed date (time component is midnight).

    Raises:
        ValueError: if the string does not contain a valid year/month/day.
        IndexError: if the string is shorter than 5 characters.
    """
    sep = inputStr[4]  # the separator sits immediately after the 4-digit year
    year, month, day = inputStr.split(sep)
    return datetime(int(year), int(month), int(day))


# Create the SparkSession that drives all subsequent DataFrame/SQL work.
builder = SparkSession.builder.config(conf=SparkConf())
spark = builder.getOrCreate()
# Table schema: one row per (date, county, state) record in the input file.
# All columns are declared non-nullable.
schema = StructType([
    StructField("date", DateType(), False),      # reporting date
    StructField("county", StringType(), False),  # US county name
    StructField("state", StringType(), False),   # US state name
    StructField("cases", IntegerType(), False),  # confirmed-case count
    StructField("deaths", IntegerType(), False), # death count
])

# Build the row RDD from the tab-separated input file on HDFS.
def _parseLine(line):
    # Each line is: date \t county \t state \t cases \t deaths
    p = line.split("\t")
    return Row(toDate(p[0]), p[1], p[2], int(p[3]), int(p[4]))

rdd0 = spark.sparkContext.textFile("/tmp/us-counties.txt")
rdd1 = rdd0.map(_parseLine)
# Attach the schema to the row RDD to obtain a DataFrame.
schemaUsCovid = spark.createDataFrame(rdd1, schema)
# Register a temporary view so the data can be queried with SQL.
schemaUsCovid.createOrReplaceTempView("usInfo")


# 1. Cumulative confirmed cases and deaths per day, summed over all counties.
dailyTotals = (
    schemaUsCovid
    .groupBy("date")
    .agg(func.sum("cases"), func.sum("deaths"))  # agg supports sum/max/min/count/std
    .sort(schemaUsCovid["date"].asc())
)
# Rename the auto-generated "sum(...)" columns back to plain names.
totals = (
    dailyTotals
    .withColumnRenamed("sum(cases)", "cases")
    .withColumnRenamed("sum(deaths)", "deaths")
)
# Write a single JSON file to HDFS.
totals.repartition(1).write.json("/tmp/us/result1.json")
# Temp view consumed by query #2.
totals.createOrReplaceTempView("ustotal")

# 2. Daily new cases/deaths relative to the previous day.
#    Self-join of ustotal on t1.date = t2.date + 1 day, so each day's
#    cumulative totals have the prior day's subtracted from them.
increase = spark.sql("select t1.date,t1.cases-t2.cases as caseIncrease,t1.deaths-t2.deaths as deathIncrease from ustotal t1,ustotal t2 where t1.date=date_add(t2.date,1)")
increase.sort(increase["date"].asc()).repartition(1).write.json("/tmp/us/result2.json")

# 3. Per-state cumulative cases and deaths as of 2020-09-09, plus the
#    death rate (deaths / cases), rounded to 4 decimal places.
#    Only rows dated 2020-09-09 are selected (to_date parses the literal).
perState = spark.sql("select date,state,sum(cases) as sumCases,sum(deaths) as sumDeaths,round(sum(deaths)/sum(cases),4) as deathRate from usInfo where date=to_date('2020-09-09','yyyy-MM-dd') group by date,state")
perState.sort(perState["sumCases"].desc()).repartition(1).write.json("/tmp/us/result3.json")
# Temp view consumed by queries #4-#8.
perState.createOrReplaceTempView("eachStateInfo")

# 4. The 10 US states with the most cumulative confirmed cases.
spark.sql("select state,sumCases from eachStateInfo order by sumCases desc limit 10") \
    .repartition(1).write.json("/tmp/us/result4.json")

# 5. The 10 US states with the most cumulative deaths.
spark.sql("select state,sumDeaths from eachStateInfo order by sumDeaths desc limit 10") \
    .repartition(1).write.json("/tmp/us/result5.json")

# 6. The 10 US states with the fewest cumulative confirmed cases.
spark.sql("select state,sumCases from eachStateInfo order by sumCases asc limit 10") \
    .repartition(1).write.json("/tmp/us/result6.json")
# 7. The 10 US states with the fewest cumulative deaths.
spark.sql("select state,sumDeaths from eachStateInfo order by sumDeaths asc limit 10") \
    .repartition(1).write.json("/tmp/us/result7.json")
# 8. Death rate (deaths / cases) as of 2020-09-09 for the whole US and for
#    each state. sign=1 marks the single nationwide row, sign=2 the
#    per-state rows; the union combines both. cache() keeps the result in
#    memory because it is scanned again by the sort below.
rateDf = spark.sql("select 1 as sign,date,'USA' as state,round(sum(sumDeaths)/sum(sumCases),4) as deathRate from eachStateInfo group by date union select 2 as sign,date,state,deathRate from eachStateInfo").cache()
rateDf.sort(rateDf["sign"].asc(), rateDf["deathRate"].desc()).repartition(1).write.json("/tmp/us/result8.json")

