import os
# Configure the JVM/Spark environment BEFORE importing pyspark; findspark
# relies on SPARK_HOME to locate the Spark installation.
os.environ['JAVA_HOME'] = '/usr/local/jdk8'
os.environ['SPARK_HOME'] = '/usr/local/spark2/'
# NOTE(review): '$PYTHONPATH' here is a literal string — os.environ does no
# shell expansion, so the previous PYTHONPATH is NOT actually appended.
# Probably intended: ... + ':' + os.environ.get('PYTHONPATH', '') — confirm.
os.environ['PYTHONPATH'] = '/usr/local/spark2/python:/usr/local/spark2/python/lib/py4j-0.10.7-src.zip:$PYTHONPATH'

import findspark
findspark.init()  # makes the pyspark package importable using SPARK_HOME set above
import sys
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import count,isnull
from pyspark.sql.types import IntegerType
import pyspark.sql.functions as psf
from mydb import MyDBConnection  # project-local DB helper used to persist results

sys.path.append('/usr/local/spark2/python/bin') # original note says it always fails without this — TODO confirm still needed
if __name__ == "__main__":
    # App-visit count job: read a CSV of shot data, count non-null
    # game_event_id per combined_shot_type, and save the result to the DB.
    # The argv[1]-based input path is currently disabled; input is hard-coded.
    print(len(sys.argv))

    # Only one SparkSession may exist per JVM; getOrCreate() reuses it.
    spark = SparkSession.builder.appName("appVisitCount").getOrCreate()

    # Single-file input mode.
    app_file = '/usr/ftp/Kobe_data.csv'

    # Enumerate all *.csv files under `path` through the Hadoop FileSystem
    # API (py4j bridge into the JVM).
    # NOTE(review): `app_files` is built but never consumed below — load()
    # reads only `app_file`. Kept for the intended multi-file mode; confirm.
    path = "/usr/ftp/datas/"
    fs = spark._jvm.org.apache.hadoop.fs.FileSystem.get(spark._jsc.hadoopConfiguration())
    list_status = fs.listStatus(spark._jvm.org.apache.hadoop.fs.Path(path))
    # endswith(".csv") fixes the original `find(".csv") > 0`, which also
    # matched names merely containing ".csv" (e.g. "data.csv.bak") and
    # skipped a file named exactly ".csv".
    app_files = [path + status.getPath().getName()
                 for status in list_status
                 if status.getPath().getName().endswith(".csv")]

    # Read the CSV into a DataFrame: first row is the header, column types
    # are inferred from the data.
    app_df = (spark.read.format("csv")
              .option("header", "true")
              .option("inferSchema", "true")
              .load(app_file))

    # High-level DataFrame API: select the two columns, drop rows with a
    # null game_event_id, count per combined_shot_type, sort descending.
    count_app_def = (app_df
                     .select("combined_shot_type", "game_event_id")
                     .where(app_df['game_event_id'].isNotNull())
                     .groupBy("combined_shot_type")
                     .agg(psf.count("game_event_id").alias("counts"))
                     .orderBy("counts", ascending=False))

    # count() is an action — it triggers execution of the query above.
    print("total rows = %d" % (count_app_def.count()))
    print(count_app_def)

    # Collect to pandas and prepend a 1-based "id" column before saving.
    pandasdf = count_app_def.toPandas()
    pandasdf.insert(loc=0, column="id",
                    value=list(range(1, pandasdf.shape[0] + 1)),
                    allow_duplicates=False)
    print(pandasdf)

    # Persist the aggregated result to the `count_result` table.
    myDBConnection = MyDBConnection()
    myDBConnection.save_db(tb_name='count_result', df=pandasdf)
    spark.stop()