import os

# Spark runtime locations must be exported BEFORE findspark.init() runs,
# because findspark reads them to locate the py4j / pyspark libraries.
os.environ['JAVA_HOME'] = '/usr/local/jdk8'
os.environ['SPARK_HOME'] = '/usr/local/spark2/'
# NOTE: os.environ assignment does NOT expand shell variables, so the old
# value must be appended explicitly (writing '$PYTHONPATH' would store that
# literal text and clobber any existing PYTHONPATH).
os.environ['PYTHONPATH'] = (
    '/usr/local/spark2/python:'
    '/usr/local/spark2/python/lib/py4j-0.10.7-src.zip:'
    + os.environ.get('PYTHONPATH', '')
)

import findspark

findspark.init()
import sys
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import count, isnull
from pyspark.sql.types import IntegerType
import pyspark.sql.functions as psf
from pyspark.sql.types import *

# from mydb import MyDBConnection

# Required so the pyspark helper scripts are importable — presumably the job
# fails to start without it; TODO(review) confirm it is still needed.
sys.path.append('/usr/local/spark2/python/bin')
if __name__ == "__main__":
    # Count page visits per product name (p_name) across the raw Flume log
    # files of one day, then hand the ranked result to pandas for output.
    print(len(sys.argv))

    # Build (or reuse) the SparkSession — only one instance exists per JVM.
    spark = SparkSession.builder.appName("appVisitCount").getOrCreate()

    # Directory written by the Flume collector (before any Hive LOAD).
    # The date component can be computed in Python when parameterizing.
    path = "/usr/logs/gamerecords/20240721/"

    # List the *.log files via the JVM Hadoop FileSystem API so the job
    # works against HDFS, not just the local filesystem.
    fs = spark._jvm.org.apache.hadoop.fs.FileSystem.get(spark._jsc.hadoopConfiguration())
    list_status = fs.listStatus(spark._jvm.org.apache.hadoop.fs.Path(path))
    app_files = [path + status.getPath().getName()
                 for status in list_status
                 if status.getPath().getName().find(".log") > 0]

    # Explicit schema: the raw logs carry no header row, so every column is
    # declared up front instead of relying on inference.
    schema = StructType([
        StructField("git_id", StringType(), True),
        StructField("git_name", StringType(), True),
        StructField("p_id", StringType(), True),
        StructField("t_id", StringType(), True),
        StructField("git_time", StringType(), True),
        StructField("t_name", StringType(), True),
        StructField("p_name", StringType(), True),
    ])

    # Start from a truly EMPTY frame. Seeding with a row of empty strings
    # would survive the isNotNull() filter below and skew the counts.
    app_df = spark.createDataFrame([], schema=schema)

    for app_file in app_files:
        print(app_file)
        # Each log file is tab-delimited text without a header; read it as
        # CSV with the explicit schema so the union below is well-typed.
        app_df_new = (spark.read.format("csv")
                      .option("header", "false")
                      .option("delimiter", "\t")
                      .schema(schema)
                      .load(app_file))
        # DataFrames are immutable: union() returns a NEW frame, so the
        # result must be reassigned or the loop accumulates nothing.
        # (union replaces the deprecated unionAll.)
        app_df = app_df.union(app_df_new)

    print(app_df)

    # Group by product name, count occurrences, and rank descending.
    count_app_df = (app_df
                    .select("git_id", "git_name", "p_id", "t_id",
                            "git_time", "t_name", "p_name")
                    .where(app_df['p_name'].isNotNull())
                    .groupBy("p_name")
                    .agg(psf.count("p_name").alias("countno"))
                    .orderBy("countno", ascending=False))

    # count() is an action — it triggers execution of the plan above.
    print("total rows = %d" % (count_app_df.count()))
    print(count_app_df)

    # Collect to the driver as a pandas frame and prepend a 1-based id
    # column for downstream storage.
    pandasdf = count_app_df.toPandas()
    index_list = list(range(1, pandasdf.shape[0] + 1))
    pandasdf.insert(loc=0, column="id", value=index_list, allow_duplicates=False)
    print(pandasdf)
    # myDBConnection = MyDBConnection()
    # myDBConnection.save_db(tb_name='count_result',df=pandasdf)
    spark.stop()