import os

# Spark environment must be configured BEFORE findspark.init() and before any
# pyspark import, so this block deliberately interleaves statements and imports.
os.environ['JAVA_HOME'] = '/usr/local/jdk8'
os.environ['SPARK_HOME'] = '/usr/local/spark2/'
# BUG FIX: the original appended the literal string '$PYTHONPATH' — shell
# variable syntax is not expanded by Python. Append the real current value.
os.environ['PYTHONPATH'] = (
    '/usr/local/spark2/python:'
    '/usr/local/spark2/python/lib/py4j-0.10.7-src.zip:'
    + os.environ.get('PYTHONPATH', '')
)

import findspark

findspark.init()
import sys
import pandas as pd

import pyspark
from pyspark.sql import SparkSession
from pyspark import SparkContext
from db.mydb import MyDBConnection

# NOTE(review): original comment says the script does not work without this —
# presumably makes Spark's python/bin tools reachable at runtime; confirm.
sys.path.append('/usr/local/spark2/python/bin')
if __name__ == "__main__":
    print(len(sys.argv))

    # Build (or reuse) the SparkSession — only one instance exists per JVM.
    spark = SparkSession.builder.appName("appVisitCount").getOrCreate()
    try:
        # Directory of raw Flume-collected log files that have not been loaded
        # into Hive. The date segment could be computed in Python if needed.
        path = "/usr/logs/jobs_companies/20240728/"

        # Enumerate the directory through the Hadoop FileSystem API (via py4j).
        hadoop_fs = spark._jvm.org.apache.hadoop.fs.FileSystem.get(
            spark._jsc.hadoopConfiguration())
        statuses = hadoop_fs.listStatus(spark._jvm.org.apache.hadoop.fs.Path(path))
        # Keep only *.log files. endswith() replaces the original
        # find(".log") > 0, which also matched names like "a.log.tmp".
        app_files = [path + st.getPath().getName()
                     for st in statuses
                     if st.getPath().getName().endswith(".log")]

        sc = spark.sparkContext
        final_list = []  # (company_name, job_count) pairs across all files
        for app_file in app_files:
            print(app_file)

            # Each line looks like:
            # 002,002,前端开发,小米企业,新疆维吾尔自治区
            # i.e. "<post_id>,<company_id>,<job_name>,<company_name>,<region>"
            input_rdd = sc.textFile(app_file)

            # Split once per line (the original called split() three times) and
            # pick the fields we aggregate on: (company_id, post_id, job_name).
            parsed_rdd = (input_rdd
                          .map(lambda line: line.split(','))
                          .filter(lambda f: len(f) >= 3)  # guard malformed lines
                          .map(lambda f: (f[1], f[0], f[2])))

            # Drop rows with empty fields. split() never yields None, so the
            # original "is not None" tests were dead; the group key (x[2]) is
            # now actually required to be non-empty as well.
            cleaned_data = parsed_rdd.filter(lambda x: x[0] and x[1] and x[2])

            # Re-key by job name, then stream-append company ids as a
            # comma-separated string per key.
            result_data = cleaned_data.map(lambda x: (str(x[2]), x[0]))
            grouped_rdd = result_data.reduceByKey(lambda a, b: a + "," + b)

            # Collect this file's results to the driver and print them.
            result = grouped_rdd.collect()
            print(result)
            print("_________________________________________________________________-")

            # Turn each "a,b,c" value string into its element count and
            # accumulate; extend() avoids rebuilding the list each iteration.
            final_list.extend((group_name, len(sum_value.split(',')))
                              for group_name, sum_value in result)

        print(final_list)
        print("7777777777777777777777777777777777777")

        # Sum counts per item across files. as_index=False keeps 'item' as a
        # regular column so the frame can be written to the database as-is.
        # (The original index=range(len(...)) was the default and is dropped.)
        df = pd.DataFrame(final_list, columns=["item", "countno"])
        new_df = df.groupby('item', as_index=False).sum()
        # Prepend a 1-based row number and append a result-type tag column.
        new_df.insert(loc=0, column='cr_no', value=range(1, len(new_df) + 1))
        new_df.insert(loc=3, column='type', value="jobsByCompanyCount")

        print(new_df)
        myDBConnection = MyDBConnection()
        myDBConnection.save_db(tb_name='count_result', df=new_df)
    finally:
        # Always release the Spark session, even if processing fails.
        spark.stop()