import os

# Spark environment must be configured BEFORE findspark.init() and the
# pyspark imports below: findspark reads SPARK_HOME to locate the install.
os.environ['JAVA_HOME'] = '/usr/local/jdk8'
os.environ['SPARK_HOME'] = '/usr/local/spark2/'
# NOTE(review): '$PYTHONPATH' here is a literal string — os.environ performs
# no shell expansion, so the pre-existing PYTHONPATH is NOT appended. Confirm
# whether os.environ.get('PYTHONPATH', '') was intended.
os.environ['PYTHONPATH'] = '/usr/local/spark2/python:/usr/local/spark2/python/lib/py4j-0.10.7-src.zip:$PYTHONPATH'

import findspark,time

findspark.init()  # makes the pyspark package importable from SPARK_HOME
import sys
import pandas as pd

import pyspark
from pyspark.sql import SparkSession
from pyspark import SparkContext
from db.mydb import MyDBConnection
# from mydb import MyDBConnection
from myspark.step.recommend import *

# Original note was truncated ("不加始终" ≈ "without this, it always ...");
# presumably required so Spark's python/bin scripts resolve — TODO confirm.
sys.path.append('/usr/local/spark2/python/bin')
if __name__ == "__main__":
    print(len(sys.argv))

    # Build (or reuse) the SparkSession — only one instance exists per JVM.
    spark = SparkSession.builder.appName("appVisitCount").getOrCreate()

    # Raw flume-collected event files (not yet loaded into Hive). The date
    # component could be computed in Python when needed.
    path = "/usr/logs/gameevents/20240725/"

    # List every *.log file in the HDFS directory via the JVM Hadoop API.
    fs = spark._jvm.org.apache.hadoop.fs.FileSystem.get(spark._jsc.hadoopConfiguration())
    list_status = fs.listStatus(spark._jvm.org.apache.hadoop.fs.Path(path))
    app_files = [path + status.getPath().getName()
                 for status in list_status
                 if status.getPath().getName().find(".log") > 0]

    sc = spark.sparkContext
    final_list = []
    for app_file in app_files:
        # Each line is CSV with (at least) 10 fields, e.g.:
        #    0                                  1    2    3        4               5        6   7    8   9
        # 4765f230-f0e6-4c5d-81cc-a411c4832f47,过人,0007,0006,2024-07-25 19:40:02,国际米兰,武磊,451,后腰,德甲
        # Field 1: match event, 2: player id, 6: player name,
        # 7: formation, 9: league.
        input_rdd = sc.textFile(app_file)

        # Split each line exactly ONCE (the original split the same line four
        # times per record) and drop malformed lines with too few fields
        # instead of crashing the whole job with an IndexError.
        fields_rdd = input_rdd.map(lambda line: line.split(','))
        parsed_rdd = (fields_rdd
                      .filter(lambda f: len(f) >= 10)
                      .map(lambda f: (f[2] + ':' + f[6], f[1], f[7], f[9])))

        # Keep only goal events ('进球') played in a '442' formation with a
        # non-empty "player_id:player_name" key. (split() never yields None,
        # so the original None checks were redundant.)
        cleaned_data = parsed_rdd.filter(
            lambda x: x[0] and x[1] == '进球' and x[2] == '442')
        result_data = cleaned_data.map(lambda x: (x[0], str(x[3])))

        # extend() instead of repeated `final_list = final_list + result`,
        # which copies the accumulated list on every file (quadratic).
        final_list.extend(result_data.collect())

    print(final_list)
    df = pd.DataFrame(final_list, columns=["item", "league"])
    spark.stop()

    # 80/20 train/test split, then train item-based CF with inverse user
    # frequency ('iuf') similarity and normalization.
    train, test = LoadGameEventData(df, 0.8)
    print("train data size: %d, test data size: %d" % (len(train), len(test)))
    itemCF = ItemCF(train, similarity='iuf', norm=True)
    itemCF.train()

    # recommend(user, N, K): N = number of items to recommend,
    # K = number of most similar neighbours to consult.
    print(itemCF.recommend('法甲', 4, 3))
