import os
from django.shortcuts import HttpResponse
import json,time
import findspark,time
import sys
import pandas as pd
import asyncio
# NOTE(review): installs a fresh event loop at import time, but nothing below
# awaits on it (the background work uses threading) -- looks vestigial;
# confirm before removing.
asyncio.set_event_loop(asyncio.new_event_loop())
import threading
import asyncio
# NOTE(review): `time` and `asyncio` are each imported twice above -- harmless
# but worth cleaning up.

# Order matters: JAVA_HOME / SPARK_HOME / PYTHONPATH must be set before
# findspark.init() and before any pyspark import.
os.environ['JAVA_HOME'] = '/usr/local/jdk8'
os.environ['SPARK_HOME'] = '/usr/local/spark2/'
os.environ['PYTHONPATH'] = '/usr/local/spark2/python:/usr/local/spark2/python/lib/py4j-0.10.7-src.zip:$PYTHONPATH'
findspark.init()
import pyspark
from pyspark.sql import SparkSession
from pyspark import SparkContext
from db.mydb import MyDBConnection
# from mydb import MyDBConnection
from myspark.step.recommend import *
from db.dao.recommendList import RecommendList
from db.utils.dfutils import load_json_to_object
from db.dao.player import Player


# Translation of the note below: recommendation flow -- if MySQL already holds
# a cached value, return it; if not, insert a naive list and return that.
# While the hit counter is below 3 it is incremented; once it reaches 3 a
# background thread recomputes the list with Spark and stores the result.
'''
推荐逻辑，如果mysql中有值 获取mysql，mysql 没有值,随便一个逻辑插入 返回，如果计数小于3 计数 加1 ，如果等于3，开启线程异步 spark 查询录入数据
'''
sys.path.append('/usr/local/spark2/python/bin')  # Original note says it never works without this -- presumably so Spark's python binaries are found; verify.
def recommend(request):
    """Django view: return job recommendations for ``request.GET['j_id']``.

    Cache-aside flow backed by the ``recommend`` table:
      * no cached row  -> seed a naive "all jobs" list, persist and return it
      * rem_count < 3  -> return the cached list and bump ``rem_count``
      * rem_count >= 3 -> return the cached list and launch a background
                          thread that rebuilds it via Spark + ItemCF, then
                          resets ``rem_count`` to 0

    Returns an ``application/json`` HttpResponse shaped
    ``{"code": 200, "msg": ..., "jobss": [job rows]}``.
    """
    myDBConnection = MyDBConnection()
    j_id = request.GET.get("j_id")
    session = myDBConnection.get_session()
    # BUGFIX: the original passed `A and B` to filter(); Python's `and` cannot
    # combine two SQLAlchemy expressions into a SQL AND. Pass both criteria as
    # separate filter() arguments so they are AND-ed in SQL.
    recommendList = session.query(RecommendList).filter(
        RecommendList.rem_key == j_id,
        RecommendList.rem_type == 'jobs',
    ).first()
    msg = f'现在查看的是推荐的球员成功'
    # BUGFIX: the original initialized the unused key "playerids"; every branch
    # actually writes "jobids".
    rep_json = {"code": 200, "msg": msg, "jobids": None}

    if recommendList is None:
        # Cold start: every named job becomes a candidate with a neutral score.
        players_json_array = myDBConnection.fetch_data_by_sql(
            f"select j_id,j_name from jobs where j_name is not null")
        playerid_list = [[f"{players_json['j_id']}:{players_json['j_name']}", "1.0"]
                         for players_json in players_json_array]
        rem_value = json.dumps(playerid_list, ensure_ascii=False)
        recommendList_json_obj = {'rem_key': j_id, 'rem_value': rem_value,
                                  'rem_count': '0', 'rem_type': 'jobs'}
        # Convert the plain dict into a RecommendList entity and persist it.
        recommendList = load_json_to_object(recommendList_json_obj, RecommendList())
        myDBConnection.insert_entity(recommendList)
        rep_json['jobids'] = str(recommendList.rem_value)
    elif int(recommendList.rem_count) < 3:
        rep_json['jobids'] = recommendList.rem_value
        # Bump the hit counter on the fetched entity and commit the change.
        recommendList.rem_count = int(recommendList.rem_count) + 1
        session.commit()
    else:
        rep_json['jobids'] = recommendList.rem_value

    if recommendList is not None and int(recommendList.rem_count) >= 3:
        def _spark_refresh():
            """Recompute recommendations from raw log files (runs in a thread)."""
            # BUGFIX: the original called asyncio.sleep(1) without awaiting it,
            # which is a no-op; a plain blocking sleep is what was intended.
            time.sleep(1)
            # Only one SparkSession per JVM; getOrCreate() reuses any existing one.
            spark = SparkSession.builder.appName("appVisitCount").getOrCreate()
            # Flume landing directory (not yet loaded into Hive); the date part
            # could be computed instead of hard-coded -- TODO confirm.
            path = "/usr/logs/send_logs/20240729/"
            fs = spark._jvm.org.apache.hadoop.fs.FileSystem.get(spark._jsc.hadoopConfiguration())
            list_status = fs.listStatus(spark._jvm.org.apache.hadoop.fs.Path(path))
            app_files = [path + status.getPath().getName() for status in list_status
                         if status.getPath().getName().find(".log") > 0]

            sc = spark.sparkContext
            final_list = []
            for app_file in app_files:
                # Each line is CSV; keep columns 2 (item), 1 (user) and 4 (flag).
                input_rdd = sc.textFile(app_file)
                parsed_rdd = input_rdd.map(lambda line: (
                    line.split(',')[2], line.split(',')[1], line.split(',')[4]))
                # Drop rows with missing keys or a '0' flag.
                cleaned_data = parsed_rdd.filter(
                    lambda x: x[0] is not None and x[1] is not None and x[2] != '0')
                final_list += cleaned_data.map(lambda x: (x[0], x[1])).collect()

            df = pd.DataFrame(final_list, columns=["item", "league"])
            spark.stop()

            # Feed the collected (item, user) pairs into the ItemCF trainer.
            train, test = LoadGameEventData(df, 0.8)
            itemCF = ItemCF(train, similarity='iuf', norm=True)
            itemCF.train()

            from collections import Counter
            # NOTE(review): interpolating request input into SQL is injection-
            # prone; int() at least forces the id to be numeric here.
            st_id_json = myDBConnection.fetch_data_by_sql(
                f"select distinct st_id from send_logs where j_id={int(j_id)} and st_id is not null")
            st_ids = [row['st_id'] for row in st_id_json]
            # Merge per-user recommendations (top 2 items from the 3 most
            # similar neighbours) by summing scores across users.
            merged = Counter()
            for st_id in st_ids:
                merged += Counter(itemCF.recommend(st_id, 2, 3))
            sorted_list = sorted(merged.items(), key=lambda item: item[1], reverse=True)
            dict_json = json.dumps(sorted_list, ensure_ascii=False)

            # Persist the refreshed list and reset the counter. (The original
            # also wrote rep_json here, but the HTTP response has long been
            # sent by then, so that racy write is dropped.)
            recommendList.rem_count = 0
            recommendList.rem_value = dict_json
            session.commit()

        # BUGFIX: replaced the one-off decorator with a direct fire-and-forget
        # thread start -- same behavior, no argument-swallowing wrapper.
        threading.Thread(target=_spark_refresh).start()

    # rem_value is a JSON array of ["<j_id>:<j_name>", score] pairs; parse it
    # with json.loads instead of the original fragile bracket/quote stripping.
    try:
        pairs = json.loads(rep_json['jobids'])
        where_str = [str(pair[0]).split(':')[0] for pair in pairs]
    except (TypeError, ValueError):
        where_str = []
    # str(list) renders as "['1', '2']"; swap brackets for parens to form the
    # SQL IN tuple, exactly as the original did.
    where_in_str = str(where_str).replace('[', '(').replace(']', ')')
    if where_str:
        players_json = myDBConnection.fetch_data_by_sql(
            f"select * from jobs where j_id in {where_in_str} ")
    else:
        # Avoid emitting invalid SQL ("in ()") when nothing could be parsed.
        players_json = []
    rep_json = {"code": 200, "msg": msg, "jobss": players_json}
    return HttpResponse(json.dumps(rep_json, ensure_ascii=False), content_type="application/json")

if __name__ == "__main__":
    # This module is a Django view module; there is nothing to run standalone.
    pass