#coding:UTF-8
import os
from typing import Iterable

import numpy as np
import pandas as pd
from pandas import DataFrame
from pyspark import Row
from pyspark.sql import SparkSession

from spark_utils import spark_rows_to_df

# The following two lines are only needed when testing locally on Windows
import findspark
findspark.init()

np.set_printoptions(threshold=np.inf)
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 1000)
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
'''
Fault condition: with no door openings, if the freezer evaporator's maximum
temperature from defrost-heater start to defrost end stays below 0°C, the
defrost heater is considered faulty.
'''

def jiareqi_for_each_p(rows: Iterable[Row]):
    """Per-partition entry point for ``RDD.mapPartitions``.

    Converts one Spark partition's rows into a pandas DataFrame and runs the
    defrost-heater fault detection on it.

    :param rows: rows of a single partition (pre-sorted by sn, timestamp).
    :return: iterable of faulty sn values; never ``None``.
    """
    pddf = spark_rows_to_df(rows)
    result = jiareqi_guzhang(pddf)
    # Bug fix: jiareqi_guzhang may return None for an empty partition, but
    # mapPartitions requires an iterable from every partition — iterating
    # None would raise a TypeError on the executor.
    return result if result is not None else []

def jiareqi_guzhang(dataframe: DataFrame):
    """Detect defrost-heater faults per device (sn).

    A candidate episode is a run of consecutive rows (after de-duplicating
    by timestamp) during which the defrost heater is on (column 4) and all
    four doors are closed (columns 6-9 all zero).  An episode longer than 5
    rows whose freezer-evaporator temperature (column 5) never reaches 0°C
    counts as one fault; an sn with at least 10 such faults is reported.

    Columns are addressed by POSITION, matching the SELECT order of the
    driving SQL:
      0 timestamp, 1 sn, 2 freezeroomsensortempreture, 3 compressorstatus,
      4 freezeheat, 5 freezeroomevaporationsensortempreture, 6 colddoor,
      7 changingdoor, 8 freezedoorb, 9 freezedoora

    :param dataframe: pandas DataFrame with the 10 columns above.
    :return: list of faulty sn values (empty when no data or no faults).
    """
    if len(dataframe) == 0:
        # Bug fix: the original fell through and returned None here, which
        # crashes mapPartitions (it must receive an iterable per partition).
        return []

    m = dataframe.fillna(0)
    error_file = []
    for sn in m.loc[:, ["sn"]].drop_duplicates()["sn"]:
        print("sn=========================================================:", sn)
        # Keep the latest reading per timestamp for this device.
        data = (m[m['sn'] == sn]
                .drop_duplicates(keep='last', subset=['timestamp'])
                .reset_index(drop=True))
        segment = []     # rows of the current heater-on / doors-closed run
        fault_count = 0
        # NOTE(review): the final row is never inspected (range stops at
        # len-1), so a run reaching the end of the data is discarded —
        # behavior preserved from the original implementation.
        for i in range(0, len(data) - 1):
            row = data.iloc[i].values
            if row[4] == True and (row[6] == 0 and row[7] == 0
                                   and row[8] == 0 and row[9] == 0):
                # Keep (sn, heater flag, evaporator temperature) for this run.
                segment.append(data.iloc[i, [1, 4, 5]].values)
            else:
                if len(segment) > 5:
                    seg = np.array(segment)
                    # Column 2 of the segment is the evaporator temperature;
                    # never reaching 0°C during defrost indicates a fault.
                    if np.max(seg[:, 2]) < 0:
                        fault_count += 1
                segment = []
        if fault_count >= 10 and sn not in error_file:
            error_file.append(sn)

    print("app log -> error_file:", error_file)
    print("app log -> len:", len(error_file))
    return error_file

if __name__ == "__main__":
    # Create the Spark session (Hive support is needed for the ods.* table).
    spark = SparkSession.builder.appName("jiareqi app").enableHiveSupport().getOrCreate()
    # Driver/executor log verbosity.
    spark.sparkContext.setLogLevel("INFO")
    # Source query: one year of fridge telemetry for the listed materials.
    # Column order matters — jiareqi_guzhang indexes columns by position.
    sql = '''
            select 
                timestamp,sn,freezeroomsensortempreture,compressorstatus,
                freezeheat,freezeroomevaporationsensortempreture,colddoor,
                changingdoor,freezedoorb,freezedoora
            from
                ods.iot_fridge
            where 
                p_date between '2022-01-01' and '2022-12-31'
            and 
                material in ('9025444','9025442','9028072','9037194','9027209','9023854')
         '''
    # The SQL path works only on the EMR cluster; for local runs use the
    # parquet reader below instead.
    # Partition by sn so each device's history lands in one partition, then
    # sort within the partition so episodes are contiguous in time.
    df2 = (spark.sql(sql)
           .repartition(10000, "sn")
           .sortWithinPartitions("sn", "timestamp")
           .rdd
           .mapPartitions(jiareqi_for_each_p)
           # Bug fix: mapPartitions yields bare sn strings and RDD.toDF()
           # cannot infer a schema from str — wrap each value in a Row.
           .map(lambda sn: Row(sn=sn))
           .toDF())

    # Local parquet test path (Windows):
    # spark.read.parquet("C:\\Users\\ljc\\Desktop\\tmp\\569").repartition(2, "sn").sortWithinPartitions("timestamp").rdd.foreachPartition(jiareqi_for_each_p)
    print("done!")
