#coding:UTF-8
from typing import Iterable
import datetime
import time
import numpy as np
import pandas as pd
from pandas import DataFrame
from pyspark import Row
from pyspark.sql import SparkSession
from spark_utils import spark_rows_to_df
# The following two lines are required when testing in a local Windows environment

import findspark
findspark.init()

from pyspark.sql.functions import *
from pyspark.sql.types import *
'''
Fault rule: with no door openings, if the maximum freezer-evaporator
temperature from defrost-heater start until defrost end stays below 0 degC,
flag a freeze-heater fault.
'''
def jiareqi_for_each_p(rows: Iterable[Row]):
    """Partition worker for RDD.mapPartitions.

    Converts the partition's Spark rows to a pandas DataFrame (via the
    project helper ``spark_rows_to_df``), runs the heater fault detection,
    and returns the resulting list of fault records (possibly empty).
    """
    partition_df = spark_rows_to_df(rows)
    fault_records = jiareqi_guzhang(partition_df)
    print("ttttt:", fault_records)  # debug trace of this partition's results
    return fault_records
def filter(row):
    """Predicate for ``RDD.filter``: keep only non-empty partition results.

    NOTE(review): this intentionally shadows the builtin ``filter``; the
    name cannot be changed because the driver passes it by name
    (``.filter(filter)``).

    Args:
        row: a sized sequence (one partition's list of fault records).

    Returns:
        bool: True when ``row`` contains at least one element.
    """
    return len(row) != 0

def jiareqi_guzhang(dataframe):
    """Detect freezer-heater faults per device (sn) in a pandas DataFrame.

    For each sn, rows are de-duplicated on timestamp and scanned in order.
    While the freeze heater reads on (``freezeheat == True``) and all four
    door sensors read 0, rows are accumulated into one heating episode.
    When a non-qualifying row ends the episode and the episode spans more
    than 5 samples, it is flagged as a fault if the freezer-evaporator
    temperature never reached 0 degC during the episode (max < 0).

    Returns a list of 7-element records:
    ``[sn, start_ts, end_ts, "freezeHeat_Fault", "http", detect_time, p_date]``
    where ``detect_time`` is the current local wall-clock time and ``p_date``
    is the date part of the episode's last timestamp. Returns [] for empty
    input.
    """
    if len(dataframe) == 0:
        return []
    columns = ['sn', 'timestamp', 'freezeroomsensortempreture', 'compressorstatus',
               'freezeheat', 'freezeroomevaporationsensortempreture',
               'colddoor', 'changingdoor', 'freezedoorb', 'freezedoora']
    frame = dataframe.loc[:, columns].fillna(0)
    faults = []
    for sn in frame['sn'].drop_duplicates():
        device = frame[frame['sn'] == sn]
        device = device.drop_duplicates(keep='last', subset=['timestamp'])
        device = device.reset_index(drop=True)
        episode = []
        # NOTE: the range stops one row short, so the final row never joins
        # an episode and a run that reaches the end of the data is never
        # flushed — same as the original implementation.
        for i in range(len(device) - 1):
            row = device.iloc[i]
            doors_closed = (row['colddoor'] == 0 and row['changingdoor'] == 0
                            and row['freezedoorb'] == 0 and row['freezedoora'] == 0)
            if row['freezeheat'] == True and doors_closed:
                # keep [timestamp, freezeheat, evaporator temp] for this sample
                episode.append(device.iloc[i, [1, 4, 5]].values)
            else:
                if len(episode) > 5:
                    ep = np.array(episode)
                    # Fault: evaporator never rose to 0 degC while heating.
                    if np.max(ep[:, 2].astype(float)) < 0:
                        faults.append([sn, ep[:, 0][0], ep[:, 0][-1],
                                       "freezeHeat_Fault", "http",
                                       time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
                                       ep[:, 0][-1].split(' ')[0]])
                episode = []
    return faults

if __name__ == "__main__":
    # Create the Spark session (local mode, all cores).
    spark = SparkSession.builder.appName("jiareqi app").master("local[*]").getOrCreate()
    # spark = SparkSession.builder.appName("jiareqi app").enableHiveSupport().getOrCreate()
    # Set the log print level.
    spark.sparkContext.setLogLevel("INFO")
    # SQL for reading the data (cluster path; currently unused — see below).
    # NOTE(review): in Spark SQL, the double-quoted "sn", "timestamp", ...
    # are string literals, not column references — verify/fix before
    # re-enabling the spark.sql(sql) line below.
    sql = '''
                select 
      * 
  from (
    select 
        "sn", "timestamp", "freezeroomsensortempreture", "compressorstatus", "freezeheat",
        "freezeroomevaporationsensortempreture", "colddoor", "changingdoor", "freezedoorb", "freezedoora",
        count(1) over(partition by sn,p_date) as day_count,
        count(1) over(partition by sn,substr(p_date,1,7)) as month_count,
        substr(p_date,1,7),model
    from
      (
      select 
        distinct * 
      from
          ods.iot_fridge
      where 
            p_date between '2022-01-01' and '2022-12-31'
      and 
          material in ('9036661','9034962')
      
      )
  )
  where day_count >=100 and month_count>=3000
             '''
    # The SQL read path only works on the EMR cluster; when running locally,
    # use the file read on the next line instead.
    # spark_df = spark.sql(sql).repartition(10000,"sn").sortWithinPartitions("sn","timestamp")#.rdd.foreachPartition(jiareqi_for_each_p)
    spark_df = spark.read.parquet("C:\\Users\\ljc\\Downloads\\part-00045-00004bf1-1649-4956-aa30-b400c0185d31.c000.snappy.parquet").repartition(5, "sn").sortWithinPartitions("sn","timestamp")  # .rdd.foreachPartition(jiareqi_for_each_p)

    # Output schema for the fault records produced by jiareqi_for_each_p.
    schema = StructType([
        StructField('sn', StringType()),
        StructField('start_time', StringType()),
        StructField( 'end_time', StringType()),
        StructField('fault_code', StringType()),
        StructField('protocol', StringType()),
        StructField('detect_time', StringType()),
        StructField('p_date', StringType())
    ])
    '''
    spark_df = spark.read.option('header', 'true').csv("D:\\虹美\\code\\jiareqi_data.csv").repartition(2, "sn").sortWithinPartitions("timestamp")#.rdd.foreachPartition(jiareqi_for_each_p)
    spark_df.rdd.mapPartitions(jiareqi_for_each_p).toDF(schema).repartition(1).show()#.option("headen", "true").mode("Append").csv("D:\\虹美\\code")
    #spark_df.write.csv("D:\\虹美\\code")
    '''
    # Run the per-partition fault detection, drop empty partition results,
    # and expose the fault records as a temp view for the insert below.
    spark_df.rdd.mapPartitions(jiareqi_for_each_p).filter(filter).toDF(schema).repartition(1, "p_date").createOrReplaceTempView("t3")

    spark.sql("insert into dws.fridge_iot_devices_error_1d select * from t3")

    # NOTE(review): sleeps ~17 minutes before exiting — presumably to keep
    # the Spark UI alive for inspection; confirm whether this is needed.
    time.sleep(1000)
    print ("done!")