# coding=UTF-8
""" 对kafka来的电费进行分析 """
import sys
from pyspark import SparkContext, SparkConf
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
import datetime
import time
import math
import redis

from conf import prod_config as config

db_n = 0

""" ================================ Common =================================== """

def cal_mean(rdd):
    """Compute the batch-wide mean usage, persist it, and store per-dorm deltas.

    Writes the mean to the 'period_mean' redis hash (read back later by
    find_outliers) and each dorm's deviation from the mean to 'mean_delta'.
    Exits the job when the batch is empty (kafka delivered no data).
    """
    dic = rdd.collectAsMap()
    values = list(dic.values())

    if not values:
        # Nothing arrived from kafka in this window. The original code reached
        # this state via ZeroDivisionError inside a bare except that also
        # swallowed redis errors; fail fast only on the genuinely empty batch.
        print('[ERROR] 除数为0，kafka没有传来数据')
        sys.exit(1)

    mean = sum(values) / len(values)
    set_to_name('period_mean', str(mean))

    # Per-dorm deviation from the batch mean. Insertion order is preserved,
    # so str() output matches the original in-place mutation of `dic`.
    deltas = {dorm: usage - mean for dorm, usage in dic.items()}
    hset_to_name('mean_delta', str(deltas))

def appendState(new_values, last_values):
    """updateStateByKey callback: extend the accumulated history with this batch.

    `last_values` is None on the key's first appearance; always returns a new
    list (never mutates either argument).
    """
    if last_values:
        return last_values + new_values
    return list(new_values)

""" =========================================================================== """

""" ================================ Redis ==================================== """

def hset_to_name(name, s):
    """Store *s* in redis hash *name*, keyed by the current timestamp.

    The field key defaults to "now" formatted as "%Y-%m-%d %H:%M:%S"; a
    success line is logged after the write.
    """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    client = redis.Redis(host='127.0.0.1', password="chenteam", port=6379, db=db_n)
    client.hset(name, timestamp, s)
    print('[INFO] 于{}存入{}库成功'.format(timestamp, name))


def set_to_name(name, v):
    """Record *v* in redis hash *name* under the current timestamp (no log line).

    NOTE(review): despite the name this uses HSET, mirroring hset_to_name —
    intentional, since readers (get_latest_key_from / get_value_by_key) fetch
    by timestamp field via hkeys/hget.
    """
    field = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    connection = redis.Redis(host='127.0.0.1', password="chenteam", port=6379, db=db_n)
    connection.hset(name, field, v)

def get_latest_key_from(name):
    """Return the newest timestamp field of redis hash *name*.

    Fields are "%Y-%m-%d %H:%M:%S" timestamps, so the lexicographic maximum is
    the most recent. The bookkeeping field 'created' is skipped when present.
    Raises IndexError when the hash has no timestamp fields (callers treat
    that as "no value yet").
    """
    r = redis.Redis(host='127.0.0.1', password="chenteam", port=6379, db=db_n)
    keys = [k.decode() for k in r.hkeys(name)]
    try:
        keys.remove('created')  # not a timestamp — drop it
    except ValueError:
        # 'created' simply isn't present; nothing to do.
        # (Narrowed from a bare except that swallowed every error.)
        pass
    # sorted(...)[0] keeps the original IndexError contract on empty hashes.
    return sorted(keys, reverse=True)[0]

def get_value_by_key(name, key):
    """Fetch field *key* from redis hash *name* (bytes, or None when missing)."""
    connection = redis.Redis(host='127.0.0.1', password="chenteam", port=6379, db=db_n)
    return connection.hget(name, key)


""" =========================================================================== """



""" ================================ KNN ====================================== """

def get_k_nearest_neighbors(dorm, k, exist_dorms):
    """Return up to *k* dorms from *exist_dorms* physically adjacent to *dorm*.

    Candidate rooms are probed alternately below/above the dorm's own room
    number (offsets -1, +1, -2, +2, ...), 2k-1 probes in total; the first k
    hits are returned in probe order.
    """
    prefix = dorm[:-3]
    # NOTE(review): the int()/str() round-trip drops leading zeros — assumes
    # room numbers are three digits >= 100; confirm against the dorm format.
    room_no = int(dorm[-3:])

    hits = []
    for step in range(1, 2 * k):
        # Alternating accumulated walk: -1, +2, -3, ... lands on r-1, r+1, r-2, ...
        room_no += step if step % 2 == 0 else -step
        candidate = prefix + str(room_no)
        if candidate in exist_dorms:
            hits.append(candidate)

    return hits[:k]


def find_outliers(rdd):
    """Score every dorm in the batch for abnormal electricity usage.

    Two signals are blended per dorm:
      * neighbor_score    — Euclidean distance between the dorm's usage and the
                            usages of its (up to 3) nearest neighbours,
                            normalised by the batch maximum;
      * period_mean_score — squared distance to the last stored campus-wide
                            mean, normalised likewise.
    abnormal_score = 0.3 * neighbor_score + 0.7 * period_mean_score.
    The (dorm, score) pairs are written to the 'abnormal_score' redis hash.
    """
    # Last recorded mean usage; best-effort — default to 0.0 when the hash is
    # missing, redis is unreachable, or the stored value is unparsable.
    period_mean = 0.0
    try:
        period_mean = float(get_value_by_key('period_mean', get_latest_key_from('period_mean')))
    except Exception:
        print('[WARN] period_mean未取到 取默认值0.0')
    print('[INFO] period_mean:{}'.format(period_mean))

    usage_by_dorm = rdd.collectAsMap()
    exist_dorms = list(usage_by_dorm.keys())

    stack = []
    for dorm, usage in usage_by_dorm.items():
        # Up to 3 adjacent rooms that also appear in this batch.
        neighbors = get_k_nearest_neighbors(dorm, 3, exist_dorms)
        if not neighbors:
            print('[ERROR] 没有邻居')
            continue
        neighbors_usage = [usage_by_dorm[n] for n in neighbors]

        # Euclidean distance from this dorm's usage to its neighbours' usages.
        # (The original also computed a mean-centred usage_list here that was
        # never used — dead code, removed.)
        distance = math.sqrt(sum((n - usage) ** 2 for n in neighbors_usage))

        stack.append({
            'dorm': dorm,
            'usage': usage,
            'distance': distance,
            'period_mean_distance': (usage - period_mean) ** 2,
        })

    # Robustness fix: the original crashed in max() when no dorm had neighbours.
    if not stack:
        print('[WARN] no dorm could be scored in this batch')
        return

    # Normalisation constants for the two signals.
    distance_max = max(item['distance'] for item in stack)
    period_mean_distance_max = max(item['period_mean_distance'] for item in stack)

    if distance_max <= 0.0 or period_mean_distance_max <= 0.0:
        print('[WARN] distance_max period_mean_distance_max出现0')
        hset_to_name('abnormal_score', "异常：出现0！")
        return

    print('[INFO] distance_max:{} period_mean_distance_max:{}'.format(distance_max, period_mean_distance_max))

    # Blend the two normalised signals with 3:7 weighting into the final score.
    for item in stack:
        item['neighbor_score'] = (item['distance'] / distance_max) * 100
        item['period_mean_score'] = (item['period_mean_distance'] / period_mean_distance_max) * 100
        item['abnormal_score'] = item['neighbor_score'] * 0.3 + item['period_mean_score'] * 0.7

    score_list = [(item['dorm'], item['abnormal_score']) for item in stack]
    hset_to_name('abnormal_score', str(score_list))

    
""" ================================== main =========================================== """


if __name__ == '__main__':
    print('[INFO] 程序提交于{}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    # Snappy codec matches the assembly jars supplied via --jars at submit time.
    conf = SparkConf().setAppName('KafkaCase1').set('spark.io.compression.codec','snappy')
    sc = SparkContext(conf=conf)
    sc.setLogLevel('OFF')

    # Micro-batch interval and other tunables come from the project config.
    scc = StreamingContext(sc, config['windowDuration'])
    scc.checkpoint('file:///home/chenteam/project/pyspark/cp/001')
    # CLI args: <zookeeper-quorum> <kafka-topic> (see RUN COMMAND at file end).
    zk, topic = sys.argv[1:]
    kvs = KafkaUtils.createStream(scc, zk, "spark-streaming-consumer", {topic: 1})

    # SECURITY NOTE(review): eval() on a raw kafka payload executes arbitrary
    # code if untrusted producers can write to the topic — ast.literal_eval or
    # json.loads would be safer. Assumes each message value is a dict literal
    # with keys 'building_id', 'roomid', 'time' ("%Y-%m-%d %H:%M:%S[.frac]")
    # and 'fee' — TODO confirm the producer's schema.
    # Keyed stream: "<building>-<room>" -> (timestamp-without-fraction, fee).
    lines = kvs.map(lambda x: eval(x[1])).map(lambda x: (str(x['building_id'])+'-'+x['roomid'], (x['time'].split('.')[0], x['fee'])))

    state_ds = lines.updateStateByKey(updateFunc=appendState)
    state_ds = state_ds.mapValues(lambda x: [x[0]] + x)     # duplicate the first sample so the x[-2] index below can never go out of range

    """ reduced_ds保存了每个寝室号所对应的时间跨度与用电总量信息 """
    # reduced_ds: per dorm, (seconds between the two most recent samples,
    # fee drop between them). fee looks like a prepaid balance, so
    # older - newer >= 0 means consumption — presumably; verify with producer.
    reduced_ds = state_ds.mapValues(
        lambda x: (
            time.mktime(time.strptime(x[-1][0], "%Y-%m-%d %H:%M:%S")) - 
            time.mktime(time.strptime(x[-2][0], "%Y-%m-%d %H:%M:%S")), 
            
            x[-2][1] - x[-1][1])
    ).filter(lambda x: x[1][1] >= 0.0)

     # """ old version """
     # # reduced_ds = lines.groupByKeyAndWindow(config['windowDuration'], config['slideDuration']).\
     # #     mapValues(lambda x: list(x)).\
     # #     mapValues(lambda x: x[0] - x[-1]).\
     # #     filter(lambda x: x[1]>=0)


    """ 保存到redis """
    # Persist the raw per-dorm (time-span, fee-drop) pairs for history/debugging.
    reduced_ds.foreachRDD(lambda rdd: hset_to_name("stack", str(rdd.collectAsMap())))
    

    """ 计算单位时间的用电消耗 """
    # Usage per sampleInterval; returns 0 when the two samples share a timestamp.
    unit_ds = reduced_ds.mapValues(lambda x: 0 if x[0] == 0.0 else x[1] / (x[0]/config['sampleInterval']))     # guard the division-by-zero case

    """ 计算寝室单位时间平均用电 """
    unit_ds.foreachRDD(cal_mean)


    """ abnormal_ds保存了超过阈值的寝室的信息与用电量 """
    # NOTE(review): unit_ds elements are (dorm, usage) tuples, so
    # x > config['warningThreshold'] compares a tuple against the threshold —
    # looks like it should be x[1] > threshold; confirm intent before changing.
    abnormal_ds = unit_ds.filter(lambda x: x>config['warningThreshold'])
    abnormal_ds.foreachRDD(lambda rdd: hset_to_name("abnormal", str(rdd.collectAsMap())))
    

    """ 计算离群点 """
    unit_ds.foreachRDD(find_outliers)


    unit_ds.pprint()


    scc.start()
    scc.awaitTermination()

'''
    RUN COMMAND:
    $ spark-submit --jars /home/chenteam/app/spark/jars/spark-streaming-kafka-0-8-assembly_2.11-2.4.0.jar test.py node:2181 fee

    $ spark-submit --jars /home/chenteam/app/spark/jars/spark-streaming-kafka-0-8-assembly_2.11-2.4.0.jar,/home/chenteam/lz4-1.3.0.jar test.py node:2181 test
'''
