# Compute heat-exchange-station clustering data for the secondary-network
# supply-temperature forecast page; results are read back by the API service.

import datetime
import time
from pymongo import MongoClient
import requests
import pandas as pd
import moduleAiMongdb
import moduleGeneral
import moduleAthena
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from Bio.Cluster import kcluster


# Fetch the full list of heat-exchange station units from Athena.
def get_station_unit_list():
    """Return all distinct station-unit names from the data warehouse."""
    sql = '''SELECT
        distinct(name)
        FROM
        "datawarehouse".
        "tonghua_ods_station_unit";
    '''
    frame = moduleAthena.get_athena_data_all_db('datawarehouse', sql)

    # Flatten the single-column result into a plain Python list.
    return frame['name'].values.tolist()


# Pull one hour of station history from Athena, join it with the previous
# hour, and derive per-hour consumption deltas.
def get_data_hour(station_unit_list, work_hour):
    """Query Athena for *work_hour* and for the hour before it.

    station_unit_list: already-quoted, comma-separated station names,
        ready to be dropped into a SQL ``IN (...)`` clause.
    work_hour: datetime identifying the hourly partition to read.

    Returns a DataFrame joined per station with two derived columns:
    xsrh (hourly heat, GJ) and xsll (hourly flow, t/h); NaN rows dropped.
    """

    def _partitions(ts):
        # Athena partitions are zero-padded year/month/day/hour strings.
        return (str(ts.year).zfill(4), str(ts.month).zfill(2),
                str(ts.day).zfill(2), str(ts.hour).zfill(2))

    sql = '''
    SELECT data_time,station_name,ygw,yhw,egw,ehw,(egw-ehw) as ewc,eljrl,eljll
    FROM "datawarehouse"."dwh_dws_station_hourly_data"  
    where station_name in (%s)
    and (egw+ehw)>0
    and partition_0='%s'
    and partition_1='%s'
    and partition_2='%s'
    and partition_3='%s'
    order by station_name    
    ;
    ''' % ((station_unit_list,) + _partitions(work_hour))

    res_workhour = moduleAthena.get_athena_data_all_db('datawarehouse', sql)

    prev_hour = work_hour + datetime.timedelta(hours=-1)
    sql = '''
        SELECT data_time,station_name,eljrl as eljrl_pre,eljll as eljll_pre
        FROM "datawarehouse"."dwh_dws_station_hourly_data"  
        where station_name in (%s)
        and (egw+ehw)>0
        and partition_0='%s'
        and partition_1='%s'
        and partition_2='%s'
        and partition_3='%s'    
        order by station_name    
        ;
        ''' % ((station_unit_list,) + _partitions(prev_hour))

    res_workhour_pre = moduleAthena.get_athena_data_all_db('datawarehouse', sql)

    # Inner join pairs each station's current row with its previous-hour
    # cumulative meter counters.
    data_all = res_workhour.merge(res_workhour_pre,
                                  left_on=['station_name'],
                                  right_on=['station_name'])

    # Drop any row with missing values before computing deltas.
    data_all.dropna(axis=0, how='any', inplace=True)

    # Cumulative counter minus previous hour's counter -> hourly consumption.
    data_all['xsrh'] = data_all['eljrl'] - data_all['eljrl_pre']
    data_all['xsll'] = data_all['eljll'] - data_all['eljll_pre']

    return data_all


def make_forecast_hourly(company_id, company_code, station_unit_list, workHour):
    """Cluster one hour of station data and persist the result for one company."""
    # Quote every name so the list fits a SQL IN (...) clause.
    quoted_stations = "'" + "','".join(station_unit_list) + "'"

    # Pull the hour's joined data from Athena.
    hourly_data = get_data_hour(quoted_stations, workHour)

    # Feature columns used for clustering: temperatures, temperature
    # difference, and the derived hourly heat/flow deltas.
    feature_columns = ['yhw', 'egw', 'ehw', 'ewc', 'xsrh', 'xsll']
    heat_forecast = make_forecast(hourly_data, feature_columns, workHour)

    # Store the clustered result in MongoDB.
    insert_forecast_result(company_id, company_code, heat_forecast, workHour)

    print()  # blank line between hourly runs in the console log


# Choose the number of clusters k via the silhouette coefficient.
def make_k(data):
    """Return the k in [2, 4] whose clustering scores the highest
    average silhouette coefficient (cosine metric).

    data: DataFrame of the feature columns to cluster.
    """
    data_ndarray = data.values

    candidate_ks = range(2, 5)
    coef = []
    for clusters in candidate_ks:
        # Bio.Cluster k-means, uncentered-correlation distance,
        # best of 100 random restarts.
        clusterid, error, nfound = kcluster(data_ndarray, clusters, dist='u', npass=100)
        silhouette_avg = silhouette_score(data_ndarray, clusterid, metric='cosine')
        coef.append(silhouette_avg)

    # Map the best score back to its k.  NOTE(review): the original used
    # `i + 3`, which is off by one relative to range(2, 5) (index 0 <-> k=2)
    # and always returned one more cluster than the best-scoring k.
    e = [k for k, score in zip(candidate_ks, coef) if score == max(coef)]

    print('k值：', e)
    print('detail:', coef)

    return e[0]

# Run k-means, persist the cluster summary, and label every row.
def make_kmeans(data, k, workHour):
    """Cluster *data* into *k* groups with scikit-learn KMeans.

    Saves the per-cluster centers and sizes to MongoDB, then returns the
    input rows with an added 'cluster' label column.
    """
    # NOTE(review): the original passed n_jobs=4; that keyword was removed
    # in scikit-learn 1.0 and only controlled parallelism, never results,
    # so dropping it is result-identical and forward-compatible.
    mod = KMeans(n_clusters=k, max_iter=500)
    mod.fit_predict(data)  # labels are read from mod.labels_ below

    # Per-cluster summary: center coordinates plus member count.
    counts = pd.Series(mod.labels_).value_counts()
    centers = pd.DataFrame(mod.cluster_centers_)
    summary = pd.concat([centers, counts], axis=1)
    summary.columns = list(data.columns) + [u'count']

    print('聚类结果-类别数据\n', summary)

    # Persist the cluster-level summary to MongoDB.
    moduleAiMongdb.insert_ai_station_cluster_detail(summary, workHour)

    # Attach each row's cluster label alongside its feature values.
    labeled = pd.concat([data, pd.Series(mod.labels_, index=data.index)], axis=1)
    labeled.columns = list(data.columns) + ['cluster']
    print('聚类结果-每行类别\n', labeled)

    return labeled

def make_forecast(data_the_hour, mean_colume, workHour):
    """Pick k, cluster the feature columns, and return the hour's rows
    with their cluster labels re-attached.

    data_the_hour: full hourly DataFrame.
    mean_colume: names of the feature columns to cluster on.
    workHour: datetime passed through for persistence.
    """
    print(data_the_hour)

    features = data_the_hour[mean_colume]

    k = make_k(features)

    labeled = make_kmeans(features, k, workHour)

    # Join the labels back onto the full rows via the feature values (the
    # labeled frame carries only the feature columns plus 'cluster').
    # NOTE(review): original used left_on=right_on=mean_colume; `on=` is
    # equivalent.  The dead `if k > 5: print()` debug branch was removed.
    labeled = labeled.merge(data_the_hour, on=mean_colume)

    return labeled


# Persist the clustered hourly result to MongoDB.
def insert_forecast_result(company_id, company_code, heat_forecast, workHour):
    """Sort rows by cluster label and store them for the API to read."""
    ordered = heat_forecast.sort_values(axis=0, ascending=True, by=['cluster'])
    ordered = ordered.reset_index(drop=True)

    moduleAiMongdb.insert_ai_station_cluster_hour(company_id, company_code, ordered, workHour)
    print()  # console spacer


# Script entry point: cluster every sampled hour for one company.
if __name__ == '__main__':

    company_id = 100073
    company_code = '000082'

    station_unit_list = get_station_unit_list()

    # Sampled time points, e.g. '2021-01-25 20:00:00'.
    workHours = moduleGeneral.make_hours_list('2021-01-25 20:00:00', '2021-01-25 20:00:00')
    # workHours = moduleGeneral.make_hours_list('2021-01-19 20:00:00', '2021-01-31 20:00:00')

    for oneHour in workHours:
        # make_hours_list items index as (year, month, day, hour) strings.
        work_datetime = datetime.datetime(int(oneHour[0]), int(oneHour[1]),
                                          int(oneHour[2]), int(oneHour[3]), 0, 0)
        print(work_datetime)
        make_forecast_hourly(company_id, company_code, station_unit_list, work_datetime)
