# -*- coding: utf-8 -*-
# +
import pandas as pd 
import numpy as np 
import sys,os
# basePath = os.path.split(os.path.realpath(__file__))[0]
# sys.path.append(basePath)
from datetime import datetime
import matplotlib.pyplot as plt
from PIL import Image #元组顺序为（R,G.B）

from config import CONFIG
from functions import *

def basic_feature_encode(df):
    """Derive helper columns used by the cleaning pipeline (forecast/YB tables).

    - Parses '模型运行日期' (model run time) and '预测时间' (forecast time)
      using the same strict '%Y-%m-%d %H:%M:%S' format the original per-row
      strptime enforced, but vectorized via pd.to_datetime.
    - Adds 'date_sub': forecast lead time in whole days (floor of the delta).
    - Converts '地表温度（K）' from Kelvin to Celsius in place.

    Returns the mutated DataFrame.
    """
    fmt = '%Y-%m-%d %H:%M:%S'
    df['模型运行日期'] = pd.to_datetime(df['模型运行日期'], format=fmt)
    df['预测时间'] = pd.to_datetime(df['预测时间'], format=fmt)
    # timedelta .days floors, matching the original timedelta.days semantics
    df['date_sub'] = (df['预测时间'] - df['模型运行日期']).dt.days
    df['地表温度（K）'] -= 273.15
    return df

def force_convert_float(x):
    """Coerce x to float; return NaN for anything that cannot be converted.

    Fix: the original called np.float, a deprecated alias that was removed
    in NumPy 1.24 — the builtin float is the drop-in equivalent. The bare
    except is narrowed to the actual conversion failures.
    """
    try:
        return float(x)
    except (TypeError, ValueError, OverflowError):
        # e.g. None, non-numeric strings, or ints too large for a float
        return np.nan

def fill_na(df, features):
    """Fill NaNs in the feature columns using up to two passes.

    Pass 1: each NaN becomes the mean of the values directly above and
    below it (stays NaN if either neighbour is NaN). Pass 2: remaining
    NaNs copy the previous row's value. Rows that still contain NaNs
    afterwards are dropped.

    NOTE: np.argwhere yields positional (row, col) pairs, which matches
    the .iloc indexing used below, so this is index-agnostic.
    """
    df_temp = df[features].copy()
    df2, df3 = df_temp.shift(1), df_temp.shift(-1)
    df_mean = (df2 + df3) / 2
    # Pass 1: locate NaNs, fill each with the mean of the rows above/below
    locs = np.argwhere(np.isnan(df_temp.values))
    print('find na:',len(locs), len(set([i[0] for i in locs])))
    for loc in locs:
        df_temp.iloc[loc[0],loc[1]] = df_mean.iloc[loc[0],loc[1]]
    # Pass 2: fill remaining NaNs from the previous row
    locs = np.argwhere(np.isnan(df_temp.values))
    if len(locs) > 0:
        print('can not fillna by method 1:','find na:',len(locs), len(set([i[0] for i in locs])))
    for loc in locs:
        # Clamped at row 0: a NaN in the first row "fills" from itself and stays NaN
        df_temp.iloc[loc[0],loc[1]] = df_temp.iloc[loc[0]-1 if loc[0]-1 > 0 else 0,loc[1]]
    # Any NaNs that survived both passes cause their rows to be dropped below
    locs = np.argwhere(np.isnan(df_temp.values))
    if len(locs) > 0:
        print('can not fillna by method 2:','find na:',len(locs), len(set([i[0] for i in locs])))
    df.loc[:, features] = df_temp
    df = df.dropna(axis=0, how='any', subset=features) 
    return df

def ensure_contigous_dataset(df, features):
    """Placeholder for row interpolation that would guarantee a contiguous
    time series. Currently a deliberate no-op: returns df unchanged."""
    return df

def fix_wrong_value(df, features):
    """Detect and repair outliers in each feature column via first differences.

    A value is flagged when its first difference falls outside
    [Q1 - 6*IQR, Q3 + 6*IQR]. Runs of consecutive flagged indices are
    replaced by linear interpolation between the values bordering the run;
    runs touching the series boundary fall back to the column mean.

    NOTE(review): df_temp[start:end+1] uses plain [] slicing — positionally
    correct only for a default RangeIndex; a non-default integer index would
    write different rows. Confirm callers pass freshly read/reset frames.
    NOTE(review): the last flagged index is always re-interpolated after the
    main loop, even when it was already covered by a run inside the loop.
    """
    print('[INFO] Check wrong value start...')
    for fea in features:
        df_temp = df[fea].copy()
        diff = df_temp.diff()#.dropna()
        describe = diff.describe()
        # Differences outside 6x the inter-quartile range are outliers
        high = describe['75%'] + 6*(describe['75%'] - describe['25%'])
        low = describe['25%'] - 6*(describe['75%'] - describe['25%'])
        bad_index = diff[(diff > high) | (diff < low)].index
        if len(bad_index) == 0:
            continue
        i = 0
        # Interpolate each run of consecutive outliers; boundary runs use the mean
        while i < len(bad_index) - 1:
            n = 1
            start = bad_index[i]
            # Extend the run while the flagged indices stay consecutive
            while i+n < len(bad_index) and bad_index[i+n] == start + n:
                n += 1
            i += (n-1)
            end = bad_index[i]
            if start-1 in range(len(df_temp)) and end+1 in range(len(df_temp)):
                # Linear ramp between the good values bracketing the run
                value = np.linspace(df_temp[start-1], df_temp[end+1], n+2)[1:-1]
            else:
                value = [df_temp.mean()] * n
            df_temp[start:end+1] = value
            if start == end: 
                i += 1
        # Final flagged index gets the same neighbour-interpolation treatment
        if bad_index[-1]-1 in range(len(df_temp)) and bad_index[-1]+1 in range(len(df_temp)):
            value = np.linspace(df_temp[bad_index[-1]-1], df_temp[bad_index[-1]+1], 3)[1:-1]
        else:
            value = df_temp.mean()
        df_temp[bad_index[-1]] = value
#         if len(bad_index) > 0:
#             print('[INFO] wrong value fixed:', len(bad_index))
           # print(pd.concat([df.loc[bad_index,fea], df_temp[bad_index]], axis=1))
        df.loc[:,fea] = df_temp
    print('[INFO] Check wrong value end.')
    return df

def filt_date(df):
    """Drop rows whose 'qb_time' falls inside any configured deletion range.

    Bug fix: the original combined (qb_time > right) AND (qb_time < left),
    which is an empty set for any non-empty range (left <= right assumed),
    silently discarding every row. A row outside the range satisfies either
    condition, so the conditions must be OR-ed.
    """
    # df = df[(df['date_sub'] == CONFIG().DATASET_FILT_PARAMS['date_sub'])]
    for date_range_left, date_range_right in CONFIG().DATASET_FILT_PARAMS['to_delete_qb_ranges']:
        # Keep rows strictly outside [left, right]; assumes left <= right
        df = df[(df['qb_time'] < date_range_left) | (df['qb_time'] > date_range_right)]
    return df

def neutralize_danger_feature(df, features):
    """Coerce feature columns to numeric and neutralize mostly-invalid ones.

    Every feature cell is converted to a number (non-convertible cells become
    NaN). A feature column with more than 20% NaN after conversion is deemed
    unusable and replaced by a constant 0 column. The feature columns are
    re-appended after the remaining columns, as before.

    Fixes: removed the unused local df0; replaced the applymap over
    force_convert_float (which relied on np.float, removed in NumPy 1.24)
    with the vectorized pd.to_numeric; integer dropna threshold.
    """
    temp = df[features].apply(pd.to_numeric, errors='coerce')
    # Keep a column only if at least 80% of its values are valid.
    # Ceiling division expresses the same cutoff the float 8*len/10 did.
    min_valid = -(-8 * len(df) // 10)
    temp = temp.dropna(axis=1, thresh=min_valid)
    for fea in features:
        if fea not in temp:
            temp[fea] = 0  # neutralized: too many invalid values
    df = df.drop(labels=features, axis=1)
    return pd.concat([df, temp], axis=1)

def fix_SCD(df, station):
    """Back-fill incomplete daily (SCD) rows from the hourly SC measurements.

    For each row containing a NaN, the matching day's hourly records are
    aggregated: plain daily means for most features, and the daily maximum
    of the 8-hour rolling mean for O3. Days with fewer than 20 hourly
    records are skipped. Rows still containing NaNs are dropped.

    NOTE(review): temp[fea] = children[fea].mean() assigns the SAME daily
    mean to EVERY incomplete row — each day iteration overwrites the whole
    column, so only the last processed day's values survive. This looks
    like a bug (should target the row for `day`); confirm intent.
    NOTE(review): the lower bound compares '监测时间' as raw strings against
    `day` while the upper bound compares parsed datetimes — correct only if
    the string timestamps sort chronologically.
    """
    sc_data = pd.read_csv(os.path.join(CONFIG().GOOD_DATASET_PATH, '%s_SC.csv'%station))
    miss = df.isnull()
    temp = df[miss.any(axis=1)==True].copy()
    if len(temp) != 0:
        features = [CONFIG().RENAME_DICT[i] for i in CONFIG().SC_FEATURES[station] if CONFIG().RENAME_DICT[i] in list(temp)]
        for day in (list(temp['监测日期']) if '监测日期' in list(temp) else list(temp['实测日期'])):
            # Hourly records belonging to this calendar day
            children = sc_data[(sc_data['监测时间'] >= day) & (pd.to_datetime(sc_data['监测时间']) < (datetime.strptime(day, '%Y-%m-%d') + pd.Timedelta(days=1)))]
            if len(children) < 20:
                continue
            for fea in features:
                if 'O3_' != fea:
                    temp[fea] = children[fea].mean()
            # Daily O3 metric: maximum of the 8-hour rolling mean
            window_8_hours = children['O3_'].rolling(8).mean().dropna()
            a = max(window_8_hours) if len(window_8_hours)!=0 else 0
            temp['O3'] = a
        df[miss.any(axis=1)==True] = temp
    df = df.dropna(how='any', axis=0)
    return df

# Handles only the first two data types (YB / SC)
def data_clean(df, station, data_type):
    """Run the full cleaning pipeline on one station's raw table.

    Steps: derive helper columns (YB only), coerce features to numeric and
    disable mostly-invalid columns, fill NaNs, (placeholder) contiguity
    pass, outlier repair, and finally strip leftover index columns.
    """
    cfg = CONFIG()
    if data_type == 'YB':
        features = cfg.YB_FEATURES[station]
        # Helper columns used during cleaning, not necessarily for training
        df = basic_feature_encode(df)
    else:
        features = cfg.SC_FEATURES[station]
    # Columns: non-numeric cells -> NaN; >20% invalid disables the column
    df = neutralize_danger_feature(df, features)
    # Rows: NaN detection and filling
    df = fill_na(df, features)
    # Rows: keep the series contiguous (currently a no-op placeholder)
    df = ensure_contigous_dataset(df, features)
    # Rows: outlier detection and repair
    df = fix_wrong_value(df, features)
    # Drop 'Unnamed: *' index columns left over from earlier to_csv round-trips
    leftover = [col for col in list(df) if 'Unnamed' in col]
    return df.drop(labels=leftover, axis=1)

# Run once ----------------------------------------------------------------------
def generate_csv_dataset():
    """Dump every station's Excel sheets (forecast, hourly, daily) to CSV."""
    sheet_templates = {
        'YB': '监测点%s逐小时污染物浓度与气象一次预报数据',
        'SC': '监测点%s逐小时污染物浓度与气象实测数据',
        'SCD': '监测点%s逐日污染物浓度实测数据',
    }
    frames = {}
    for station in CONFIG().STATIONS:
        for suffix, template in sheet_templates.items():
            frames['%s_%s' % (station, suffix)] = pd.read_excel(
                CONFIG().FILE_DICT[station], sheet_name=template % station)
    for name, frame in frames.items():
        frame.to_csv(os.path.join(CONFIG().NEW_DATASET_PATH, name + '.csv'))
    print('all ok.')
    
# Run once ----------------------------------------------------------------------
def run_one_clean(tar):
    """Clean one raw CSV, rename its columns, and save to GOOD_DATASET_PATH."""
    print(tar)
    cfg = CONFIG()
    df = pd.read_csv(os.path.join(cfg.NEW_DATASET_PATH, tar))
    station = tar.split('_')[0]
    data_type = tar.split('_')[1].split('.')[0]
    df = data_clean(df, station=station, data_type=data_type)
    rename_map = cfg.RENAME_DICT
    df = df.rename(columns={col: rename_map[col] for col in list(df) if col in rename_map})
    df.to_csv(os.path.join(cfg.GOOD_DATASET_PATH, tar))
def run_one_fix_scd(tar):
    """Rename one daily CSV's columns, repair it from hourly data, and save."""
    print(tar)
    cfg = CONFIG()
    df = pd.read_csv(os.path.join(cfg.NEW_DATASET_PATH, tar))
    rename_map = cfg.RENAME_DICT
    df = df.rename(columns={col: rename_map[col] for col in list(df) if col in rename_map})
    df = fix_SCD(df, station=tar.split('_')[0])
    df.to_csv(os.path.join(cfg.GOOD_DATASET_PATH, tar))
    return 1
def clean_csv_dataset():
    """Clean all hourly CSVs in parallel; daily (SCD) repair is disabled."""
    hourly = [f for f in os.listdir(CONFIG().NEW_DATASET_PATH) if 'SCD' not in f]
    pool(run_one_clean, [(f,) for f in hourly])
    daily = [f for f in os.listdir(CONFIG().NEW_DATASET_PATH) if 'SCD' in f]
    # pool(run_one_fix_scd, [(f,) for f in daily])
    print('all ok.')
    
# Run once ----------------------------------------------------------------------
def _compose_grid(sub_path, files, width, height):
    """Stitch individual histogram PNGs (each width x height) into one grid.

    The column count is chosen near the square root of len(files)/3 (same
    heuristic as the original duplicated code); returns a PIL Image.
    """
    blocks = (len(files) - 1) // 3 + 1
    root = np.sqrt(blocks)
    # Near-square layout; always at least one column
    cols = max(int(root) if int(root) == root else int(root) + 1, 1)
    rows = (len(files) + cols - 1) // cols
    # 4 channels: savefig PNGs are RGBA
    canvas = np.zeros((height * rows, width * cols, 4), dtype=np.int32)
    for i, fname in enumerate(files):
        row, col = divmod(i, cols)
        img = np.array(Image.open(os.path.join(sub_path, fname)))
        canvas[row * height:(row + 1) * height, col * width:(col + 1) * width, :] = img
    return Image.fromarray(canvas.astype(np.uint8))

def generate_hist_map():
    """Plot a histogram for every feature of every cleaned table, then build
    comparison grids grouped by station and by feature.

    Fix: the grid-composition code was duplicated verbatim in the two
    comparison loops; it is extracted into _compose_grid.
    """
    width, height = 864, 288
    dataset_path = CONFIG().GOOD_DATASET_PATH
    # 1) Per-feature histogram subplots
    targets = [i for i in os.listdir(dataset_path) if 'SCD' not in i]
    sub_path = os.path.join(CONFIG().DATASET_FIGURES_PATH, 'subplots')
    for tar in targets:
        print(tar)
        station = tar.split('_')[0]
        file_path = os.path.join(dataset_path, tar)
        df = pd.read_csv(file_path)
        features = [i for i in list(df) if i in list(CONFIG().RENAME_DICT.values())]
        if not features:
            # Nothing matched the renamed feature set; show what we actually got
            print(list(df))
        for fea in features:
            plt.figure(figsize=(12, 4))
            plt.hist(df[fea], bins=150)
            plt.title('%s: %s' % (station, fea))
            fname = os.path.join(sub_path, tar.split('.')[0] + '_distribution_' + fea + '.png')
            plt.savefig(fname)
            plt.close()
    # 2) Comparison grids grouped by station
    total_path = os.path.join(CONFIG().DATASET_FIGURES_PATH, 'plots_by_station')
    for station in CONFIG().STATIONS:
        for data_type in ['YB', 'SC']:
            files = [i for i in os.listdir(sub_path)
                     if (i.split('_')[0] == station and (i.split('_')[1]).split('.')[0] == data_type)]
            files.sort()
            big_img = _compose_grid(sub_path, files, width, height)
            big_img.save(os.path.join(total_path, '%s_STATION_%s.png' % (data_type, station)))
    # 3) Comparison grids grouped by feature
    total_path2 = os.path.join(CONFIG().DATASET_FIGURES_PATH, 'plots_by_feature')
    for data_type in ['YB', 'SC']:
        many_files = [i for i in os.listdir(sub_path) if ((i.split('_')[1]).split('.')[0] == data_type)]
        features = list(set([(i.split('distribution_')[-1]).split('.')[0] for i in many_files]))
        for fea in features:
            files = [i for i in many_files if (i.split('distribution_')[-1]).split('.')[0] == fea]
            files.sort()
            big_img = _compose_grid(sub_path, files, width, height)
            big_img.save(os.path.join(total_path2, '%s_FEATURE_%s.png' % (data_type, fea)))
    print('all ok.')
        
        
# Run once ----------------------------------------------------------------------
def run_one_merge(tar1, tar2):
    """Left-join one forecast (YB) table with its measured (SC) table on time
    and write the merged result to MERGED_DATASET_PATH."""
    print(tar1, tar2)
    good_path = CONFIG().GOOD_DATASET_PATH
    df1 = pd.read_csv(os.path.join(good_path, tar1))
    df1['预测时间'] = pd.to_datetime(df1['预测时间'])
    df2 = pd.read_csv(os.path.join(good_path, tar2))
    print(tar2)
    # The measured table's timestamp column name varies by source file
    time_col = '监测时间' if '监测时间' in list(df2) else '实测时间'
    df2[time_col] = pd.to_datetime(df2[time_col])
    merged = df1.merge(df2, how='left', left_on='预测时间', right_on=time_col, suffixes=('', '_'))
    merged = merged.reset_index(drop=True)
    merged = merged[[col for col in list(merged) if 'Unnamed' not in col]]
    merged = merged.dropna(how='any', axis=0)
    merged.to_csv(os.path.join(CONFIG().MERGED_DATASET_PATH, tar1.replace('YB', 'MERGE')))
def generate_merged_dataset():
    """Pair each cleaned YB file with its SC counterpart and merge them all."""
    good_path = CONFIG().GOOD_DATASET_PATH
    yb_files = sorted(f for f in os.listdir(good_path) if 'YB.' in f)
    sc_files = sorted(f for f in os.listdir(good_path) if 'SC.' in f)
    pool(run_one_merge, list(zip(yb_files, sc_files)))
    print('all ok.')
    
    
if __name__ == '__main__':
    # Entry point: clean the raw per-station CSVs, then merge forecast (YB)
    # and measured (SC) tables into the training dataset.
    clean_csv_dataset()
    generate_merged_dataset()
    pass

# -
# debug — scratch snippets kept for manual inspection; not executed
# path = CONFIG().GOOD_DATASET_PATH
# targets = [os.path.join(path, i) for i in os.listdir(path)]
# targets
# df = pd.read_csv(targets[1], nrows=5)
# df.iloc[2,3] = np.nan
# df.iloc[0,5] = np.nan
# df.iloc[0,6] = 999
# features = CONFIG().SC_FEATURES['A']
# df
pass

