#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/4/10 9:13
# @Author  : Devin
# @Site    : 
# @File    : model.py
# @Software: PyCharm Edu


import pandas, os, pickle
from math import ceil
from keras import optimizers
from keras.models import Sequential, load_model
from keras.layers import Dense,Dropout, BatchNormalization
from keras.callbacks import Callback, CSVLogger,  ModelCheckpoint
#from keras.utils import plot_model
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
from pathlib import Path
import logging
import sys
import configparser
import gc
import time

# Module-level side effect: the training configuration is read once at import
# time. Note that configparser.read() silently ignores a missing file, so a
# later cf.getint(...) call will fail if "conf/train.conf" was not found.
cf = configparser.ConfigParser()
cf.read("conf/train.conf")

def is_number(s):
    """Return True if *s* can be converted to ``float``, False otherwise.

    Used to filter DataFrame cells that cannot be coerced to numeric.
    Also catches TypeError so non-string inputs (None, lists, ...) report
    False instead of raising.
    """
    try:
        float(s)
    except (TypeError, ValueError):
        return False
    return True

class MyCallback(Callback):
    """Keras callback that appends per-batch accuracy and loss to a CSV file.

    The file gets a ``batch,acc,loss`` header the first time it is created;
    subsequent batches append one line each.
    """

    def __init__(self, log_path):
        # Initialise the Keras base class so it can attach model/params later.
        super(MyCallback, self).__init__()
        self.log_path = log_path

    def on_batch_end(self, batch, logs=None):
        # `logs=None` avoids the mutable-default-argument pitfall of `logs={}`.
        logs = logs or {}
        if not os.path.isfile(self.log_path):
            with open(self.log_path, "a") as f:
                f.write("batch,acc,loss\n")
        with open(self.log_path, "a") as f:
            f.write("{},{},{}\n".format(batch, logs.get("acc"), logs.get("loss")))

class Data():
    """Wraps a pandas DataFrame plus the bookkeeping needed to train,
    evaluate and run a Keras classifier on it.

    Responsibilities:
      * one-hot encode categorical columns and coerce numeric columns,
      * persist column-name -> column-index mappings (as pickles) so later
        batches line up with the dimensions the model was built with,
      * build / load / train / evaluate Keras models stored under
        ``<models_dir>/<user_id>/<model_name>/``.
    """

    def __init__(self, df, input_col, output_col, cate_col, num_col, name_by_program, user_id, model_name,
                 obj_name="AIAOIinfo_column_counts", column_file_x='column_dict_x_AOI_AI_project',
                 column_file_y='column_dict_y_AOI_AI_project', models_dir="./model"):
        self.df = df
        self.x = input_col    # list of input column names
        self.y = output_col   # list of output column names
        self.categorical_columns = cate_col
        self.numerical_columns = num_col
        self.name_by_program = name_by_program
        self.model_name = model_name
        # All persisted artefacts are namespaced by the program name.
        self.obj_name = name_by_program + "_" + obj_name
        self.column_file_x = name_by_program + "_" + column_file_x
        self.column_file_y = name_by_program + "_" + column_file_y
        self.eva = name_by_program + "_" + "evaluate"
        self.train_loss_acc = name_by_program + "_" + "train_loss_and_acc"
        self.user_id = str(user_id)
        self.models_dir = models_dir + "/" + str(user_id) + "/" + str(model_name)
        if not os.path.isdir(self.models_dir):
            os.makedirs(self.models_dir)
        self.model_path = self.models_dir + "/" + self.name_by_program
        self.history = MyCallback(log_path=self.model_path + ".log")
        # True when a previously trained model already exists on disk.
        self.model_exists = Path(self.model_path + '.h5').is_file()

    def save_obj(self, obj, name, obj_dir="./obj/"):
        """Pickle *obj* under ``<obj_dir>/<user_id>/<model_name>/<name>.pkl``."""
        obj_dir += "{}/{}/".format(self.user_id, self.model_name)
        if not os.path.exists(obj_dir):
            os.makedirs(obj_dir)
        with open(obj_dir + name + '.pkl', 'wb') as f:
            # HIGHEST_PROTOCOL: most compact binary pickle format.
            pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)

    def load_obj(self, name, obj_dir="./obj/"):
        """Unpickle and return ``<name>.pkl``, or None when it does not exist yet."""
        obj_dir += "{}/{}/".format(self.user_id, self.model_name)
        if not os.path.exists(obj_dir):
            # Create the directory so the matching save_obj call can succeed.
            os.makedirs(obj_dir)
            return None
        path = obj_dir + name + '.pkl'
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            return pickle.load(f)

    def get_dummies_labeldf(self, label_list):
        """Return a numeric-only DataFrame for the columns in *label_list*.

        Rows whose numeric columns cannot be parsed as numbers are dropped;
        categorical columns are one-hot encoded with ``__`` as the separator.
        """
        cat_cols = [c for c in self.categorical_columns if c in label_list]
        num_cols = [c for c in self.numerical_columns if c in label_list]
        dftmp = self.df.copy()
        for key in num_cols:
            # Filter out rows that cannot be coerced to float.
            dftmp = dftmp[dftmp[key].apply(is_number)]
        for key in num_cols:
            dftmp[key] = dftmp[key].astype(float)
        for key in cat_cols:
            dftmp[key] = dftmp[key].astype(str)
        return pandas.get_dummies(dftmp[num_cols + cat_cols], prefix_sep="__")

    def run_column_getter(self):
        """Map each original column to the set of dummy columns derived from it.

        Returns e.g. ``{'x': {'window': {'window__C581', ...}, ...},
        'y': {'defect': {'defect__nan', ...}}}``.
        """
        getter = {'x': {}, 'y': {}}
        for side, labels in (('x', self.x), ('y', self.y)):
            dummies = self.get_dummies_labeldf(labels)
            for key in self.df[labels]:
                bucket = getter[side].setdefault(key, set())
                for dummy in dummies:
                    # Dummy columns are named "<original>__<value>".
                    if str(dummy).rsplit("__", 1)[0] == str(key):
                        bucket.add(dummy)
        return getter

    def get_train_data(self):
        """Return the encoded (x, y) training DataFrames."""
        return (self.get_dummies_labeldf(label_list=self.x),
                self.get_dummies_labeldf(label_list=self.y))

    def column_getter(self):
        """Return ``[x_width, y_width]``: number of encoded columns per side.

        The result is cached in a pickle so subsequent runs reuse the same
        dimensions even when new data lacks some category values.
        """
        record = self.load_obj(self.obj_name) or {}
        name = self.name_by_program
        if name not in record:
            getter = self.run_column_getter()
            record[name] = [sum(len(v) for v in getter['x'].values()),
                            sum(len(v) for v in getter['y'].values())]
            self.save_obj(record, self.obj_name)
        return record[name]

    def get_len_of_labels(self):
        """Return padded ``(x_len, y_len)``; extra slots reserved for values
        that may appear in future data."""
        x_len, y_len = self.column_getter()
        if set(self.x) <= set(self.numerical_columns):
            # Purely numeric inputs cannot grow many new dummy columns.
            return x_len + 20, y_len + 10
        return x_len + 5000, y_len + 100

    def column_name_convertor(self, column_name, column_dict=None):
        """Return ``(index, dict)`` assigning a stable integer index to *column_name*.

        A new name gets the next free index. ``column_dict=None`` avoids the
        original mutable-default-argument bug (a shared dict across calls).
        """
        if column_dict is None:
            column_dict = {}
        if column_name not in column_dict:
            column_dict[column_name] = len(column_dict)
        return column_dict[column_name], column_dict

    def column_name_modifier(self, df, xy):
        """Rename *df*'s columns to stable integer indices and pad to the model width.

        The name->index mapping is persisted so columns keep their positions
        across training runs. ``xy`` selects the mapping file ('x' or 'y').
        """
        len_of_labels = self.get_len_of_labels()
        if xy == 'x':
            filename, len_of_columns = self.column_file_x, len_of_labels[0]
        elif xy == 'y':
            filename, len_of_columns = self.column_file_y, len_of_labels[1]
        else:
            filename, len_of_columns = "", 0
            logging.error("error input in xy")

        column_dict = self.load_obj(filename) or {}
        new_columns = []
        for name in df.columns:
            num, column_dict = self.column_name_convertor(column_name=name, column_dict=column_dict)
            new_columns.append(num)
        self.save_obj(column_dict, filename)

        dftmp = df.copy()
        dftmp.columns = new_columns
        # Pad with empty columns up to the model's fixed input/output width.
        datatank = pandas.DataFrame(columns=range(len_of_columns))
        return pandas.concat([datatank, dftmp]).fillna(0)

    def get_x_column_num(self, column_name):
        """Return the stored index of an x column, or the name itself if unknown."""
        column_dict = self.load_obj(self.column_file_x) or {}
        return column_dict.get(column_name, column_name)

    def get_y_column_num(self, column_name):
        """Return the stored index of a y column, or the name itself if unknown."""
        column_dict = self.load_obj(self.column_file_y) or {}
        return column_dict.get(column_name, column_name)

    def get_y_column_name_by_num(self, column_num):
        """Reverse lookup: index -> label value, e.g. 8 -> 'color__red' -> 'red'.

        Returns None when the index is unknown.
        """
        column_dict = self.load_obj(self.column_file_y) or {}
        for name, num in column_dict.items():
            if num == column_num:
                # [-1] keeps the whole name when it has no "__" separator
                # (the original [1] raised IndexError in that case).
                return name.rsplit("__", 1)[-1]
        return None

    def combine_fdata(self, fdata_x, fdata_y):
        """Append new failure data to previously seen failure data (per side)."""

        def combine_each_fdata(fdata, xy):
            # One rolling pickle per side, namespaced by user and program.
            key = self.user_id + "_" + self.name_by_program + '_old_fdata_' + xy
            new_case = self.column_name_modifier(fdata, xy)
            old_case = self.load_obj(key)
            combined = pandas.concat([new_case, old_case])
            self.save_obj(combined, key)
            return combined.copy()

        return combine_each_fdata(fdata_x, 'x'), combine_each_fdata(fdata_y, 'y')

    def create_train_df(self, new_fdata_case, new_pdata_case):
        """Balance fail/pass data by replicating each frame.

        Each frame is repeated until the row counts are roughly equal, then
        the result is concatenated and NaN-filled with 0.
        """
        if new_pdata_case.empty:
            return new_fdata_case
        if new_fdata_case.empty:
            return new_pdata_case

        n_fail = len(new_fdata_case)
        n_pass = len(new_pdata_case)
        # max(1, ...) fixes the original bug where fdata_times truncated to 0
        # (dropping the failure data entirely) whenever fail rows outnumbered
        # pass rows.
        fdata_times = max(1, int(1.0 * n_pass / n_fail))
        pdata_times = ceil(1.0 * n_fail / n_pass)

        combined = pandas.concat([new_fdata_case] * fdata_times +
                                 [new_pdata_case] * pdata_times, axis=0)
        return combined.fillna(0)

    """
    Model-related part
    """
    @staticmethod
    def _parse_parm(value):
        """Safely parse a config value ('0.01', 'True', ...) into a Python object.

        Replaces the original ``eval`` calls: ``ast.literal_eval`` accepts only
        literals, so configuration text cannot execute arbitrary code.
        Non-string values are returned unchanged (generalization over eval,
        which required strings).
        """
        import ast  # local import keeps this block self-contained
        if isinstance(value, str):
            return ast.literal_eval(value)
        return value

    def get_optimizer(self, optimizer_parm):
        """Build a Keras optimizer from either a name string or a parameter dict.

        Raises:
            Exception: for unknown optimizer types or malformed parameters.
        """
        if isinstance(optimizer_parm, str):
            return optimizer_parm
        if not isinstance(optimizer_parm, dict):
            raise Exception("optimizer parameter error")

        parse = self._parse_parm
        opt_type = optimizer_parm["type"]
        lr = parse(optimizer_parm["lr"])
        if opt_type == "SGD":
            return optimizers.SGD(lr=lr,
                                  momentum=parse(optimizer_parm["momentum"]),
                                  decay=parse(optimizer_parm["decay"]),
                                  nesterov=parse(optimizer_parm["nesterov"]))
        if opt_type == "RMSprop":
            return optimizers.RMSprop(lr=lr,
                                      rho=parse(optimizer_parm["rho"]),
                                      epsilon=parse(optimizer_parm["epsilon"]),
                                      decay=parse(optimizer_parm["decay"]))
        if opt_type == "Adagrad":
            return optimizers.Adagrad(lr=lr,
                                      epsilon=parse(optimizer_parm["epsilon"]),
                                      decay=parse(optimizer_parm["decay"]))
        if opt_type == "Adadelta":
            return optimizers.Adadelta(lr=lr,
                                       rho=parse(optimizer_parm["rho"]),
                                       epsilon=parse(optimizer_parm["epsilon"]),
                                       decay=parse(optimizer_parm["decay"]))
        if opt_type == "Adam":
            # BUG FIX: the original passed an undefined `rho` here (NameError);
            # keras.optimizers.Adam takes beta_1/beta_2/epsilon, not rho.
            return optimizers.Adam(lr=lr,
                                   beta_1=parse(optimizer_parm["beta_1"]),
                                   beta_2=parse(optimizer_parm["beta_2"]),
                                   epsilon=parse(optimizer_parm["epsilon"]),
                                   decay=parse(optimizer_parm["decay"]),
                                   amsgrad=parse(optimizer_parm["amsgrad"]))
        if opt_type == "Adamax":
            # BUG FIX: same undefined-`rho` problem as Adam above.
            return optimizers.Adamax(lr=lr,
                                     beta_1=parse(optimizer_parm["beta_1"]),
                                     beta_2=parse(optimizer_parm["beta_2"]),
                                     epsilon=parse(optimizer_parm["epsilon"]),
                                     decay=parse(optimizer_parm["decay"]))
        if opt_type == "Nadam":
            # BUG FIX: the original mistakenly constructed optimizers.Adam here.
            return optimizers.Nadam(lr=lr,
                                    beta_1=parse(optimizer_parm["beta_1"]),
                                    beta_2=parse(optimizer_parm["beta_2"]),
                                    epsilon=parse(optimizer_parm["epsilon"]),
                                    schedule_decay=parse(optimizer_parm["schedule_decay"]))
        raise Exception("optimizer type error")

    def create_model(self, default_model_number, len_of_x_labels, len_of_y_labels):
        """Build one of the predefined default architectures (1-7).

        Every variant is BatchNorm -> dense stack -> Dropout(0.2) -> softmax
        output, compiled with categorical cross-entropy and accuracy metric.
        The seven near-identical bodies of the original are collapsed into a
        preset table: (layer sizes, activations, dropout-after-every-dense?,
        optimizer).
        """
        presets = {
            1: ((32, 64, 128), ('relu', 'relu', 'relu'), False, 'adam'),
            2: ((128, 64, 32), ('tanh', 'tanh', 'tanh'), False, 'adam'),
            3: ((128, 64, 32), ('sigmoid', 'sigmoid', 'sigmoid'), False, 'adam'),
            4: ((128, 64, 32), ('relu', 'tanh', 'sigmoid'), False, 'adam'),
            5: ((64, 32), ('relu', 'relu'), False, 'adam'),
            6: ((128, 64, 32), ('relu', 'relu', 'relu'), True, 'adam'),
            7: ((128, 64, 32), ('relu', 'relu', 'relu'), False, 'sgd'),
        }
        if default_model_number not in presets:
            raise Exception("model parameter error, you iput an integer which not belong to correct serial number")
        sizes, activations, dropout_each, optimizer = presets[default_model_number]

        model = Sequential()
        model.add(BatchNormalization(input_shape=(len_of_x_labels,)))
        for size, act in zip(sizes, activations):
            model.add(Dense(size, activation=act))
            if dropout_each:
                model.add(Dropout(0.2))
        if not dropout_each:
            # Variants other than 6 use a single Dropout before the output.
            model.add(Dropout(0.2))
        model.add(Dense(len_of_y_labels, activation='softmax'))
        model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
        return model

    def load(self, model_parm=None):
        """Load the saved model if present, otherwise build a fresh one.

        Returns None when the label space is too large to train sensibly
        (>10000 input or >500 output columns). ``model_parm`` may be None
        (default architecture), an int (preset number for create_model) or a
        dict describing a custom architecture.
        """
        len_of_x_labels, len_of_y_labels = self.get_len_of_labels()
        if len_of_x_labels > 10000 or len_of_y_labels > 500:
            return None
        if Path(self.model_path + '.h5').is_file():
            model = load_model(self.model_path + '.h5')
            self.model_exists = True
        elif model_parm is None:
            # Default architecture: 128-64-32 relu stack with adam.
            model = Sequential()
            model.add(BatchNormalization(input_shape=(len_of_x_labels,)))
            model.add(Dense(128, activation='relu'))
            model.add(Dense(64, activation='relu'))
            model.add(Dense(32, activation='relu'))
            model.add(Dropout(0.2))
            model.add(Dense(len_of_y_labels, activation='softmax'))
            model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        elif isinstance(model_parm, int):
            model = self.create_model(model_parm, len_of_x_labels, len_of_y_labels)
        else:
            # Custom architecture described by a nested dict:
            # {"hierarchy": {key: {"name": "Dense"|..., "num": ..., "act": ...}},
            #  "optimizer": str-or-dict} -- presumably from the UI/config;
            # TODO(review): confirm the dict schema against the caller.
            model = Sequential()
            model.add(Dense(16, input_dim=len_of_x_labels))
            for key in model_parm["hierarchy"]:
                layer = model_parm["hierarchy"][key]
                if layer["name"] == "Dense":
                    model.add(Dense(int(layer["num"]), activation=layer["act"]))
                else:
                    model.add(Dropout(float(layer["num"])))
            model.add(Dense(len_of_y_labels, activation='softmax'))
            model.compile(optimizer=self.get_optimizer(model_parm["optimizer"]),
                          loss="categorical_crossentropy", metrics=['accuracy'])

        return model

    def train(self, train_dfx, train_dfy, epochs=5, model_parm=None, validation_split=0.2):
        """Train (or continue training) the model and persist it plus metrics.

        Returns "successfully" on completion, or None when no model could be
        built (label space too large).
        """
        core = cf.getint("cpu_core", "core")
        # Cap TensorFlow's thread pools to the configured number of CPU cores.
        KTF.set_session(tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=core,
                                                         inter_op_parallelism_threads=core)))
        model = self.load(model_parm=model_parm)
        if model is None:
            return None
        # Keep only the best weights (by validation loss) during training.
        model_check = ModelCheckpoint(self.model_path + ".h5", monitor='val_loss', save_best_only=True)
        model.fit(x=train_dfx,
                  y=train_dfy,
                  batch_size=32,
                  epochs=epochs,
                  callbacks=[self.history, model_check],
                  verbose=0,
                  validation_split=validation_split,
                  validation_data=None,
                  shuffle=True,
                  initial_epoch=0)

        loss, acc = model.evaluate(x=train_dfx, y=train_dfy, batch_size=8, verbose=0)
        model.save(self.model_path + ".h5")
        self.save_obj(obj={"loss": loss, "accuracy": acc}, name=self.train_loss_acc)
        return "successfully"

    def predict(self, test_df_x):
        """Predict decoded class labels for the encoded input rows.

        Returns a pandas Series of label strings, or None when no trained
        model exists on disk (or the label space is too large).
        """
        model = self.load()
        # BUG FIX: check before predicting -- the original ran predict() on a
        # fresh untrained model (or crashed on None) before the existence check.
        if model is None or not self.model_exists:
            return None
        predlist = model.predict(test_df_x, batch_size=8, verbose=1)
        # argmax over the softmax output -> predicted column index per row.
        predlist = pandas.DataFrame(predlist).idxmax(axis=1)
        return predlist.apply(self.get_y_column_name_by_num)

    def predict2(self, test_df_x):
        """Predict raw softmax probabilities; None when no trained model exists."""
        model = self.load()
        if model is None or not self.model_exists:
            return None
        return model.predict(test_df_x, batch_size=8, verbose=1)

    def evaluate(self, eval_df_x, eval_df_y):
        """Evaluate the saved model on the given encoded data.

        Returns the keras ``[loss, accuracy]`` list (also persisted as a dict
        in the '<name>_evaluate' pickle), or None when no trained model exists.
        """
        model = self.load()
        if model is None or not self.model_exists:
            return None
        eva = model.evaluate(eval_df_x, eval_df_y, batch_size=8, verbose=1, sample_weight=None)
        self.save_obj({"loss": eva[0], "accuracy": eva[1]}, self.eva)
        return eva
