#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/4/10 9:14
# @Author  : Devin
# @Site    : 
# @File    : train_test.py
# @Software: PyCharm Edu

"""
V 0.0.1.20180604
"""
import os
# Create the log directory up front: logging.basicConfig below opens a
# FileHandler on ./log/error.log and would fail if the directory is missing.
if not os.path.isdir("./log"):
    os.makedirs("./log")
import logging
# Configure file logging before the heavyweight imports so any import-time
# messages are captured as well.
logging.basicConfig(filename='./log/error.log', format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',  
                    datefmt='%a, %d %b %Y %H:%M:%S', level=logging.INFO)
import pandas
import cassandra
from keras import backend as K
from cassandra import *
from cassandra.cluster import Cluster
import json
import sys
import shutil
import requests
import configparser
import gc

# Thrift-generated service stubs (Train_or_Evaluate) live in ./gen-py.
sys.path.append("./gen-py")

from train_or_test import Train_or_Evaluate
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TProcessPoolServer, TNonblockingServer
from thrift.server import TServer
from DataProcessing import get_name_by_program, split_df, get_df_from_cassandra
from model import Data
from datetime import datetime
from db import Redis_Operation
import asyncio

# Service configuration (thrift and tornado host/port pairs).
cf = configparser.ConfigParser()
cf.read("conf/train.conf")
# Process start time, written into ./log/train.log as a run identifier.
# NOTE(review): captured once at import, not per request -- presumably
# intentional as a "run id"; confirm.
timeID = datetime.now()
def save_log(data, log_path="./log/train.log"):
    """Append *data* to the plain-text train log.

    Every entry writes two lines: the module-level run identifier
    (``timeID``) and a current-timestamp line with the stringified payload.
    """
    text = str(data)
    with open(log_path, "a") as log_file:
        log_file.write("timeID: {}\n".format(timeID))
        log_file.write("{}: {}\n".format(datetime.now(), text))
        
def _has_too_many_categories(df, cate_col, sample_size=100, max_unique=90):
    """Heuristic guard: True when any categorical column looks high-cardinality.

    Samples *sample_size* rows per categorical column and counts distinct
    values; more than *max_unique* distinct values is treated as "too many
    character data".  Frames smaller than *sample_size* are always accepted
    (this mirrors the original per-column ``continue`` -- the length check
    is hoisted out of the loop since ``len(df)`` never changes).
    """
    if len(df) < sample_size:
        return False
    for cate_col_name in cate_col:
        sampled = df.sample(n=sample_size)
        if len(set(sampled[cate_col_name])) > max_unique:
            return True
    return False


def train_or_test(x, y, cate_col, num_col, class_list, user_id, df, model_name, model_parm=None, methods="train"):
    """Split *df* by program name and train (or evaluate) one model per group.

    Parameters
    ----------
    x, y : feature / label column specifications forwarded to ``Data``.
    cate_col, num_col : categorical / numeric column-name lists.
    class_list : program-name classes used by ``get_name_by_program``.
    user_id, model_name : identifiers for the model owner / model files.
    df : pandas.DataFrame holding the raw training data.
    model_parm : optional custom model parameters (training only).
    methods : "train" fits a model per group; any other value evaluates.

    Returns a status string when training, or ``str(result)`` with per-group
    loss/accuracy when evaluating.  Errors are returned as strings (not
    raised) so the thrift caller always receives a reply.
    """
    df_2, name_list = get_name_by_program(df, class_list)
    df_dict = split_df(df_2, name_list)
    result = {}
    for name_by_program in df_dict.keys():
        save_log(name_by_program)
        result[name_by_program] = {}
        if _has_too_many_categories(df_dict[name_by_program], cate_col):
            save_log("Too many character data")
            return ("Too many character data")

        data = Data(df_dict[name_by_program], x, y, cate_col, num_col, name_by_program, user_id, model_name)
        train_x, train_y = data.get_train_data()

        # Nothing to learn from for this group; move on.
        if train_x.empty:
            continue

        try:
            # Convert column names to numeric form, shuffle x, and keep each
            # y row aligned with its shuffled x row via the shared index.
            train_dfx_1 = data.column_name_modifier(train_x, 'x').reset_index(drop=True)
            train_dfy_1 = data.column_name_modifier(train_y, 'y').reset_index(drop=True)
            train_dfx = train_dfx_1.sample(frac=1)
            train_dfy = train_dfy_1.iloc[train_dfx.index]
            train_x_dims, train_y_dims = data.get_len_of_labels()
        except Exception:
            logging.error("data process error", exc_info=True)
            return ("error ")

        if train_dfx.shape[1] != train_x_dims or train_dfy.shape[1] != train_y_dims:
            # Only logged, never aborted -- kept from the original flow
            # (the model layer raises if the shapes are truly unusable).
            logging.error("輸入的數據與程式名要求的格式不相符")
        if methods == "train":
            try:
                train_result = data.train(train_dfx, train_dfy, model_parm=model_parm)
                if train_result is None:
                    return ("the labels of x and y must less than 500")
            except Exception:
                logging.error("train model failed", exc_info=True)
                return ("error")
            finally:
                # Keras keeps global graph state; clear it after every group
                # so repeated requests do not leak memory.
                K.clear_session()
        else:
            try:
                loss, acc = data.evaluate(train_dfx_1, train_dfy_1)
                result[name_by_program]["loss"] = loss
                result[name_by_program]["accuracy"] = acc
            except Exception:
                logging.error("evaluate model failed", exc_info=True)
                return ("error ")
            finally:
                K.clear_session()
        # Drop the large intermediates before the next group is processed.
        del data, train_dfx_1, train_dfy_1, train_dfx, train_dfy, train_x, train_y
    del df, df_2, df_dict
    gc.collect()
    if methods == "train":
        return "successfully"
    else:
        return str(result)


class Train_or_evaluate:
    """Thrift service handler.

    Dispatches train/evaluate requests and manages the per-user model
    directory tree rooted at *model_root_path* (``<root>/<user>/<model>``).
    """

    def __init__(self, model_root_path="./model"):
        # Simple busy flag exposed through get_train_status().
        self.in_training = False
        self.model_root_path = model_root_path

    def train_or_evaluate(self, json_data):
        """Decode the JSON request, load the training frame from redis and
        run ``train_or_test``; when training, POST the final status to the
        tornado worker endpoint configured in conf/train.conf.

        Returns ``"KeyError"`` on a malformed payload, otherwise the
        stringified list of per-call results.
        """
        self.in_training = True
        save_log("enter train or evaluate function")
        load_dict = json.loads(json_data)
        try:
            x = load_dict["x"]
            y = load_dict["y"]
            cate_col = load_dict["cate_col"]
            num_col = load_dict["num_col"]
            class_list = load_dict["class"]
            user_id = load_dict["user_id"]
            model_name = user_id  # one model namespace per user
            methods = load_dict["methods"]
            customize = load_dict["customize"]
        except KeyError:
            logging.error("error json in Train_or_evaluate function", exc_info=True)
            self.in_training = False
            return "KeyError"
        data = {"user_id": user_id,
                "model_name": model_name
        }
        save_log("user_id: {}".format(user_id))

        # NOTE(review): dead code -- ``train_result_url`` is not defined
        # anywhere in this module, so calling this would raise NameError.
        # Kept (uncalled) pending clarification of the intended endpoint.
        def send_failed():
            gc.collect()
            data["status"] = "rejected"
            requests.post(train_result_url, data=data)

        r = Redis_Operation()
        df = pandas.read_json(r.get_hash_data("train_data", model_name))

        def model_train(customize):
            result = []
            if customize == "y":
                # Bug fix: the original called ``self.get_argument`` -- a
                # tornado RequestHandler method this class does not have
                # (guaranteed AttributeError).  Custom model parameters are
                # taken from the JSON payload instead.
                model_parm = load_dict.get("model_parm")
                result.append(train_or_test(x, y, cate_col, num_col, class_list, user_id, df, model_name, model_parm=model_parm, methods=methods))
            else:
                result.append(train_or_test(x, y, cate_col, num_col, class_list, user_id, df, model_name, methods=methods))
            self.in_training = False
            return result

        result = model_train(customize)
        if methods == "train":
            # "furfilled" [sic] is the status string the tornado worker
            # expects; renaming it would need a coordinated change there.
            data["status"] = "furfilled" if set(result) >= set(["successfully"]) else "rejected"
            tornado_host, tornado_port = cf.get("tornado_address", "host"), cf.get("tornado_address", "port")
            res = requests.post("http://{}:{}/worker".format(tornado_host, tornado_port), data=data)
        del df, data
        gc.collect()
        print("train model success")
        return (str(result))

    def test(self):
        """Connectivity probe for clients; only writes a log line."""
        save_log("testing")

    def get_train_status(self):
        """Return "yes" while a training request is in flight, else "no"."""
        return "yes" if self.in_training else "no"

    def get_model_name(self, user_id):
        """Return a JSON mapping of model directory -> list of saved .h5
        model names for *user_id*, or "" when the user has no directory."""
        save_log("user_id: {}".format(user_id))
        save_log("enter get_model_name function")
        result = {}
        root_path = os.path.join(self.model_root_path, user_id)
        if not os.path.isdir(root_path):
            return ""
        for model_name in os.listdir(root_path):
            model_list = []
            if os.path.isdir(os.path.join(root_path, model_name)):
                for real_model_name in os.listdir(os.path.join(root_path, model_name)):
                    # Bug fix: endswith avoids the IndexError the original
                    # ``rsplit(".",1)[1]`` raised on extensionless names.
                    if real_model_name.endswith(".h5"):
                        model_list.append(real_model_name.rsplit(".", 1)[0])
                result[model_name] = model_list

        return json.dumps(result)

    def get_acc_and_loss(self, user_id, model_name):
        """Parse the model's training logs (``*.log`` CSV files with
        ``batch``/``acc``/``loss`` columns) into per-epoch acc and loss
        series and return them as JSON; "Null" when the directory is
        missing."""
        save_log("user_id: {}".format(user_id))
        save_log("enter get_acc_and_loss function")
        result = {}
        root_path = os.path.join(self.model_root_path, user_id, model_name)
        if not os.path.isdir(root_path):
            return "Null"
        for acc_and_loss_log in os.listdir(root_path):
            # Bug fix: endswith instead of rsplit indexing (IndexError on
            # filenames without a dot).
            if not acc_and_loss_log.endswith(".log"):
                continue
            key = acc_and_loss_log.rsplit(".", 1)[0]
            try:
                # Bug fix: the original concatenated root_path and the file
                # name without a path separator.
                df = pandas.read_csv(os.path.join(root_path, acc_and_loss_log))
            except pandas.errors.EmptyDataError:
                logging.error("log file is empty", exc_info=True)
                continue
            # Rows where batch == 0 mark epoch starts; collect their indices
            # (the column may hold strings or ints, so both are checked).
            zero_list = []
            for i in range(len(df)):
                if df["batch"][i] == "0" or df["batch"][i] == 0:
                    zero_list.append(i)
            if len(zero_list) == 0:
                save_log("no log")
                continue
            # Ensure the last row is a boundary so the final epoch is kept.
            if zero_list[-1] != (len(df) - 1):
                zero_list.append(len(df) - 1)
            acc_list = []
            loss_list = []
            for j in range(len(zero_list) - 1):
                acc_list.append([])
                loss_list.append([])
                for k in range(zero_list[j], zero_list[j + 1]):
                    acc_list[j].append(df["acc"][k])
                    loss_list[j].append(df["loss"][k])
            result[key] = {"acc": acc_list, "loss": loss_list}
        return json.dumps(result)

    def del_model(self, user_id, model_name):
        """Delete every model (and cached ./obj objects) for *user_id*.

        NOTE(review): despite taking *model_name*, this removes the whole
        user tree -- the original had per-model rmtree calls commented out
        "for the version position user authentication"; confirm before
        narrowing the deletion scope.
        """
        save_log("user_id: {}".format(user_id))
        save_log("enter del_model function")
        root_path = os.path.join(self.model_root_path, user_id)
        obj_root_path = "./obj/{}".format(user_id)
        if not os.path.isdir(root_path):
            return "the user is not exists"
        try:
            shutil.rmtree(root_path)
            shutil.rmtree(obj_root_path)
            return "successfully"
        except FileNotFoundError:
            # Already gone counts as success.
            return "successfully"
        except Exception:
            logging.error("delete model failed", exc_info=True)
            return "delete model failed"

    def get_model_path(self, model_name):
        """Return the absolute path of the user's default ``.h5`` model
        file (``<root>/<name>/<name>/default.h5``)."""
        model_path = os.path.join(self.model_root_path, model_name, model_name, "default.h5")
        model_abspath = os.path.abspath(model_path)
        return model_abspath

    def stop_training(self):
        """Clear the keras session to tear down the current graph state."""
        K.clear_session()
        return "successfully"
        
if __name__ == "__main__":

    # Wire the handler into the thrift-generated service processor and serve
    # on the address configured in conf/train.conf [thrift_address].
    handler = Train_or_evaluate()
    processor = Train_or_Evaluate.Processor(handler)
    transport = TSocket.TServerSocket(host=cf.get("thrift_address","host"), port=cf.getint("thrift_address","port"))
    tfactory = TTransport.TBufferedTransportFactory()
    pfactory = TBinaryProtocol.TBinaryProtocolFactory()

    #server = TNonblockingServer.TNonblockingServer(processor, transport, tfactory, pfactory)
    # Thread-pool server: each client connection is handled by a worker
    # thread; serve() blocks forever, so the save_log below is only reached
    # if the server loop exits.
    server = TServer.TThreadPoolServer(processor, transport, tfactory, pfactory)
    print ("Starting AI main server ")
    logging.info("Starting AI mian server")
    server.serve()
    save_log ("Done")
