import algo.ModelTrainMange as modelTrains
from ossutil import OSSClient
import time
import pickle
from dbtool import Dao
import os
from loaddata import LoadData
import config
import logging
import errorcode
import json
import fasttext
from sklearn.cluster import KMeans
# from apriori import apriori
import pandas as pd
import numpy as np
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima_model import ARIMA
import statsmodels.api as sm
from detectutils import detectutils
from scputil import SCPClient
from PIL import Image
import zipfile
import h_to_pb

def train_model(self, task_info):
    """Run one training job end to end.

    Reads the material info (bucket dir / file type) for the task's
    calibration set, runs training via create_training_result, then records
    the task status, project progress and training log in the database and
    releases the task back to the queue.

    :param task_info: task descriptor exposing ids, log paths and queue state
    :raises Exception: with a "1001:<message>" payload when the material
        info cannot be read from the database
    """
    dao = Dao()
    data_inf = None
    try:
        sql = 'select bucket_dir,file_type from {material_info} where id = {id}'.format(
            material_info=config.material_table, id=task_info.get_calibration_id())
        data_inf = dao.query_data(sql=sql)
        data_inf = data_inf.reset_index(drop=True)
        data_inf = data_inf.iloc[0, :]
    except Exception as e:
        logging.info(errorcode.Error_Read_Data_1001 + ':' + str(e), exc_info=True)
        raise Exception('1001' + ':' + errorcode.Error_Read_Data_1001)
    scp_client = SCPClient(ips=config.ips,
                           port=config.port,
                           user=config.user,
                           password=config.password,
                           bucket=data_inf['bucket_dir'])
    # Initialise the training-log row (id derived from the current time).
    log_id = int(time.time() * 10000000)
    log_dict = {'id': str(log_id), 'task_id': task_info.get_task_id()}
    dao.insert_from_dict(config.training_log_table, log_dict)

    # Mark the project as currently training.
    project_update_limit = {'id': task_info.get_project_id()}
    project_update_value = {'progress_rate': 'train'}
    dao.update_from_dict(config.project_table, project_update_value, project_update_limit)

    # Run the model and map the outcome onto task/project states.
    msg_response = create_training_result(dao, task_info, scp_client)
    if msg_response['code'] == 200:
        task_status = 'complete'
        progress_rate = "publish"
    else:
        task_status = 'error'
        progress_rate = "trainerr"
    project_status = "unpublish"  # same for both outcomes
    task_id = task_info.get_task_id()
    task_update_limit = {'id': task_id}  # update filter
    # Record the task status (and any error details) on the task row.
    task_update_dic = {'status': task_status, 'error_information': json.dumps(msg_response, ensure_ascii=False)}
    dao.update_from_dict(config.training_task_table, task_update_dic, task_update_limit)

    # Record the project's progress and status.
    project_update_limit = {'id': task_info.get_project_id()}
    project_update_value = {'progress_rate': progress_rate, "status": project_status}
    dao.update_from_dict(config.project_table, project_update_value, project_update_limit)

    # Upload the log file (SCP mode only) and stamp the log row.
    if config.scp_use == 1:
        scp_client.upload(data_inf['bucket_dir'] + '/log/' + task_info.get_log_name(), task_info.get_log_path())
    log_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    update_log_dict = {'log_time': log_time, 'log_name': task_info.get_log_name()}
    update_log_limit = {'id': log_id}
    dao.update_from_dict(config.training_log_table, update_log_dict, update_log_limit)
    # Release the task back to the queue as finished.
    task_info.set_state(3)
    logging.info(errorcode.Model_Train_0)


def create_training_result(dao, task_info, scp_client):
    """Train, evaluate and persist one model for the given task.

    Dispatches on the task's model-type code:
      1001/1002/1006/1007/1008 - supervised learning on x/y train/test splits
      1003 - clustering (exports a labelled csv plus cluster centers)
      1004 - association rules (apriori; exports a rules csv)
      1005 - time series (trained and evaluated on x_train only)
      1010 - object detection (trained from generated xml annotations)
    Artifacts are written into the bucket directory when config.scp_use == 0
    or uploaded through scp_client when config.scp_use == 1, and a row is
    inserted into config.training_model_table.

    :param dao: Dao used for all database access
    :param task_info: task descriptor (task id, project id, calibration id)
    :param scp_client: SCPClient wrapping the artifact bucket
    :return: dict with 'code'/'msg'; 200 on success, data-load error codes on
        load failure, 3001 on training failure, 3002 on persist failure
    """
    create_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # Default response assumes success ('训练成功' = "training succeeded").
    msg_response = dict()
    msg_response['code'] = 200
    msg_response['msg'] = '训练成功'
    task_id = task_info.get_task_id()
    calibration_set_id = task_info.get_calibration_id()

    try:
        data_info = LoadData(id = int(calibration_set_id), task_id = task_id).load_data()
        logging.info(data_info)
    except Exception as e:
        try: # data-load errors raised as "code:message" strings
            error_code_msg = str(e).split(':')
            msg_response['code'] = int(error_code_msg[0])
            msg_response['msg'] = error_code_msg[1]
            logging.error(errorcode.Error_Read_Data_1005 + ':' + str(e), exc_info=True)
            return msg_response
        except: # unknown data-load errors fall back to code 1005
            msg_response['code'] = 1005
            msg_response['msg'] = errorcode.Error_Read_Data_1005 + ':' + str(e)
            logging.error(errorcode.Error_Read_Data_1005 + ':' + str(e), exc_info=True)
            return msg_response

    this_id = int(time.time()* 10000000)
    project_id = int(task_info.get_project_id())
    class_type = data_info['model_type'] # 0: classification, 1: prediction
    arithmetic = data_info['arithmetic']  # algorithm name: tree, logistic regression, etc.
    creater = data_info['creater']
    # class_num = data_info['class_num']
    map_result = data_info['map_label']  # label mapping
    var_name_result = data_info['var_name']
    classifier = json.dumps(map_result, ensure_ascii=False)
    lable_map = dict(dict(map_result))
    classifier_task = json.dumps(var_name_result, ensure_ascii=False)
    parameter = data_info['parameter']
    if not parameter:
        model_params = {}
    else:
        model_params = json.loads(parameter)

    # Train/test splits for supervised (classification/regression) types
    if class_type == '1001' or class_type == '1002' or class_type == '1006' \
            or class_type == '1007' or class_type == '1008':
        x_train = data_info['x_train']
        y_train = data_info['y_train']
        x_test = data_info['x_test']
        y_test = data_info['y_test']
    elif class_type in ['1009','1010']:
        x_train = data_info['x_train']
        x_test = data_info['x_test']
    else:
        # clustering and apriori only use x_train
        x_train = data_info['x_train']
        y_train = ''
        x_test = ''
        y_test = ''
    logging.info(x_train)
    # After loading the data, update the task status (and, originally, an estimate of training time).
    # model_cost_time = get_model_time(arithmetic, len(x_train), map_result)
    running_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # Mark the task as running in the task table
    task_update_classifier = {'algorithm_name':arithmetic, 'type':class_type,
                              'status': 'running', 'create_time':running_time}
    task_update_limit = {'id': task_id}
    dao.update_from_dict(config.training_task_table, task_update_classifier, task_update_limit)

    file_name = '' # output csv of clustering / association-rule algorithms
    center = ''
    model_file_name = ''
    logistic_formula = ''
    model = None
    metrics_dict = {}
    center_result = []
    modelTrain = modelTrains.ModelTrainMange(model_type=class_type, model_name=arithmetic, model_params=model_params)
    try:
        if class_type == '1001' or class_type == '1002' or class_type == '1006' \
                or class_type == '1007' or class_type == '1008':
            modelTrain.train((x_train, y_train))
            y_pred = modelTrain.predict(x_test)
            y_train_predict = modelTrain.predict(x_train)
            test_metrics_dict = modelTrain.evaluate(y_test, y_pred)
            train_metrics_dict = modelTrain.evaluate(y_train, y_train_predict)
            metrics_dict = {'train':train_metrics_dict,'test':test_metrics_dict}
            if class_type != '1007' and class_type != '1008':
                # for regression-style algorithms the fitted formula is stored in logistic_formula
                logistic_formula = modelTrain.logistic_formula(y_train, map_result)
        elif class_type == '1003':
            # clustering; 'cluster' is consumed by the '1003' persistence branch below
            cluster = modelTrain.train(x_train)
        elif class_type == '1004':
            apriori_result=modelTrain.train(x_train)
            apriori_dict = format_relate_file(apriori_result)
            apriori_df = pd.DataFrame(apriori_dict)
            # NOTE(review): temp_dict is built and discarded each iteration — looks like leftover code
            for i in range(len(apriori_df)):
                temp_dict = apriori_df.iloc[i,].to_dict()

        elif class_type == '1005':
            # time series: score the prediction against the aligned slice of x_train
            stander_predict=modelTrain.train(x_train)
            x_train = x_train[stander_predict.index]
            train_metrics_dict = modelTrain.evaluate(x_train, stander_predict)
            metrics_dict = {'train': train_metrics_dict}
        # elif class_type == '1009':
        #     mid_path = data_info['mid_path']
        #     ## xml_path trains the detection model; train/test data lists score it
        #     xml_path, train_data_df, test_data_df = detectutils.write_xml(mid_path, x_train, x_test)
        #     ## build the data set for the classification model
        #     shot_image_df = detectutils.shot_image_data(mid_path, x_train, x_test)
        #     shot_image_df['img_array'] = list(map(lambda x: np.asarray(Image.open(x).resize((125, 125), Image.BILINEAR)), shot_image_df['file_path']))
        #     x_tarin_list = list(shot_image_df['img_array'])
        #     y_tarin_list = list(shot_image_df['lable'])
        #     ## train on the screenshot arrays and labels, plus the xml file path
        #     modelTrain.train((x_tarin_list,y_tarin_list,xml_path))
        #     ## returns the prediction result and the train_data used for evaluation
        #     train_data, predict_result = modelTrain.predict(train_data_df)
        #     test_data, predict_result = modelTrain.predict(test_data_df)
        #     train_metrics_dict  = modelTrain.evaluate(train_data, 1)
        #     test_metrics_dict = modelTrain.evaluate(test_data, 1)
        #     metrics_dict = {'train':train_metrics_dict,'test':test_metrics_dict}
        elif class_type == '1010':
            # object detection: write xml annotations, train, then score train/test sets
            mid_path = data_info['mid_path']
            xml_path, train_data_df, test_data_df = detectutils.transform_write_xml(mid_path, x_train, x_test)
            logging.info("--------------------输入数据--------------")
            logging.info(map_result)
            logging.info(xml_path)
            modelTrain.train((xml_path,map_result))
            logging.info("--------------------预测数据--------------")
            logging.info(train_data_df)
            train_data = modelTrain.predict((train_data_df,'evaluate'))
            test_data = modelTrain.predict((test_data_df,'evaluate'))
            logging.info("--------------------结果数据集--------------")
            logging.info(train_data)
            train_metrics_dict  = modelTrain.evaluate(train_data, 1)
            test_metrics_dict = modelTrain.evaluate(test_data, 1)
            metrics_dict = {'train':train_metrics_dict,'test':test_metrics_dict}




    except Exception as e:
        logging.error(errorcode.Model_Train_3001 + ':' + str(e), exc_info=True)
        msg_response['code'] = 3001
        msg_response['msg'] = errorcode.Model_Train_3001
        return msg_response

    model = modelTrain.get_model().get_model()
    # Training succeeded: upload the model artifact and record it in training_model.
    try:
        model_status = 'unpublish'
        version = manage_version(dao, project_id) # version string
        project_name_sql = 'select project_name from %s where id=%d' % (config.project_table, project_id)
        project_name_set = dao.get_sql_list(sql=project_name_sql)
        model_name = project_name_set[0][0] + version
        # Upload the model artifact
        if class_type == '1001' or class_type == '1002' or class_type == '1005' or class_type == '1007' or class_type == '1008' or class_type == '1009':
            model_file_name = str(project_id) + str(task_id) + '.zip'  # artifact name
            model_temp_name = str(project_id) + str(task_id) + '.sav'
            model_temp_name = os.path.join(os.getcwd(), 'model', model_temp_name)
            txt_name = str(project_id) + str(task_id) + '.txt'
            txt_name = os.path.join(os.getcwd(), 'model', txt_name)
            if(config.scp_use==0):
                model_dir = os.path.join(scp_client.get_bucket(), 'model')
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                model_path = os.path.join(model_dir, model_file_name)
            else:
                model_path = os.path.join(os.getcwd(), 'model', model_file_name)
            # dump the pickled model plus the label map, then zip both together
            pickle.dump(model, open(model_temp_name, 'wb'))
            with open(txt_name, 'w', encoding="utf-8") as fp:
                map_result['arithmetic'] = arithmetic
                fp.write(json.dumps(map_result,ensure_ascii=False))
            file_names = [txt_name,model_temp_name]
            with zipfile.ZipFile(model_path, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
                for fn in file_names:
                    parent_path, name = os.path.split(fn)
                    zf.write(fn, arcname=name)
            if(config.scp_use==1):
                scp_client.upload(scp_client.get_bucket()+'/model/'+model_file_name, model_path)
        elif class_type == '1006':    # text classification saves via fasttext's own save_model
            model_file_name = str(project_id) + str(task_id) + '.zip'  # artifact name
            model_temp_name = str(project_id) + str(task_id) + '.ftz'  # model file name
            model_temp_name = os.path.join(os.getcwd(), 'model', model_temp_name)
            txt_name = str(project_id) + str(task_id) + '.txt'
            txt_name = os.path.join(os.getcwd(), 'model', txt_name)
            if(config.scp_use==0):
                model_dir = os.path.join(scp_client.get_bucket(), 'model')
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                model_path = os.path.join(model_dir, model_file_name)
            else:
                model_path = os.path.join(os.getcwd(), 'model', model_file_name)
            # fasttext has its own save_model; the model here is a fasttext.FastText
            # object (e.g. <fasttext.FastText._FastText>) and cannot be pickled
            model.quantize()
            model.save_model(model_temp_name)   # read back with fasttext.load_model()
            with open(txt_name, 'w') as fp:
                map_result['arithmetic'] = arithmetic
                fp.write(json.dumps(map_result,ensure_ascii=False))
            file_names = [txt_name,model_temp_name]
            with zipfile.ZipFile(model_path, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
                for fn in file_names:
                    parent_path, name = os.path.split(fn)
                    zf.write(fn, arcname=name)
            if (config.scp_use == 1):
                scp_client.upload(scp_client.get_bucket() + '/model/' + model_file_name, model_path)
        elif class_type == '1010':
            # detection: zip the trained model file, its json and a converted pb model
            model_file_name = str(project_id) + str(task_id) + '.zip'
            model_path = None
            if (config.scp_use == 0):
                model_dir = os.path.join(scp_client.get_bucket(), 'model')
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                model_path = os.path.join(model_dir, model_file_name)
            else:
                model_path = os.path.join(os.getcwd(), 'model', model_file_name)

            # NOTE(review): here model is treated as a dict with 'json' and
            # 'model_path' keys — confirm against ModelTrainMange for type 1010
            pb_model_path = h_to_pb.h_to_pb(model['json'],model['model_path'])
            detectutils.write_json(lable_map, model['json'])
            file_names = [model['model_path'], model['json'],pb_model_path]

            with zipfile.ZipFile(model_path, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
                for fn in file_names:
                    parent_path, name = os.path.split(fn)
                    zf.write(fn, arcname=name)
            if (config.scp_use == 1):
                scp_client.upload(scp_client.get_bucket() + '/model/' + model_file_name, model_path)
        elif class_type == '1003':
            # clustering: export labelled rows to csv and collect cluster centers
            file_name = str(project_id) + str(task_id) + '.csv'
            model_path = None
            if (config.scp_use == 0):
                model_dir = os.path.join(scp_client.get_bucket(), 'model')
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                model_path = os.path.join(model_dir, file_name)
            else:
                model_path = os.path.join(os.getcwd(), 'model', file_name)
            feature_name = map_result['feature_name']
            # 'cluster' was produced by the '1003' training branch above
            df_cluster = pd.DataFrame(x_train, columns=feature_name)
            df_cluster['label'] = cluster
            df_cluster['label'] = df_cluster['label'].astype('str')
            df_cluster.to_csv(model_path,index=False, encoding='utf_8_sig')
            metrics_dict['file_path'] = model_path
            if(config.scp_use==0):
                metrics_dict['file_path'] = model_path
            else:
                scp_client.upload(scp_client.get_bucket() + '/model/'+file_name, model_path)
                metrics_dict['file_path'] = scp_client.get_bucket() + '/model/' + file_name
            center_list = model.cluster_centers_.tolist()
            # Format the center output: first a per-label size summary...

            center_dict = {}
            center_dict['type'] = 'sum'
            nums_dict = dict(df_cluster['label'].value_counts())
            for key, values in nums_dict.items():
                nums_dict[key] = int(nums_dict[key])
            center_dict['data'] = nums_dict

            center_result.append(center_dict)
            # ...then one entry per feature with its center value for each label
            for index in range(len(feature_name)):
                center_dict = {}
                center_dict['type'] = 'center'
                center_dict['feature'] = feature_name[index]
                center_dict['data'] = {}
                for lable_index in range(len(center_list)):
                    center_dict['data'][str(lable_index)] = round(center_list[lable_index][index],3)
                center_result.append(center_dict)

        elif class_type == '1004':
            # association rules: export the rules table to csv
            file_name = str(project_id) + str(task_id) + '.csv'
            model_path = None
            if (config.scp_use == 0):
                model_dir = os.path.join(scp_client.get_bucket(), 'model')
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                model_path = os.path.join(model_dir, file_name)
            else:
                model_path = os.path.join(os.getcwd(), 'model', file_name)
            # NOTE(review): apriori_result was already passed to format_relate_file
            # in the training branch; if it is a generator this list() may be empty — confirm
            apriori_result = list(apriori_result)
            apriori_dict = format_relate_file(apriori_result)
            apriori_df = pd.DataFrame(apriori_dict)
            apriori_df['model_id'] = [this_id for n in apriori_dict['main_word']]
            # dao.insert_data(apriori_df,'apriori_result')
            # the exported file should not contain model_id
            apriori_df.drop(columns=['model_id'],inplace=True)
            apriori_df.to_csv(model_path, index=False, encoding='utf_8_sig')

            if (config.scp_use==0):
                metrics_dict['file_path'] = model_path
            else:
                scp_client.upload(scp_client.get_bucket() + '/model/' + file_name, model_path)
                metrics_dict['file_path'] = scp_client.get_bucket() + '/model/' + file_name

        logging.info(errorcode.Model_Up_0)
        # Persist the model record to the database
        end_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        model_dict = {'id': str(this_id), 'model_name': model_name, 'project_id': str(project_id),
                      'version': version, 'model_dir': scp_client.get_bucket()+'/model/'+model_file_name, 'create_time':create_time,
                      'task_id': str(task_id), 'creater': creater, 'status':model_status,
                      'evaluate':json.dumps(metrics_dict, ensure_ascii=False), 'classifier':classifier,
                      'end_time':end_time, 'arithmetic':arithmetic,'logistic_formula':logistic_formula,
                      'predict_file': scp_client.get_bucket()+'/model/'+file_name, 'center': json.dumps(center_result)}
        dao.insert_from_dict(config.training_model_table, model_dict)
    except Exception as e:
        logging.error(errorcode.Model_Train_3002 + ':' + str(e), exc_info=True)
        msg_response['code'] = 3002
        msg_response['msg'] = errorcode.Model_Train_3002
        return msg_response
    return msg_response


# Version management
def manage_version(dao, project_id):
    """Compute the next model version string for a project.

    Looks up every stored version for *project_id*, takes the numeric part
    after 'V', and returns 'V' plus the maximum incremented by one.
    Defaults to 'V1.0' when the project has no models yet.
    """
    version_sql = 'select version from %s where project_id=%d' % (config.training_model_table, project_id)
    rows = dao.get_sql_list(version_sql)
    if not rows:
        return 'V1.0'
    existing = [float(row[0].split('V')[1]) for row in rows]
    return 'V' + str(max(existing) + 1)


# Estimate model training time
def get_model_time(model_name, sample_nums, map_result):
    """Estimate training wall-time for a model, as an int >= 1.

    :param model_name: algorithm key into the per-algorithm weight table
    :param sample_nums: number of training samples (m)
    :param map_result: label-map dict with 'feature_name'; None for
        time-series algorithms, in which case the feature count is 1
    :return: estimated training time (weight scaled by samples/features)
    """
    feature_nums = len(map_result['feature_name']) if map_result else 1
    # Weights are calibrated per 10k samples (m) and per feature (n).
    samples_10k = sample_nums / 10000
    time_weight = {'逻辑回归':0.03,'随机森林':0.1,'lgb':0.16,
                   'xgboost':0.6,'svm':6,'神经网络':6, '线性回归':0.05,
                   'svr': 6,'gbdt':0.16,'KMeans':0.29, 'apriori':0.01, 'ARIMA':0.1}
    weight = time_weight[model_name]
    if model_name == 'svm':
        cost = weight * samples_10k ** 2 * feature_nums
    elif model_name == 'KMeans':
        cost = weight * samples_10k
    elif model_name in ('apriori', 'ARIMA'):
        cost = 1
    else:
        cost = weight * samples_10k * feature_nums
    # The +1 keeps tiny estimates from truncating to zero.
    return int(cost + 1)


# Format association-rule (apriori) algorithm output
def format_relate_file(result):
    """Flatten raw association-rule records into a columnar dict.

    Each record is (itemset, support, ordering-stats); each ordering entry
    (from index 1 onward) is (antecedent, consequent, confidence, lift).
    Returns a dict of parallel lists keyed by main_word, second_word,
    support, confidence and lift, suitable for building a DataFrame.
    """
    columns = {'main_word': [], 'second_word': [], 'support': [],
               'confidence': [], 'lift': []}
    for record in result:
        itemset, support, orderings = record[0], record[1], record[2]
        # Rules over fewer than two items carry no antecedent/consequent pair.
        if len(itemset) < 2:
            continue
        # Entry 0 of the ordering list is skipped, matching the upstream format.
        for entry in orderings[1:]:
            columns['main_word'].append(str(list(entry[0])))
            columns['second_word'].append(str(list(entry[1])))
            columns['support'].append(round(support, 3))
            columns['confidence'].append(round(entry[2], 3))
            columns['lift'].append(round(entry[3], 3))
    return columns


def arima_pre_recovery(pre, diff_data, step=1):
    """Undo differencing on an ARIMA forecast.

    :param pre: pd.Series, predicted values on the most-differenced scale
    :param diff_data: list of pd.Series, outermost level first, e.g.
        [origin, t1] where origin is the raw series and t1 its first diff
    :param step: differencing step length (periods passed to shift), default 1
    :return: pd.Series restored to the original scale, NaN rows dropped

    Fix: the previous implementation called diff_data.reverse(), mutating
    the caller's list in place; iterating reversed(diff_data) gives the
    same order without the side effect.
    """
    rec = pre
    # Walk from the innermost diff back out, adding each shifted level back on.
    for level in reversed(diff_data):
        rec = rec.add(level.shift(step))
    return rec.dropna()