# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 15:07:32 2020

@author: hhao
"""
import shutil
from pyAudioAnalysis import audioBasicIO
from sklearn.model_selection import train_test_split

from scputil import SCPClient

"""
功能：读取参数、数据、返回训练集、预测集
"""
import codecs
import chardet
import time
from tkinter import *
from tkinter.filedialog import askdirectory
from tkinter import messagebox
import dbtool
import re
import pandas as pd
from ossutil import OSSClient
from pandas.api.types import is_string_dtype, is_numeric_dtype, is_datetime64_dtype
import logging
import errorcode
import config
import numpy as np
import json
import random
import featureextraction
import dataupload
from lib.dateutil import transform_to_days, which_is_datetime, is_delta_my
import jieba
import zipfile
from PIL import Image
from pyAudioAnalysis import MidTermFeatures as aF
from pyAudioAnalysis import audioTrainTest as aT
from os import rename
from sklearn import model_selection
import shutil,os

class LoadData:
    def __init__(self, id=None, file_name=None, file_size=None, task_id=None):
        self.id = id
        self.file_name = file_name
        self.file_size = file_size
        self.task_id = task_id

    """
    读取本地数据
    """

    @staticmethod
    def get_local_data(file, encoding=None, sep=None, sheet=None, model_type=None):
        """Read a local data file for the given model type.

        :param file: path to the data file; tabular types expect
            csv/xls(x)/txt, while '1007'-'1010' expect a .zip archive
        :param encoding: text encoding handed to the pandas readers
        :param sep: field separator handed to the pandas readers
        :param sheet: Excel sheet name (used for .xls/.xlsx only)
        :param model_type: task code: '1001'-'1006' tabular/text,
            '1007' image classification, '1008' audio classification,
            '1009'/'1010' object detection
        :return: a DataFrame for '1001'-'1008'; for '1009'/'1010' a list of
            ``[image_path, [[top, left, width, height, label], ...]]`` entries
        :raises Exception: ``'1001:<message>'`` on any read failure.
            NOTE(review): if no branch assigns ``data`` (e.g. an unhandled
            file extension or a non-zip archive for '1007'-'1010'), the final
            ``return data`` raises NameError, which surfaces through the same
            1001 error path.
        """

        ## helper: unzip a zip archive
        # def unzip_file(zip_src, dst_dir):
        #     fz = zipfile.ZipFile(zip_src, 'r')
        #     fz.extractall(path=dst_dir)
        #     fz.close()
        def unzip_file(zip_src, dst_dir):   # fixes mojibake in zip entry names
            # zipfile decodes non-UTF-8 entry names as cp437; re-encoding to
            # cp437 and decoding as GBK recovers Chinese filenames.
            with zipfile.ZipFile(zip_src, 'r') as fd:
                for i in fd.namelist():
                    try:
                        gbkfilename = i.encode('cp437').decode('GBK')
                        fd.extract(i, dst_dir)
                        # NOTE(review): for nested entries the parent dirs keep
                        # their raw names, so this rename can fail; the entry is
                        # then re-extracted under its raw name below.
                        rename(''.join([dst_dir + '/', i]), ''.join([dst_dir + '/', gbkfilename]))
                    except Exception as e:
                        fd.extract(i, dst_dir)


        # Recursively walk the tree and collect directories sitting at
        # ``target_floor`` levels below ``file_path`` into ``value``.
        def fileCount(file_path, value,target_floor, cnt=1):

            data_listdir_name = os.listdir(file_path)
            old_cnt = cnt
            # Recurse into every sub-directory, resetting the depth counter
            # for each sibling so depth is measured from the root.
            for path in data_listdir_name:
                cnt = old_cnt
                if os.path.isdir(file_path + '/' + path):
                    newfile_path = file_path + '/' + path
                    value, cnt = fileCount(newfile_path, value,target_floor,cnt)

            cnt += 1
            ## record this directory when it sits at the requested depth
            if cnt == target_floor:
                value.append(file_path)
                return value, cnt
            else:
                return value, cnt


        try:
            if model_type in ['1001', '1002', '1003', '1004', '1005', '1006']:  # legacy types keep the original readers and return a DataFrame
                ## work around read errors for files saved as UTF-8-SIG:
                ## detect the BOM variant and rewrite the file as plain UTF-8
                # NOTE(review): neither file handle below is explicitly closed.
                content = open(file, 'rb').read()
                source_encoding = chardet.detect(content)
                if (source_encoding['encoding'] == "UTF-8-SIG"):
                    content = content.decode("utf-8-sig")
                    content = content.encode('utf-8')
                    logging.info("-------------------开始转换格式---------------------")
                    codecs.open(file, 'wb').write(content)
                if re.search('\.xlsx?$', file):
                    if sheet is not None:
                        data = pd.read_excel(file, sheet_name=sheet)
                    else:
                        data = pd.read_excel(file)
                elif re.search('\.csv$', file):
                    if sep is not None:
                        data = pd.read_csv(file, encoding=encoding, sep=sep)
                    else:
                        data = pd.read_csv(file, encoding=encoding)  # pandas default separator (',')
                elif re.search('\.txt$', file):
                    if sep is not None:
                        data = pd.read_table(file, encoding=encoding, sep=sep)
                    else:
                        data = pd.read_table(file, encoding=encoding)  # pandas default separator ('\t')


            # elif model_type == '1006':
            #     #  1006 文本分类读取数据，因无column name，暂分2模块
            #     #  后期考虑输入格式规范，可直接套用上面的，目前默认txt，且无columns name的
            #     if sep is not None:
            #         data = pd.read_table(file, encoding=encoding, sep=sep, header=None, names=['text_sentence', 'text_target'])
            #     else:
            #         data = pd.read_table(file, encoding=encoding, header=None, names=['text_sentence', 'text_target'])  # 默认为空格
            # output: a DataFrame with two columns
            elif model_type == '1007':  # 1007 image classification: unzip and walk the folder tree; returns a df with img_array / img_target columns
                # Image loader; shuffling is off by default (train_test_split
                # shuffles later anyway).
                def Load_img_target_dict(datasetpath='./dataset_MNIST/train', random_flag=False):
                    file_path = datasetpath + '/'
                    file_label_list = os.listdir(file_path)  # one sub-folder per label
                    data_target = []
                    for j in file_label_list:  # iterate every label folder
                        data_target_path = file_path + j + '/'
                        file_img_list = os.listdir(data_target_path)
                        for k in file_img_list:
                            temp_img_label = []
                            # every image is resized to a fixed 126x126
                            width_max, height_max = 126,126
                            temp_img_label.append(np.asarray(Image.open(data_target_path + k).resize((width_max, height_max), Image.BILINEAR)))
                            temp_img_label.append(j)
                            data_target.append(temp_img_label)
                    if random_flag == True:
                        random.shuffle(data_target)
                    X = []
                    y = []
                    data = pd.DataFrame()
                    for i in range(len(data_target)):
                        X.append(data_target[i][0])
                        y.append(data_target[i][1])
                    data['img_array'] = X
                    data['img_target'] = y
                    return data

                # print('图像读取')
                # Only raw images + labels are loaded here; PCA/CNN etc. live in
                # the Algo module since several algorithms may consume this df.

                if re.search('\.zip$', file):  # only .zip archives are handled
                    file_zip = file
                    file = file_zip[:-4]
                    unzip_file(file_zip, file)  # unpack next to the archive
                    # logging.info('路径列下文件:'+str(os.listdir(file)), exc_info=True)

                    # pick the first directory found at depth 3
                    value = []
                    file_path = fileCount(file, value,3)[0][0]
                    data = Load_img_target_dict(datasetpath=file_path)  # no shuffle here; train_test_split shuffles later

            elif model_type == '1008':  # 1008 audio classification: extract features straight from the audio files
                ## read one audio file's features; results are later stacked into a df
                def extract_audio_feature(folder_path, mid_window, mid_step, short_window, short_step,
                                          compute_beat=False):
                    sampling_rate, signal = audioBasicIO.read_audio_file(folder_path)
                    signal = audioBasicIO.stereo_to_mono(signal)
                    if compute_beat:
                        mid_features, short_features, mid_feature_names = \
                            aF.mid_feature_extraction(signal, sampling_rate,
                                                      round(mid_window * sampling_rate),
                                                      round(mid_step * sampling_rate),
                                                      round(sampling_rate * short_window),
                                                      round(sampling_rate * short_step))
                        beat, beat_conf = aF.beat_extraction(short_features, short_step)
                    else:
                        mid_features, _, mid_feature_names = \
                            aF.mid_feature_extraction(signal, sampling_rate,
                                                      round(mid_window * sampling_rate),
                                                      round(mid_step * sampling_rate),
                                                      round(sampling_rate * short_window),
                                                      round(sampling_rate * short_step))

                    mid_features = np.transpose(mid_features)
                    mid_features = mid_features.mean(axis=0)
                    # long term averaging of mid-term statistics
                    if (not np.isnan(mid_features).any()) and \
                            (not np.isinf(mid_features).any()):
                        if compute_beat:
                            mid_features = np.append(mid_features, beat)
                            mid_features = np.append(mid_features, beat_conf)
                    return mid_features, folder_path, mid_feature_names

                if re.search('\.zip$', file):  # only .zip archives are handled
                    file_zip = file
                    file = file_zip[:-4]
                    unzip_file(file_zip, file)  # unpack next to the archive

                    # pick the first directory found at depth 3
                    value = []
                    file_path = fileCount(file, value,3)[0][0]


                    label_path_list = []
                    for i in os.listdir(file_path):
                        label_path_list.append(file_path + '/' + i)

                    #     features, class_names, _ = aF.multiple_directory_feature_extraction(label_path_list, 1.0, 1.0,
                    #                                                                         aT.shortTermWindow,
                    #                                                                         aT.shortTermStep,
                    #                                                                         compute_beat=False)
                    data = pd.DataFrame()
                    try:
                        temp =[]
                        target_list =[]
                        for j in label_path_list:
                            for z in os.listdir(j):
                                feature, name, _ = extract_audio_feature(j+'/'+z, 1.0, 1.0,
                                                                         aT.shortTermWindow,
                                                                         aT.shortTermStep,
                                                                         compute_beat=False)

                                temp.append(feature)
                                target_list.append(j.split('/')[-1])

                        # feature columns are named by their index: '0', '1', ...
                        colunm_list =[]
                        for i in range(len(temp[0])):
                            colunm_list.append(str(i))

                        data = pd.DataFrame(temp,columns=colunm_list)
                        data['audio_target'] = target_list
                        logging.info(data)
                    except Exception as e:
                        # NOTE(review): failures here are swallowed and an empty
                        # DataFrame is returned instead of raising.
                        logging.info('读取异常，内存orCPU爆炸', exc_info=True)
                        # error_code_msg = '1001' + ':' + errorcode.Data_Read_1001
                        # raise Exception(error_code_msg)

            elif model_type in ['1009','1010']:  # read path/box/label from a txt; returns a list shaped [file_path, [[box, label]]]
                # Parse the annotation txt shipped inside the archive; image
                # files are moved under train/images/ first.
                def Load_img_txt(datasetpath):
                    ## locate the annotation txt and move images into place
                    datasetpath = datasetpath + '/'
                    s = os.listdir(datasetpath)
                    # findtxt = re.compile('[0-9a-zA-Z]+\.txt')
                    for string in s:
                        if os.path.splitext(string)[1] == '.txt':
                            txt = string
                        if os.path.splitext(string)[1] == '.png' or os.path.splitext(string)[1] == '.jpg':
                            folder = os.path.exists(datasetpath + 'train/images/')
                            if not folder:
                                os.makedirs(datasetpath + 'train/images/')  # makedirs creates intermediate dirs as needed
                            shutil.move(datasetpath + string, datasetpath + 'train/images/')

                    # NOTE(review): the handle is never closed, and eval() below
                    # executes arbitrary code from the txt — only safe for
                    # trusted archives; ast.literal_eval would be safer.
                    file = open(datasetpath + txt,'r',encoding = 'utf-8')
                    txt_list = file.readlines()
                    ## build the [file_path, [[top, left, width, height, label], ...]] list
                    data_list = []
                    for string in txt_list:
                        string = string.replace("\n", "")
                        string = eval(string)
                        temp_list = []
                        for box in string['box']:
                            temp_list.append([int(box['top']), int(box['left']), int(box['width']), int(box['height']),box['label']])
                        data_list.append([datasetpath + 'train/images/' + string['file'],temp_list])

                    return data_list

                ## main flow: unzip, find the annotation folder, parse the txt
                if re.search('\.zip$', file):  # only .zip archives are handled
                    file_zip = file
                    file = file_zip[:-4]
                    unzip_file(file_zip, file)  # unpack next to the archive
                    # logging.info('路径列下文件:'+str(os.listdir(file)), exc_info=True)
                    # pick the first directory found at depth 2
                    value = []
                    file_path = fileCount(file, value,2)[0][0]
                    # parse the txt annotations into the returned list
                    data = Load_img_txt(datasetpath=file_path)




        except Exception as e:
            logging.info(errorcode.Data_Read_1001 + ':' + str(e), exc_info=True)  # failed to read local data
            error_code_msg = '1001' + ':' + errorcode.Data_Read_1001
            raise Exception(error_code_msg)


        return data

    """
    数据验证
    """

    def data_verify(self, data, data_feature, data_label, model_type):
        """Validate input data before training.

        :param data: full input DataFrame
        :param data_feature: feature columns (currently unused here)
        :param data_label: label column (Series)
        :param model_type: task code; '1002' (regression) requires a
            numeric label
        :return: True when every check passes
        :raises Exception: ``'<code>:<message>'`` on the first failed check
            (1007 single column, 1005 missing values, 1006 string label)
        """
        # Separator check: a single-column frame usually means the file was
        # read with the wrong delimiter.
        if data.shape[1] == 1:
            error_code_msg = '1007' + ':' + errorcode.Data_Type_1007
            raise Exception(error_code_msg)

        # Missing-value check: any NaN anywhere is rejected.
        # (Dead `is_true = False` before the raise was removed.)
        if data.isnull().values.any():
            error_code_msg = '1005' + ':' + errorcode.Data_Type_1005
            raise Exception(error_code_msg)

        # Label dtype check: regression ('1002') needs a numeric label.
        if model_type == '1002' and is_string_dtype(data_label):
            error_code_msg = '1006' + ':' + errorcode.Data_Type_1006
            raise Exception(error_code_msg)

        return True

    """
    映射关系处理
    """

    # 映射关系
    # 特征映射
    def var_map(self, var):
        """Encode the distinct values of *var* as integer codes.

        :param var: pandas Series of (typically string) feature values
        :return: (mapped_series, mapping) where mapping is
            {str(old_value): code} in first-appearance order
        """
        # Build the old -> new mapping from the unique values.
        mapping = {str(label): code
                   for code, label in enumerate(var.unique().tolist())}

        # Apply it to get the numeric series.
        return var.map(mapping), mapping

    # 标签映射
    def var_l_map(self, var):
        """Encode label values as string codes '0', '1', ...

        :param var: pandas Series of label values
        :return: (mapped_series, reverse_map) where reverse_map is
            {code_str: original_label}, kept for decoding predictions later
        """
        reverse_map = {}   # new -> old, returned for later decoding
        forward_map = {}   # old -> new, used to perform the mapping
        for code, label in enumerate(var.unique().tolist()):
            key = str(code)
            reverse_map[key] = label
            forward_map[label] = key

        return var.map(forward_map), reverse_map

    def data_manipulate(self, data_f, data_label, model_type):
        """Encode string features (and, for classification, labels) as numbers.

        :param data_f: feature DataFrame (not mutated; a copy is encoded)
        :param data_label: label Series
        :param model_type: '1001' = classification (label is re-coded),
            anything else leaves the label untouched
        :return: (encoded_features, encoded_label,
            {'feature': per_column_maps, 'label': label_map})
        """
        # Feature mapping: label-encode every string-typed column on a copy
        # and remember the per-column mapping.
        data_feature = data_f.copy()
        feature_map = {}
        # .items() replaces DataFrame.iteritems(), removed in pandas 2.0.
        for index, value in data_feature.items():
            if is_string_dtype(value):  # string column -> build mapping
                value_new, value_map = self.var_map(value)
                data_feature.loc[:, index] = value_new
                feature_map[index] = value_map

        # Label mapping: only classification re-codes the label.
        if model_type == '1001':
            data_label, label_map = self.var_l_map(data_label)
        else:  # regression/prediction keeps the label as-is
            label_map = {}

        all_map = {'feature': feature_map, 'label': label_map}
        return data_feature, data_label, all_map

    def data_manipulate_apriori(self, data):
        """Turn a DataFrame into a transaction list for association analysis.

        :param data: raw DataFrame
        :return: list of transactions, e.g. [['a', 'b'], ['a', 'b']]
        """
        if data.shape[1] == 1:
            # Single column: each cell holds a comma-separated transaction.
            return data.iloc[:, 0].map(lambda x: x.split(',')).to_list()

        # Multiple columns: one transaction per row, NaN cells dropped.
        return [
            [cell for cell in row if not pd.isnull(cell)]
            for _, row in data.iterrows()
        ]

    def time_period_manupulate(self, date_series):
        """Check that a date series is regularly spaced.

        :param date_series: series of dates (strings or datetime64)
        :return: the (possibly datetime-coerced) series when the spacing is
            constant, monthly, or yearly
        :raises Exception: '1013:...' when the spacing is irregular
        """
        # Coerce string dates into datetime64 first.
        if is_string_dtype(date_series):
            date_series = pd.to_datetime(date_series)

        # Day gaps between consecutive entries.
        shifted = date_series.shift(-1, axis=0)
        gaps = (shifted[:-1] - date_series[:-1]).dt.days
        unique_gaps = gaps.unique()

        # Accept constant gaps, then month-like, then year-like patterns.
        if len(unique_gaps) == 1:
            return date_series
        if is_delta_my(unique_gaps):  # monthly
            return date_series
        if is_delta_my(unique_gaps, type='Y'):  # yearly
            return date_series

        error_code_msg = '1013' + ':' + errorcode.Data_Type_1013
        raise Exception(error_code_msg)

    def period_manipulate(self, data):
        """Check that a numeric series is evenly spaced.

        :param data: numeric pandas Series
        :return: the series itself when every consecutive gap is equal
        :raises Exception: '1013:...' otherwise
        """
        # Gaps between consecutive values.
        shifted = data.shift(-1, axis=0)
        unique_gaps = (shifted[:-1] - data[:-1]).unique()

        if len(unique_gaps) != 1:  # spacing not constant
            error_code_msg = '1013' + ':' + errorcode.Data_Type_1013
            raise Exception(error_code_msg)

        return data

    def data_manipulate_ts(self, value_col, date_col):
        """Prepare time-series data.

        :param value_col: value series; coerced to float when not numeric
        :param date_col: optional date/period series used as the index
        :return: the value series, reindexed 0..n-1 when there is no date
            column, or indexed by the validated period column otherwise
        :raises Exception: '1014:...' when values cannot be made numeric or
            the date column has an unsupported dtype
        """
        if date_col is None:
            # Value-only input: ensure numeric, renumber the index.
            if is_numeric_dtype(value_col):
                return value_col.reset_index(drop=True)
            try:
                return value_col.astype('float').reset_index(drop=True)
            except Exception:
                error_code_msg = '1014' + ':' + errorcode.Data_Type_1014
                raise Exception(error_code_msg)

        # Validate the date/period column; it becomes the new index.
        if is_string_dtype(date_col) or is_datetime64_dtype(date_col):
            index = self.time_period_manupulate(date_col)
        elif is_numeric_dtype(date_col):
            index = self.period_manipulate(date_col)
        else:
            error_code_msg = '1014' + ':' + errorcode.Data_Type_1014
            raise Exception(error_code_msg)

        # Attach the validated index; only the coercion path maps failures
        # to error 1014, matching the original behavior.
        if is_numeric_dtype(value_col):
            value_col.index = index
            return value_col
        try:
            value_col = value_col.astype('float')
            value_col.index = index
            return value_col
        except Exception:
            error_code_msg = '1014' + ':' + errorcode.Data_Type_1014
            raise Exception(error_code_msg)

    def param_to_database(self, con, table_name, param_data, update_condition):
        """Persist algorithm parameters for a calibrated id to MySQL.

        :param con: connection wrapper exposing ``update_from_dict``
        :param table_name: target table name
        :param param_data: algorithm parameters (dict)
        :param update_condition: update (WHERE) condition (dict)
        :return: None
        """
        # Delegate entirely to the DAO helper.
        con.update_from_dict(table_name, param_data, update_condition)

    """
    主函数 获取最终数据
    """

    def load_data(self):
        # 初始化结果
        result = dict()
        result['model_type'] = None  # 0：分类，1：预测
        result['arithmetic'] = None  # 算法名称
        result['x_train'] = None
        result['x_test'] = None
        result['y_train'] = None
        result['y_test'] = None
        result['map_label'] = None
        result['var_name'] = None
        result['project_id'] = None
        result['creater'] = None
        result['class_num'] = None
        result['data_name'] = None
        result['parameter'] = None

        # 初始化数据连接
        dao = dbtool.Dao()

        #随机变量名，可以作映射label
        def ranstr(num):
            # 猜猜变量名为啥叫 H
            H = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'

            salt = ''
            for i in range(num):
                salt += random.choice(H)

            return salt


        # 递归判断文件在第几层文件夹
        def fileCount(file_path, value,target_floor, cnt=1):

            data_listdir_name = os.listdir(file_path)
            old_cnt = cnt
            # 判断文件夹下是否有文件夹，然后循环
            for path in data_listdir_name:
                cnt = old_cnt
                if os.path.isdir(file_path + '/' + path):
                    newfile_path = file_path + '/' + path
                    value, cnt = fileCount(newfile_path, value,target_floor,cnt)

            cnt += 1
            ##查看第几层的文件夹
            if cnt == target_floor:
                value.append(file_path)
                return value, cnt
            else:
                return value, cnt
        """
        获取数据、参数信息
        """
        data_inf = None
        try:
            # sql = 'select * from calibration_dataset where id = {id}'.format(id = self.id)
            # data_inf = dao.query_data(sql=sql)
            sql = 'select * from {material_info} where id = {id}'.format(material_info=config.material_table,
                                                                         id=self.id)
            data_inf = dao.query_data(sql=sql)
        except Exception as e:
            logging.info(errorcode.Error_Read_Data_1001 + ':' + str(e), exc_info=True)
            error_code_msg = '1001' + ':' + errorcode.Error_Read_Data_1001
            raise Exception(error_code_msg)

        data_inf2 = None
        try:
            sql2 = 'select * from {traintask_info} where id = {id}'.format(traintask_info=config.training_task_table,
                                                                           id=self.task_id)
            data_inf2 = dao.query_data(sql=sql2)
        except Exception as e:
            logging.info(errorcode.Error_Read_Data_1002 + ':' + str(e), exc_info=True)
            error_code_msg = '1002' + ':' + errorcode.Error_Read_Data_1002
            raise Exception(error_code_msg)

        """
        1:获取参数信息
        """
        data_inf = data_inf.reset_index(drop=True)
        data_inf = data_inf.iloc[0, :]
        data_inf2 = data_inf2.reset_index(drop=True)
        data_inf2 = data_inf2.iloc[0, :]

        # 训练参数
        model_type = data_inf2['type']  # 模型类型 1001:分类，1002:预测
        algorithm_name = data_inf2['algorithm_name']  # 算法名称
        target_column = data_inf2['target_column']  # 因变量名称
        split_proportion = data_inf2['proportion']  # 训练集比例
        variable_name = data_inf2['variable_name']  # 自变量名称
        result['parameter'] = data_inf2['extra_param'] #模型训练参数

        # 数据文件
        # sep = data_inf['classifier']  # 分隔符
        file_name = data_inf['file_name']  # 素材名称
        file_size = data_inf['file_size']  # 文件大小
        project_id = data_inf['project_id']  # 项目id
        creater = data_inf['creater']

        sql_syntax = data_inf['sql_syntax']
        conn_id = data_inf['conn_id']
        sql_file = data_inf['sql_file']
        # encoding_ = data_inf['encoding'] #本地上传数据编码
        bucket_dir = data_inf['bucket_dir']
        file_name2 = data_inf['file_name2']  # 文件名称

        # 写入结果
        result['model_type'] = model_type  # 0：分类，1：预测
        result['arithmetic'] = algorithm_name  # 算法名称
        result['project_id'] = project_id
        result['creater'] = creater
        # result['parameter'] = parameter

        # # 将参数信息写入training_task_data表
        # table_name = config.training_task_table
        # param_data = {'arithmetic':algorithm_name}
        # update_condition = {'id':self.task_id}
        # self.param_to_database(dao, table_name, param_data, update_condition)

        # 验证模型类型是否输入正确,1001分类、1002回归、1003聚类、1004关联规则,1005时序,1006文本分类，1007图像分类
        if model_type not in ['1001', '1002', '1003', '1004', '1005', '1006', '1007', '1008','1009','1010']:
            # logging.info(errorcode.Data_Type_1010)
            error_code_msg = '1010' + ':' + errorcode.Data_Type_1010
            raise Exception(error_code_msg)

        """
        获取数据
        """
        data_upload_name = None  # 上传数据的名称
        if sql_file == 'file':
            """
            从oss下载数据到本地、读取本地数据
            """
            # oss_client =  OSSClient(app_domain=config.app_domain,
            #                    auth_domain=config.auth_domain,
            #                    app_id=config.app_id,
            #                    app_key=config.app_key,
            #                    bucket=bucket_dir)
            # file_size = file_size
            # file_name = file_name


            # 1、将数据下载到本地
            download_path = None
            if(config.scp_use==0):
                download_path = bucket_dir + '/' + file_name2
            else:
                download_name = str(file_name2)
                try:
                    download_path = os.path.join(os.path.dirname(os.path.abspath('__file__')), 'data',
                                                 download_name)  # 构造下载数据的路径，默认为当前工作目录，文件名和数据库的保持一致
                    scp_client = SCPClient(ips=config.ips,
                                           port=config.port,
                                           user=config.user,
                                           password=config.password,
                                           bucket=bucket_dir)
                    scp_client.download(bucket_dir+'/'+download_name, download_path)  # 会将数据保存到当前目录下
                except Exception as e:
                    logging.info(errorcode.Data_Read_1003 + ':' + str(e), exc_info=True)
                    error_code_msg = '1003' + ':' + errorcode.Data_Read_1003
                    raise Exception(error_code_msg)

            ##判断文件是否存在并重命名，避免训练时使用重新解压原zip包冲突
            if os.path.exists(download_path) and re.search('\.zip$', download_path):
                new_path = download_path[:-4] + ''.join(random.sample('zyxwvutsrqponmlkjihgfedcba',3)) + '.zip'
                shutil.copy(download_path, new_path)
                download_path = new_path

            # 2、读取当前目录下的数据
            # download_path = os.path.join(bucket_dir, file_name2)  # 构造下载数据的路径，默认为当前工作目录，文件名和数据库的保持一致
            # download_path = bucket_dir + '/' + file_name2  # 构造下载数据的路径，默认为当前工作目录，文件名和数据库的保持一致

            data = None
            try:
                data = self.get_local_data(download_path, encoding='utf-8', model_type=model_type)
            except Exception as e:
                logging.info(errorcode.Data_Read_1001 + ':' + str(e), exc_info=True)
                error_code_msg = '1001' + ':' + errorcode.Data_Read_1001
                raise Exception(error_code_msg)
        elif sql_file == 'sql':
            """
            从数据库获取数据
            """
            try:
                data = dataupload.get_data_from_database(dao, conn_id, sql_syntax)
            except Exception as e:
                error_code_msg = '1004' + ':' + errorcode.Data_Read_1004
                raise Exception(error_code_msg)

            # bucket_dir_up = '/opt/image_dectection/provider/file/upload/txt_classify/zengjw5'+'/data'
            bucket_dir_up = bucket_dir
            if not os.path.exists(bucket_dir_up):
                os.makedirs(bucket_dir_up)
            file_name2_up = str(self.id) + '.csv'
            if (config.scp_use == 0):
                file_path = bucket_dir_up + '/' + file_name2_up
            else:
                file_path = os.path.join(os.getcwd(), 'data', file_name)
            data.to_csv(file_path, index=False, encoding='utf_8_sig')
            file_size_up = os.path.getsize(file_path)
            #将下载的数据上传至oss
            if(config.scp_use == 1):
                # dataupload.data_upload(data, data_upload_name)
                scp_client = SCPClient(ips=config.ips,
                                       port=config.port,
                                       user=config.user,
                                       password=config.password,
                                       bucket=bucket_dir_up)
                scp_client.upload(bucket_dir_up + '/' + file_name2_up,file_path)

            #将信息写入数据库
            # dao.update_from_dict('calibration_dataset', {'file_size':file_size_up, 'file_name':file_name_up}, {'id':self.id})
            dao.update_from_dict(config.material_table, {'bucket_dir': bucket_dir_up, 'file_size': file_size_up,
                                                         'file_name2': file_name2_up, 'sql_file': 'file'},{'id': self.id})

        # 时间序列模型数据处理
        if model_type == '1005':
            value_col_name = target_column
            date_col_name = variable_name

            value_col = data[value_col_name]

            # 是否有日期列
            if date_col_name:
                # 判断日期列是否有多列
                if ',' in date_col_name:
                    error_code_msg = '1014' + ':' + errorcode.Data_Type_1014
                    raise Exception(error_code_msg)
                else:
                    date_col = data[date_col_name]
                    data_ts = self.data_manipulate_ts(value_col, date_col)
            else:  # 没有日期列
                date_col = None
                data_ts = self.data_manipulate_ts(value_col, date_col)

            result['x_train'] = data_ts
            result['map_label'] = {'feature': {}, 'label': {}, 'date_name': [],"feature_name": [date_col_name], 'label_name': value_col_name}
            result['var_name'] = {'feature_name': date_col_name, 'label_name': value_col_name}
            return result

        # 1006， 1007 文本图像分类设置参数列名
        if model_type == '1006':  # 添加切词
            print('文本数据处理')
            # target_column = 'text_target'  # 读取数据中，图像类别的只有text_sentence图，text_target标签
            # variable_name = 'comment'  # 切词放在Algo模块部分，loaddata不做过多预处理

            # 读取停用词，第二步处理为array格式，具体路径后续更改，也可外部接入
            stopwords_path = os.path.join(os.path.dirname(os.path.abspath('__file__')), 'data', 'cn_stopwords.txt')
            stopwords = pd.read_csv(stopwords_path,
                                    index_col=False, quoting=3, sep="\t", names=['stopword'],
                                    encoding='utf-8')
            stopwords = stopwords['stopword'].values

            def preprocess_text(sentence, stopwords):  # 仅切词过滤，放在loaddata.py
                segs = jieba.lcut(sentence)
                # 去标点、停用词等
                segs = list(filter(lambda x: len(x) > 1, segs))
                segs = list(filter(lambda x: x not in stopwords, segs))
                # 将句子处理成  词语 词语 词语 ……的形式
                return " ".join(segs)

            # 切词处理
            data[variable_name] = data[variable_name].astype(str).apply(lambda x: preprocess_text(x, stopwords))

        if model_type == '1007':
            print('图像数据处理')
            target_column = 'img_target'  # 读取数据中，图像类别的只有img_array图，img_target标签
            variable_name = 'img_array'

        if model_type == '1008':
            print('音频数据处理')
            target_column = 'audio_target'  # 读取数据中，图像类别的只有img_array图，img_target标签
            variable_name = [x for x in data.columns if x != target_column]
            logging.info('定义audio_target和variable' + str(data.columns), exc_info=True)


        if model_type in ['1009','1010']:
            try:
                label_map = {}
                label_set = set()
                for colounm in data:
                    for box in colounm[1]:
                        label_set.add(box[4])
                labels = list(label_set)

                for label in labels:
                    label_map[ranstr(5)] = label
                data_replace = []
                index = 0

                while index < len(data):
                    temp_list = []
                    for box in data[index][1]:
                        temp_list.append([list(label_map.keys())[list(label_map.values()).index(i)] if i in list(label_map.values()) else i for i in box])
                    data_replace.append([data[index][0], temp_list])
                    index += 1


                if re.search('%', split_proportion):
                    split_pro = float(split_proportion.strip('%')) / 100
                else:
                    split_pro = float(split_proportion)
                    if split_pro > 1:
                        # logging.info(errorcode.Data_Type_1008)
                        error_code_msg = '1008' + ':' + errorcode.Data_Type_1008
                        raise Exception(error_code_msg)

                result['mid_path'] = download_path[:-4]
                # 选择第四层文件
                value = []
                result['mid_path'] = fileCount(result['mid_path'], value, 4)[0][0]

                x_train, x_test = model_selection.train_test_split(data_replace, test_size= 1 - split_pro)
                value_col_name = 'image_path'
                date_col_name = 'lable'

                result['x_train'] = x_train
                result['x_test'] = x_test
                result['map_label'] = {'feature': {}, 'label': label_map, "feature_name": ['image_path'], 'label_name': date_col_name}
                result['var_name'] = {'feature_name': date_col_name, 'label_name': value_col_name}
                return result
            except Exception as e:
                print(e)




        # 1001 1002时间格式处理
        date_col = which_is_datetime(data)
        data = transform_to_days(data)  # 将日期型格式数据转换为天数

        """
        3:特征、标签处理以及数据集验证(分隔符、缺失值、数据类型)
        """
        # 特征
        # 特征数据解析
        if model_type in ['1001', '1002', '1003', '1004', '1005', '1006', '1007']:  # 音频
            feature_name = variable_name.split(',')
            data_feature = data[feature_name]  # X数据 dataframe格式
        if model_type in ['1008']:
            feature_name = variable_name
            data_feature = data[feature_name]
        # 标签
        data_label = None
        target_name = target_column

        if model_type in ['1001', '1002']:
            # 将列的位置转为列名
            try:  # 列数
                col_target_index = int(target_column)
                if col_target_index > data.shape[1] - 1:  # 列数 > 数据的列数-1
                    error_code_msg = '1009' + ':' + errorcode.Data_Type_1009
                    raise Exception(error_code_msg)
                target_name = data.columns[col_target_index]
            except:  # 列名
                target_name = target_column
                # if target_name not in data.columns.to_list():#列数 > 数据的列数-1
                if target_name not in data.columns:
                    error_code_msg = '1009' + ':' + errorcode.Data_Type_1009
                    raise Exception(error_code_msg)
            data_label = data[target_name]

        # 数据验证
        if model_type in ['1001', '1002', '1003']:
            self.data_verify(data, data_feature, data_label, model_type)  # 如果验证不通过直接报错，通过则执行下面的步骤

        """
        4:标签映射、数据集划分
        """
        # 1、变量映射
        all_map = {}
        if model_type in ['1001', '1002', '1003']:
            data_feature, data_label, all_map = self.data_manipulate(data_feature, data_label, model_type)
        # 1006 1007 图像分类和语音分类label映射，以及img_array和text_sentence转Series
        if model_type in ['1006', '1007']:
            data_label = data[target_name]
            data_label, label_map = self.var_l_map(data_label)
            all_map['feature'] = {}
            all_map['label'] = label_map
            data_feature = data_feature[variable_name]  # 图像的img_array和句子的text_sentence转回Series类别
        # 1008 音频分类，实际上也为结构化数据，用了pyAudioAnalysis的特征提取
        if model_type in ['1008']:
            data_label = data[target_name]
            data_label, label_map = self.var_l_map(data_label)
            all_map['feature'] = {}
            all_map['label'] = label_map
            logging.info('audio读取为data_label和feature', exc_info=True)

        # 3、自变量、因变量
        label_name = target_name
        all_map['feature_name'] = feature_name
        all_map['label_name'] = label_name
        all_map['date_name'] = date_col

        # 限制all_map的最大长度
        all_map_json = json.dumps(all_map, ensure_ascii=False)
        all_map_json_len = len(all_map_json)
        if all_map_json_len > 20000:
            error_code_msg = '1011' + ':' + errorcode.Data_Type_1011
            raise Exception(error_code_msg)

        var_name = {}
        var_name['feature_name'] = feature_name
        var_name['label_name'] = label_name

        # 获取标签分类数目
        class_num = None
        if model_type in ['1001', '1002', '1006', '1007', '1008','1009']:
            class_num = len(data_label.unique().tolist())

        # 划分训练集、测试集
        x_train = None
        x_test = None
        y_train = None
        y_test = None
        if model_type in ['1001', '1002', '1006', '1007', '1008']:  # 分类切割训练和测试集
            # 将百分比的比例变为数值型
            if re.search('%', split_proportion):
                split_pro = float(split_proportion.strip('%')) / 100
            else:
                split_pro = float(split_proportion)
                if split_pro > 1:
                    # logging.info(errorcode.Data_Type_1008)
                    error_code_msg = '1008' + ':' + errorcode.Data_Type_1008
                    raise Exception(error_code_msg)

            x_train, x_test, y_train, y_test = train_test_split(data_feature, data_label,
                                                                test_size=1 - split_pro,
                                                                random_state=0)  # 将字符型的split_proportion转为数值型, random_state保证每次划分结果一样

        """
        4-1 1003、1004模型的训练数据
        """
        if model_type == '1004':
            x_train = self.data_manipulate_apriori(data_feature)
        if model_type == '1003':
            x_train = data_feature
        """
        5:返回结果
        """
        if model_type in ['1001','1002']:
            column_list = [i for i in range(len(variable_name.split(',')))]
            x_train.columns = column_list
            x_test.columns = column_list

        result['x_train'] = x_train
        result['x_test'] = x_test
        result['y_train'] = y_train
        result['y_test'] = y_test
        result['map_label'] = all_map
        result['var_name'] = var_name
        result['class_num'] = class_num
        # result['data_name'] = data_upload_name
        return result


if __name__ == '__main__':
    # Manual smoke test: build a loader for material id 1 / task 1,
    # run the full load pipeline, and dump the resulting dict.
    loader = LoadData(id=1, task_id=1)
    loaded = loader.load_data()
    print(loaded)




