# -*- coding: utf-8  -*-
import sys, os, re
import configparser
import nibabel as nib
import requests
import joblib 
import json
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split

import imageio  # 转换成图像
from PIL import Image
from keras.utils import np_utils
import math
import tensorflow as tf
import copy
import random
import ants
import time

# 递归删除目录
#  os.removedirs()

# from config import ROOT_URL

# ROOT_URL = "http://125.39.136.211:5000"

# Local development server
ROOT_URL = "http://127.0.0.1:5000"

# Server 27 (alternative deployment, currently disabled)
# ROOT_URL = "172.18.13.27:5000"

# Lazily-initialised global CNN feature extractor; built on first use (see tezhengtiqu)
model = None

# 返回数组arr对应类别序号
def labelToIndex(arr):
    """Encode the values of ``arr`` in place as integer category codes.

    Categories are numbered by first occurrence (the first distinct value
    becomes 0, the second 1, ...).  Works on a ``pd.Series`` (iterating its
    index labels) or any mutable sequence (iterating positions).

    Returns the same ``arr`` object, mutated.
    """
    # First-occurrence-ordered mapping value -> category code.
    # (Replaces the original list + .index() lookup, which was O(n*k).)
    codes = {}
    for value in arr:
        if value not in codes:
            codes[value] = len(codes)

    # isinstance instead of `type(arr) == pd.Series` (also accepts subclasses)
    if isinstance(arr, pd.Series):
        for i in arr.index:
            arr[i] = codes[arr[i]]
    else:
        for i in range(len(arr)):
            arr[i] = codes[arr[i]]

    return arr

# Root directory where generated intermediate images are cached
imageTemPath = "/home/huangyunyou/crzzy/imageTem"

def select_model(pre_model, pooling):
    """Build a frozen, ImageNet-pretrained feature extractor.

    pre_model: backbone name; only 'DenseNet201' is supported
        (224x224 inputs, 1x1920 pooled feature vector).
    pooling: global pooling mode forwarded to the backbone ('avg'/'max').

    Returns a tf.keras.Model mapping uint8 images to feature vectors,
    or 0 for an unknown backbone name.
    """
    if pre_model != 'DenseNet201':
        return 0

    # uint8 pixels in, DenseNet-style preprocessing applied inside the graph
    raw = tf.keras.layers.Input([None, None, 3], dtype=tf.uint8)
    pixels = tf.keras.applications.densenet.preprocess_input(tf.cast(raw, tf.float32))

    # Headless backbone, weights frozen: used purely as a feature extractor
    backbone = tf.keras.applications.DenseNet201(include_top=False, weights='imagenet', pooling=pooling)
    backbone.trainable = False

    features = backbone(pixels)
    return tf.keras.Model(inputs=[raw], outputs=[features])



# MRI图像配准
def peizhun(inputPath="", outoutPath="", image_type='MRI', weighting="T1"):
    """Register a volume onto an MNI152 template using ANTs SyN.

    inputPath: path of the moving .nii image.
    outoutPath: destination path for the registered image (NOTE: the name is
        a typo for "outputPath", kept as-is so keyword callers don't break).
    image_type: 'MRI' or 'PET' (the PET branch is currently broken, see below).
    weighting: 'T1' / 'T2' / 'PD' -- selects the fixed MRI template.

    Returns -1 if outoutPath already exists, True when the registered file
    was written, False when it was not; implicitly returns None for an
    unknown image_type.
    """

    # Fixed MNI152 linear templates (T1-, T2- and PD-weighted)
    image_fix_path = '/home/huangyunyou/mxx/Mxx/template/'
    fixed_T1 = ants.image_read(image_fix_path + 'mni_icbm152_lin_nifti/icbm_avg_152_t1_tal_lin.nii')
    fixed_T2 = ants.image_read(image_fix_path + 'mni_icbm152_lin_nifti/icbm_avg_152_t2_tal_lin.nii')
    fixed_PD = ants.image_read(image_fix_path + 'mni_icbm152_lin_nifti/icbm_avg_152_pd_tal_lin.nii')

    # registration_file_path = registration_save + image_id + '.nii'


    # Skip the (expensive) registration if the output already exists
    if  os.path.exists(outoutPath):
        return -1


    if image_type == 'MRI':
        moving = ants.image_read(inputPath)
        tmp = moving.numpy()
        # 4D time series: keep only the middle frame
        if tmp.ndim == 4:
            frames = int(tmp.shape[3])
            slice_index = math.ceil(frames / 2.0)
            tmp = tmp[:, :, :, slice_index - 1]
        moving = ants.from_numpy(tmp)
        fixed = fixed_T1
        if weighting == 'T2':
            fixed = fixed_T2
        elif weighting == 'PD':
            fixed = fixed_PD
        mytx = ants.registration(fixed=fixed, moving=moving, type_of_transform='SyN')
        # Warp the moving image into the fixed template's space
        # print(mytx)
        warped_moving = mytx['warpedmovout']

        ants.image_write(warped_moving, outoutPath)
        if not os.path.exists(outoutPath):
            return False
        else:
            return True
        # outoutPath is "<image id>.nii"


    elif image_type == 'PET':
        # BUG(review): `row` is not defined anywhere in this function --
        # reaching this branch raises NameError.  Presumably the tracer name
        # should come from a parameter or the caller's data row; confirm.
        radiopharmaceutical = row['Radiopharmaceutical']
        moving = ants.image_read(inputPath)
        tmp = moving.numpy()
        if tmp.ndim == 4:
            frames = int(tmp.shape[3])
            slice_index = math.ceil(frames / 2.0)
            tmp = tmp[:, :, :, slice_index - 1]
        moving = ants.from_numpy(tmp)
        # BUG(review): fixed_FDG_PET / fixed_AV45 are never defined in this
        # file -- NameError here as well.  PET templates would need to be
        # loaded the same way as the MRI templates above.
        fixed = fixed_FDG_PET
        if radiopharmaceutical == '18F-AV45':
            fixed = fixed_AV45

        # Register onto the PET template
        mytx = ants.registration(fixed=fixed, moving=moving, type_of_transform='SyN')
        # print(mytx)
        warped_moving = mytx['warpedmovout']
        # The template-registered 3D volume
        # if not os.path.exists(outoutPath):
        ants.image_write(warped_moving, outoutPath)
        if not os.path.exists(outoutPath):
            return False
        else:
            return True





def convert_3Dto2DtoRGB(image_path, image_tmp_save_path, image_save_path):
    """Slice a 3D NIfTI volume along z into per-slice JPEGs, then convert each to RGB.

    image_path: source .nii file.
    image_tmp_save_path: directory for the raw (grayscale) slice JPEGs.
    image_save_path: directory for the RGB slice JPEGs ("<z-index>.jpeg").

    Existing slice files are left untouched, so repeated calls are cheap.

    Fix vs. original: the nibabel image and the PIL image no longer share the
    name `img` (the original rebound `img` inside the loop), and directory
    creation uses exist_ok=True instead of a check-then-create race.
    """
    print(image_path)
    volume = nib.load(image_path)
    volume_data = volume.get_fdata()

    os.makedirs(image_tmp_save_path, exist_ok=True)
    os.makedirs(image_save_path, exist_ok=True)

    # NOTE(review): assumes the volume is exactly 3D; a 4D series would
    # fail to unpack here -- callers register via peizhun() first, which
    # reduces 4D inputs to 3D.
    (x, y, z) = volume.shape
    for i in range(z):  # z indexes the slice sequence
        slice_2d = volume_data[:, :, i]
        tmp_image_path = os.path.join(image_tmp_save_path, '{}.jpeg'.format(i))
        if not os.path.exists(tmp_image_path):
            imageio.imwrite(tmp_image_path, slice_2d)
        gray = Image.open(tmp_image_path)
        rgb = gray.convert('RGB')
        out_path = os.path.join(image_save_path, '{}.jpeg'.format(i))
        if not os.path.exists(out_path):
            rgb.save(out_path)



def tezhengtiqu(inputPath, image_tmp_save_path, image_save_path, pre_model='DenseNet201', pooling='avg', start=-1, end=-1):
    """Extract one pooled deep-feature vector from a registered 3D volume.

    The volume is sliced into 2D RGB JPEGs, the slices in [start, end) are
    pushed through a lazily created, globally shared pre-trained CNN, and
    the per-slice features are pooled into a single vector.

    inputPath: registered .nii volume.
    image_tmp_save_path: cache directory for grayscale slice JPEGs.
    image_save_path: cache directory for RGB slice JPEGs.
    pre_model, pooling: forwarded to select_model().
    start, end: slice range; -1 means "from 0" / "to the slice count".

    Returns the pooled feature vector as a plain Python list.
    Raises ValueError for an unsupported pooling mode (the original fell
    through to an undefined variable, i.e. NameError).
    """
    global model

    # One 3D scan becomes ~100+ 2D RGB slices on disk
    convert_3Dto2DtoRGB(inputPath, image_tmp_save_path, image_save_path)

    # Number of 2D slices produced for this scan
    image_count = len(os.listdir(image_save_path))

    if start == -1:
        start = 0
    if end == -1:
        end = image_count

    # Load each slice, resize to the 224x224 input the CNN expects, and
    # batch along axis 0.  Collecting into a list and concatenating ONCE
    # replaces the original per-iteration tf.concat (quadratic copying).
    slices = []
    for i in range(start, end):
        slice_path = image_save_path + '/' + str(i) + '.jpeg'
        img = tf.image.decode_jpeg(tf.io.read_file(slice_path))
        img = tf.image.resize(img, (224, 224), method='nearest')
        slices.append(tf.reshape(img, [-1, 224, 224, 3]))
    image_array = tf.concat(slices, axis=0) if slices else []

    if model is None:  # lazily build the shared feature extractor
        model = select_model(pre_model, pooling)
    result = model.predict(image_array)  # per-slice feature rows

    # Pool across slices into a single feature vector
    if pooling == 'max':
        return np.max(result, axis=0).tolist()
    if pooling == 'avg':
        return np.mean(result, axis=0).tolist()
    raise ValueError("unsupported pooling mode: %r" % pooling)



class SCClient(object):
    """Client for downloading a configured dataset from the SC server and
    turning it into train/validation/test matrices.

    Typical flow: loadData() -> __download() (network + cache) ->
    __toMatrix() (image registration + CNN features) -> constrained or
    purely random split.
    """

    def __init__(self):
        # Parsed .ini configuration (set by completeData / __download)
        self.mycf = None

    def getConfig(self, cf, section, key):
        """Return cf[section][key], or None (after printing a message) when
        the section or the key does not exist."""
        if section not in cf.sections():
            print("配置%s不存在" % section)
            return None

        if key not in cf.options(section):
            print("配置项%s不存在" % key)
            return None

        return cf[section][key]

    def dataInit(self, iniFile):
        """Upload the opened .ini file to /dataInit and return the server's
        JSON reply (status/msg/uuid/start/client/allData)."""
        url = ROOT_URL + '/dataInit'
        files = {"file": iniFile}
        res = requests.post(url, params={}, files=files)
        return res.json()

    def __getData(self, client, start, uuid, allData):
        """Fetch one segment of the server-assembled data from /getData."""
        url = ROOT_URL + '/getData'
        param = {
            "client": client,
            "start": start,
            "uuid": uuid,
            "allData": allData
        }
        res = requests.get(url, params=param)
        return res.json()

    def completeData(self, iniPath, dataSavePath):
        """Download the assembled data (in one shot or in segments) and dump
        each segment under <dataSavePath>/<name>/<version>.

        Fix vs. original: the status of the init reply is checked BEFORE
        reading "uuid" -- a failed reply may not carry that key, and the
        original would die with KeyError instead of printing the message.
        """
        if not os.path.exists(iniPath):
            print("%s 文件不存在！" % iniPath)
            return

        self.mycf = configparser.ConfigParser(allow_no_value=False, comment_prefixes=('#', ';'))
        self.mycf.read(iniPath, encoding='utf-8')

        sectionName = self.mycf.sections()[0]
        SCName = self.getConfig(self.mycf, sectionName, "name")
        verName = self.getConfig(self.mycf, sectionName, "version")

        iniFile = open(iniPath, 'rb')
        resInit = self.dataInit(iniFile)
        print(resInit)

        # Initialisation failed: report and stop
        if resInit["status"] != 0:
            print(resInit["msg"])
            return

        uuid = resInit["uuid"]

        # Both branches below save into the same directory
        dataSavePath = os.path.join(dataSavePath, SCName, verName)
        if not os.path.exists(dataSavePath):
            os.makedirs(dataSavePath)

        if resInit["allData"] == 1:
            # Everything arrives in a single reply
            dataRes = self.__getData(client=resInit["client"], start=resInit["start"], uuid=uuid, allData=1)
            joblib.dump(dataRes["data"], dataSavePath + "/data.dat")
        else:
            # Segmented download: keep fetching until the server flags "over"
            dataRes = self.__getData(client=resInit["client"], start=resInit["start"], uuid=uuid, allData=0)
            joblib.dump(dataRes["data"], os.path.join(dataSavePath, "{}_{}.dat".format(resInit["client"], resInit["start"])))
            while dataRes["over"] == 0:
                dataRes = self.__getData(client=dataRes["client"], start=dataRes["start"], uuid=uuid, allData=0)
                joblib.dump(dataRes["data"], os.path.join(dataSavePath, "{}_{}.dat".format(dataRes["client"], dataRes["start"])))
        return

    def isFill(self, ftype, element, constraint, cur_train, cur_verif, cur_test):
        """Return True when *element* may go into dataset *ftype* without
        violating *constraint*.

        ftype: 0 = train, 1 = validation, 2 = test.
        element: one data row (indexable by column number).
        constraint: iterable of (column, setA, setB) -- rows sharing the same
            value in `column` must not appear in both set A and set B.
        cur_train/cur_verif/cur_test: 2-D numpy snapshots of what has been
            assigned so far (possibly empty 1-D arrays when nothing has).

        Fixes vs. original: the missing `self` parameter is added (every
        `self.isFill(...)` call raised TypeError before), and empty
        snapshots no longer raise IndexError when column-sliced.
        """
        # Constraints relevant to train / validation / test respectively
        myTrainCons, myVerifCons, myTestCons = [], [], []
        for cons in constraint:
            if cons[1] == 0:
                myTrainCons.append(cons)
            elif cons[1] == 1:
                myVerifCons.append(cons)
            elif cons[1] == 2:
                myTestCons.append(cons)

            if cons[1] == cons[2]:
                continue

            # Mirror the constraint so it is also indexed by its other side
            if cons[2] == 0:
                myTrainCons.append([cons[0], cons[2], cons[1]])
            elif cons[2] == 1:
                myVerifCons.append([cons[0], cons[2], cons[1]])
            elif cons[2] == 2:
                myTestCons.append([cons[0], cons[2], cons[1]])

        myCons = []
        if ftype == 0:
            myCons = myTrainCons
        elif ftype == 1:
            myCons = myVerifCons
        elif ftype == 2:
            myCons = myTestCons

        # cons[2] names the OTHER set the column value must not appear in;
        # guard .size so empty (1-D) snapshots are simply "no conflict".
        for cons in myCons:
            if cons[2] == 0 and cur_train.size:
                if element[cons[0]] in cur_train[:, cons[0]]:
                    return False
            if cons[2] == 1 and cur_verif.size:
                if element[cons[0]] in cur_verif[:, cons[0]]:
                    return False
            if cons[2] == 2 and cur_test.size:
                if element[cons[0]] in cur_test[:, cons[0]]:
                    return False

        return True

    def __baseData(self, iniFile):
        """Upload the opened .ini file to /baseData and return the server's
        JSON reply (status/msg/data)."""
        url = ROOT_URL + '/baseData'
        files = {"file": iniFile}
        res = requests.post(url, params={}, files=files)
        return res.json()

    def __downloadFile(self, filePath, savePath, fileType="image"):
        """Download one remote file via /downloadFile into
        <savePath>/<fileType>/ and return the local path ('' for empty input)."""
        if filePath == '' or filePath is None:
            return ''

        url = ROOT_URL + '/downloadFile'
        data = {
            "filePath": filePath
        }
        res = requests.post(url, data=json.dumps(data))

        name = filePath.split('/')[-1]
        mySavePath = os.path.join(savePath, fileType)
        if not os.path.exists(mySavePath):
            os.makedirs(mySavePath)

        # Write the payload to disk
        fileSavePath = os.path.join(mySavePath, name)
        with open(fileSavePath, "wb") as f:
            f.write(res.content)

        return fileSavePath

    def __toMatrix(self, source, savePath):
        """Convert the downloaded data dict into (X, Y) numpy matrices.

        Expected ``source`` layout::

            {
                "client": [ {"col1": [...], "col2": [...]}, ... ],
                "labelName": [label column names],
                "imageCol": [image column names],
                "audioCol": [...], "videoCol": [...],
            }

        Image columns are registered (peizhun) and fed through the CNN
        feature extractor (tezhengtiqu); the features are appended to the
        tabular columns.  X and Y are also dumped to <savePath>/X.res|Y.res.

        NOTE(review): only the first 1000 rows are processed (hard-coded
        below), and `X`/`y` are only bound inside the image-column branch --
        calling this without image columns raises NameError.  Both behaviours
        are preserved from the original; confirm before widening.
        """
        global imageTemPath

        # Single-machine version: only client 0 is considered
        allData = {}
        for k in source['client'][0]:
            allData[k] = source['client'][0][k]

        data = pd.DataFrame(allData)

        # Pre-processing: encode the categorical columns in place
        labelToIndex(data['PTETHCAT'])
        labelToIndex(data['PTGENDER'])
        labelToIndex(data['PTRACCAT'])
        labelToIndex(data['PTMARRY'])

        t1 = time.time()
        # Process the image columns
        if len(source['imageCol']) > 0:

            # Drop rows whose (first) image cell is empty
            dropIndex = []
            for i in range(data.shape[0]):
                if str(data.iloc[i, :][source["imageCol"][0]]).strip() == "":
                    dropIndex.append(i)
            data.drop(dropIndex, inplace=True)

            imageValue = []
            pznum = 1
            # TODO(review): hard-coded 1000 instead of data.shape[0]
            for i in range(1000):
                # Iterate the image columns (there may be several)
                for imageCol in source['imageCol']:
                    imagePath = data.iloc[i, :][imageCol]
                    imageName = imagePath.split('/')[-1].split('.')[0]
                    if not os.path.exists(os.path.join(imageTemPath, 'peizhun')):
                        os.mkdir(os.path.join(imageTemPath, 'peizhun'))  # registration cache dir
                    outputPath = os.path.join(imageTemPath, 'peizhun', imageName + ".nii")

                    r = peizhun(imagePath, outputPath, image_type='MRI', weighting="T1")
                    pznum += 1
                    imageValue.append(tezhengtiqu(inputPath=outputPath,
                        image_tmp_save_path=os.path.join(imageTemPath, imageName, "Tem"),
                        image_save_path=os.path.join(imageTemPath, imageName, "Save")))

                    if pznum % 100 == 0:
                        print("已完成%d张图像..." % pznum)

            t2 = time.time()
            print("总共处理%d张图片，共耗时%f秒" % (data.shape[0], float(t2 - t1)))

            # Drop the columns that are irrelevant for training
            data.drop(source['imageCol'], axis=1, inplace=True)
            # Extract, then drop, the label column
            y = data[source['labelName'][0]]
            data.drop(source['labelName'], axis=1, inplace=True)
            data.drop(['Modality'], axis=1, inplace=True)
            data.drop(['Visit'], axis=1, inplace=True)
            data.drop(['VISCODE'], axis=1, inplace=True)

            # Remaining tabular columns must be numeric
            data = data.astype("float")

            # Concatenate tabular features with the CNN image features
            basicX = data.values
            X = []
            for n in range(1000):
                X.append(basicX[n].tolist() + imageValue[n])
            X = np.array(X)

        labelset_float_squeeze = labelToIndex(y[:1000])
        Y = tf.keras.utils.to_categorical(labelset_float_squeeze)

        joblib.dump(X, os.path.join(savePath, "X.res"))
        joblib.dump(Y, os.path.join(savePath, "Y.res"))
        return X, Y

    def __download(self, iniPath, dataSavePath):
        """Download the base data described by the .ini file (once) and cache
        it as <dataSavePath>/<name>/<version>/data.dat.

        Returns the data dict, or None on failure.
        """
        if not os.path.exists(iniPath):
            print("%s 文件不存在！" % iniPath)
            return None

        self.mycf = configparser.ConfigParser(allow_no_value=False, comment_prefixes=('#', ';'))
        self.mycf.read(iniPath, encoding='utf-8')

        sectionName = self.mycf.sections()[0]
        SCName = self.getConfig(self.mycf, sectionName, "name")
        verName = self.getConfig(self.mycf, sectionName, "version")

        dataSavePath = os.path.join(dataSavePath, SCName, verName)
        if not os.path.exists(dataSavePath):
            os.makedirs(dataSavePath)

        # Previously downloaded: reuse the cached file
        if os.path.exists(dataSavePath + "/data.dat"):
            print("已经存在数据，无需重新下载")
            return joblib.load(dataSavePath + "/data.dat")

        iniFile = open(iniPath, 'rb')
        resInit = self.__baseData(iniFile)

        # Initialisation failed: report and stop
        if resInit["status"] != 0:
            print(resInit["msg"])
            return None

        dataRes = resInit["data"]

        print("====== 正在下载文件数据 ======")
        # Handle image columns, if any
        if dataRes["imageCol"]:
            # Collect every image path across all clients and columns
            imageList = []
            for imageCol in dataRes["imageCol"]:
                for client in dataRes["client"]:
                    imageList += client[imageCol]

            print("图片总数一共%s张..." % len(imageList))
            # De-duplicate and drop the empty-string entry, if present
            imageList = list(set(imageList))
            if "" in imageList:
                del imageList[imageList.index("")]

            print("待下载的图片一共%s张..." % len(imageList))

            # Rewrite the image paths to their local locations.
            # NOTE(review): assumes client 0 already carries a "SavePath"
            # list of matching length -- confirm against the server reply.
            for i, item in enumerate(dataRes["client"][0][dataRes["imageCol"][0]]):
                if str(item.split('/')[-1]).strip() != "":
                    dataRes["client"][0]["SavePath"][i] = os.path.join(dataSavePath, "image", item.split('/')[-1])

        # Cache so the next call skips the download entirely
        joblib.dump(dataRes, dataSavePath + "/data.dat")

        print("====== 下载文件数据完成 ======")

        return dataRes

    def loadData(self, iniPath, dataSavePath, proportion=None, constraint=None):
        """Load (or build) the dataset and split it into train/validation/test.

        proportion: 3-element ratio [train, validation, test]; default [7, 2, 1].
        constraint: list of (column, setA, setB) -- rows sharing the same
            value in `column` must not land in both set A and set B
            (0 = train, 1 = validation, 2 = test).  Default: no constraints.

        Returns X_train, y_train, X_verif, y_verif, X_test, y_test as numpy
        arrays (or [] when the download failed, as in the original).
        """
        # Resolve the (previously mutable) defaults here, not in the signature
        if proportion is None:
            proportion = [7, 2, 1]
        if constraint is None:
            constraint = []

        X, y = [], []
        if os.path.exists(dataSavePath + '/X.res') and os.path.exists(dataSavePath + '/Y.res'):
            print("已存在处理好的数据...")
            X = joblib.load(dataSavePath + '/X.res')
            y = joblib.load(dataSavePath + '/Y.res')
        else:
            data = self.__download(iniPath, dataSavePath)
            if data is None:
                print("加载数据有误")
                return []
            X, y = self.__toMatrix(data, dataSavePath)

        print("X shape, y shape \n", X.shape, y.shape)

        total = sum(proportion)
        p_train = proportion[0] / total
        p_verif = proportion[1] / total
        p_test = proportion[2] / total
        X_train, y_train, X_verif, y_verif, X_test, y_test = [], [], [], [], [], []

        # Constrained split: try the randomly drawn set first; when the
        # element conflicts there, fall back to the other sets in turn.
        if len(constraint) > 0:
            for n in range(X.shape[0]):
                P = np.random.rand()
                # Snapshots of what has been assigned so far
                cur_train = np.array(X_train)
                cur_verif = np.array(X_verif)
                cur_test = np.array(X_test)

                # Drawn for the training set
                if P < p_train:
                    if self.isFill(ftype=0, element=X[n], constraint=constraint, cur_train=cur_train, cur_verif=cur_verif, cur_test=cur_test):
                        X_train.append(X[n])
                        y_train.append(y[n])
                    elif self.isFill(ftype=1, element=X[n], constraint=constraint, cur_train=cur_train, cur_verif=cur_verif, cur_test=cur_test) and p_verif != 0:
                        X_verif.append(X[n])
                        y_verif.append(y[n])
                    elif self.isFill(ftype=2, element=X[n], constraint=constraint, cur_train=cur_train, cur_verif=cur_verif, cur_test=cur_test) and p_test != 0:
                        X_test.append(X[n])
                        y_test.append(y[n])
                # Drawn for the validation set
                elif P >= p_train and P < p_verif + p_train:
                    if self.isFill(ftype=1, element=X[n], constraint=constraint, cur_train=cur_train, cur_verif=cur_verif, cur_test=cur_test):
                        X_verif.append(X[n])
                        y_verif.append(y[n])
                    elif self.isFill(ftype=0, element=X[n], constraint=constraint, cur_train=cur_train, cur_verif=cur_verif, cur_test=cur_test) and p_train != 0:
                        X_train.append(X[n])
                        y_train.append(y[n])
                    elif self.isFill(ftype=2, element=X[n], constraint=constraint, cur_train=cur_train, cur_verif=cur_verif, cur_test=cur_test) and p_test != 0:
                        X_test.append(X[n])
                        y_test.append(y[n])
                # Otherwise the test set
                else:
                    if self.isFill(ftype=2, element=X[n], constraint=constraint, cur_train=cur_train, cur_verif=cur_verif, cur_test=cur_test):
                        X_test.append(X[n])
                        y_test.append(y[n])
                    elif self.isFill(ftype=0, element=X[n], constraint=constraint, cur_train=cur_train, cur_verif=cur_verif, cur_test=cur_test) and p_train != 0:
                        X_train.append(X[n])
                        y_train.append(y[n])
                    elif self.isFill(ftype=1, element=X[n], constraint=constraint, cur_train=cur_train, cur_verif=cur_verif, cur_test=cur_test) and p_verif != 0:
                        X_verif.append(X[n])
                        y_verif.append(y[n])

        # Unconstrained split: assign by probability alone
        else:
            for n in range(X.shape[0]):
                P = np.random.rand()

                if P < p_train:
                    X_train.append(X[n])
                    y_train.append(y[n])
                elif P >= p_train and P < p_verif + p_train:
                    X_verif.append(X[n])
                    y_verif.append(y[n])
                else:
                    X_test.append(X[n])
                    y_test.append(y[n])

        X_train = np.array(X_train)
        y_train = np.array(y_train)
        X_verif = np.array(X_verif)
        y_verif = np.array(y_verif)
        X_test = np.array(X_test)
        y_test = np.array(y_test)

        return X_train, y_train, X_verif, y_verif, X_test, y_test


def NN(inputNum, labelset_onehot, dataset):
    """Build and train a small fully connected classifier.

    inputNum: number of input features.
    labelset_onehot: one-hot labels, shape (samples, num_classes).
    dataset: feature matrix, shape (samples, inputNum); scaled by 1/255
        before training.

    Returns (history, model): the Keras training History and the fitted model.
    """
    print("inputNum=%d" % inputNum)

    input_x = tf.keras.Input(shape=(inputNum,))
    hidden1 = tf.keras.layers.Dense(32, activation='relu')(input_x)
    hidden2 = tf.keras.layers.Dense(16, activation='relu')(hidden1)
    pred = tf.keras.layers.Dense(labelset_onehot.shape[1], activation='softmax')(hidden2)

    model = tf.keras.Model(input_x, pred)

    # `lr` is a deprecated alias (removed in newer Keras); use `learning_rate`
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                  loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy']
                  )

    dataset_n = dataset / 255  # normalise inputs
    print('+++++++++++++++++++++++')
    print("dataset_n shape:")
    print(dataset_n.shape)
    history = model.fit(dataset_n, labelset_onehot, epochs=50)

    return history, model

if __name__ == '__main__':

    # 1. Path of the scenario configuration (.ini) file
    # iniPath = 'C:\\Users\\Crzzy\\Desktop\\场景配置\\服务器测试\\3个场景的配置文件-服务器\\dataset.ini'
    iniPath = '/home/huangyunyou/crzzy/ini/dataset.ini'
    # 2. Directory the downloaded dataset is stored under
    # dataSavePath = 'C:\\Users\\Crzzy\\Documents\\scconfig\\datasave'
    dataSavePath = '/home/huangyunyou/crzzy/downloadData'
    # 3. Create the scenario-configuration client
    client = SCClient()
    # 4. Load the dataset per the config file (reuses the local copy when
    #    already downloaded; otherwise downloads first)

    # proportion: train/validation/test split ratio; defaults to 0.7 train, 0.2 validation, 0.1 test.
    # constraint: split constraints, e.g. [(6, 0, 2), (0, 0, 1)]; sets are numbered 0=train, 1=validation, 2=test.
    #   The first constraint means rows sharing the same value in column 6 must not appear in both
    #   train and test; the second means rows sharing the same value in column 0 must not appear
    #   in both train and validation.
    X_train, y_train, X_verif, y_verif, X_test, y_test = client.loadData(iniPath, dataSavePath, proportion=[7, 2, 1], constraint=[])

    print("训练集维度：")
    print(X_train.shape)
    print("验证集维度：")
    print(X_verif.shape)
    print("测试集维度：")
    print(X_test.shape)
    print("测试集：")
    print(X_test)

    history, model = NN(X_train.shape[1], y_train, X_train)

    print("Loss: \n ", history.epoch, history.history.get('loss'))
    print("accuracy: \n", history.epoch,history.history.get('accuracy'))

    result =  model.predict(X_test)

    print("predict:")
    print(result)





