from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import optimizers
import numpy as np
from sip import voidptr
from npend import NpendReader as NR

def Model():
    """Build and compile the CNN classifier.

    The network flattens a (30, 6) input window, projects it to 128 units,
    reshapes that vector into an (8, 8, 2) "image", and runs a small
    convolutional stack ending in a 5-way softmax.

    Returns:
        A compiled ``keras.Sequential`` model that takes input of shape
        (batch, 30, 6) and outputs (batch, 5) class probabilities.
    """
    input_shape = (30, 6)
    model = keras.Sequential(
        [
            keras.Input(shape=input_shape),
            layers.Reshape((30 * 6,)),
            # 128 == 8 * 8 * 2, so the following Reshape is exact.
            layers.Dense(units=128, activation="softplus"),
            layers.Reshape((8, 8, 2)),
            # The redundant input_shape kwarg was dropped: Keras ignores it
            # on any layer that is not the first, and (30, 6) did not match
            # the (8, 8, 2) tensor actually entering this layer.
            layers.Conv2D(16, kernel_size=(5, 5), padding="same", activation="softplus"),
            layers.AveragePooling2D(pool_size=(2, 2)),
            layers.Flatten(),
            # NOTE(review): softmax on a hidden layer is unusual (softplus
            # was probably intended) — kept as-is to preserve behavior.
            layers.Dense(32, activation="softmax"),
            layers.Dense(5, activation="softmax"),
        ]
    )
    model.summary()
    # `lr` has been removed from Keras optimizers; `learning_rate` is the
    # supported keyword (TF 2.x).
    opt = optimizers.Adam(learning_rate=0.001)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model

def loadData():
    dataPath = '../data/data.npd'
    labelPath = '../data/label.npd'
    data=NR(dataPath).read()
    label=NR(labelPath).read()
    #每一类样本数据量不一致，取最少的个数
    min=label.min()
    max=label.max()
    indiceGroup=[]
    lens=[]
    for i in range(min,max+1):
        indice=np.where(label == i)[0]
        indiceGroup.append(indice)
        lens.append(indice.shape[0])
    #按照最少样本的分类的个数重组
    size=np.min(lens)
    for i in range(min,max+1):
        indiceGroup[i]=indiceGroup[i][:size]
    indice=np.array(indiceGroup).reshape(-1)
    #打乱
    np.random.shuffle(indice)
    data=data[indice]
    label=label[indice]
    return data,label

if __name__ == "__main__":
    # Build the balanced, shuffled dataset and one-hot encode the labels.
    samples, targets = loadData()
    targets = keras.utils.to_categorical(targets, num_classes=5)
    model = Model()

    # Hold out the final 10% of rows as the test set.
    total = samples.shape[0]
    split = int(total * 0.9)
    x_train, x_test = samples[:split], samples[split:]
    y_train, y_test = targets[:split], targets[split:]

    model.fit(x_train, y_train, batch_size=64, epochs=200, validation_split=0.1)

    savePath = "modelCNN.tf"
    model.save(savePath)

    # evaluate() returns [loss, accuracy] given the compiled metrics.
    results = model.evaluate(x_test, y_test, verbose=0)
    print("Test loss:", results[0])
    print("Test accuracy:", results[1])