import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from sklearn.preprocessing import LabelEncoder
from multiprocessing import Manager, Process


def runModel(data):
    """Build, train and evaluate a small dense softmax classifier.

    Args:
        data: a mapping (e.g. multiprocessing.Manager dict) providing:
            "x"       -- full feature DataFrame/array (used only for its column count)
            "encoder" -- fitted sklearn LabelEncoder (class count / inverse mapping)
            "train_x", "train_y" -- training split
            "test_x", "test_y"   -- test split (falls back to the training split
                                    if the caller did not provide these keys)

    Side effects: trains a Keras model, prints train/test accuracy and a
    sample prediction to stdout.
    """
    # Allow CJK characters and minus signs to render in matplotlib figures.
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.rcParams['axes.unicode_minus'] = False
    np.random.seed(1986)
    x = data["x"]
    encoder = data["encoder"]
    train_x = data["train_x"]
    train_y = data["train_y"]
    # BUG FIX: originally read data["train_x"]/data["train_y"] here, so the
    # "test" evaluation ran on the training data. Use the real test split,
    # falling back to the training split if the caller did not supply one.
    test_x = data.get("test_x", data["train_x"])
    test_y = data.get("test_y", data["train_y"])
    # Build the model: one hidden ReLU layer, softmax output over the classes.
    model = Sequential()
    model.add(Dense(units=8, input_dim=x.shape[1], activation='relu'))
    model.add(Dense(units=len(encoder.classes_), activation='softmax'))
    # Compile: integer labels (LabelEncoder output) -> sparse categorical CE.
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # Train the model.
    model.fit(train_x, train_y,
              epochs=150, verbose=2)
    # Evaluate on the training and the test splits.
    scores = model.evaluate(train_x, train_y, verbose=2)
    print("训练集,%s=%.2f" % (model.metrics_names[1], scores[1]))
    scores = model.evaluate(test_x, test_y, verbose=2)
    print("测试集,%s=%.2f" % (model.metrics_names[1], scores[1]))
    # Predict two sample rows.
    x = np.array([[1, 2, 3, 4], [1.1, 2.1, 3.1, 4.1]])
    # FIX: predict_classes/predict_proba were removed from Keras (TF >= 2.6);
    # for a softmax head, argmax/max over predict() is the exact equivalent.
    probs = model.predict(x)
    # Predicted class labels (decoded back to the original string labels).
    print(encoder.inverse_transform(np.argmax(probs, axis=1)))
    # Confidence: the probability of the winning class for each sample.
    proba = [max(row) for row in probs]
    print(proba)


def main():
    """Load the iris dataset, split it, and train three models in parallel.

    Reads ``iris.xlsx`` from the working directory, label-encodes the species
    column, performs an 80/20 train/test split, shares the data through a
    multiprocessing Manager dict, and runs ``runModel`` in three processes.
    """
    with Manager() as manager:
        # Load data (column names are Chinese: sepal length/width,
        # petal length/width, species).
        data_source = pd.read_excel("iris.xlsx")
        x = data_source[['花萼长度', '花萼宽度', '花瓣长度', '花瓣宽度']]
        y = data_source['属种']
        # Encode string species labels as integers for the model.
        encoder = LabelEncoder()
        y = encoder.fit_transform(y)
        # Split into training and test sets (80/20).
        (train_x, test_x, train_y, test_y) = train_test_split(
            x, y, train_size=0.8, test_size=0.2)

        data = manager.dict()
        data["x"] = x
        data["encoder"] = encoder
        data["train_x"] = train_x
        data["train_y"] = train_y
        # BUG FIX: these two assignments originally reused the keys
        # "train_x"/"train_y", clobbering the training split with the
        # test split. Store the test split under its own keys.
        data["test_x"] = test_x
        data["test_y"] = test_y

        # Train three independent models in parallel processes.
        processes = [Process(target=runModel, args=(data,)) for _ in range(3)]
        for p in processes:
            p.start()
        for p in processes:
            p.join()


if __name__ == "__main__":
    main()
    
