import os
import pandas as pd
import numpy as np
from tensorflow.keras import models
from tensorflow.keras.layers import Dropout
from tensorflow.keras import layers, optimizers
from sklearn.model_selection import train_test_split
import tensorflow as tf

def build_model(layers_num=1, learning_rate=-0.01, ac_fun="", final_fun="", dropout=0.0, neuron_num=-1, input_dim=900):
    """Build and compile a dense binary classifier.

    Parameters
    ----------
    layers_num : int
        Number of extra hidden Dense+Dropout pairs added after the input layer.
    learning_rate : float
        Nadam learning rate; any value <= 0 means "use the Keras default".
    ac_fun : str
        Hidden-layer activation; "" or the UI placeholder "please choose..."
        falls back to "relu".
    final_fun : str
        Output-layer activation; "" or "please choose..." falls back to "sigmoid".
    dropout : float
        Dropout rate applied after every hidden layer.
    neuron_num : int
        Units per hidden layer; the sentinel -1 means the default of 32.
    input_dim : int
        Length of each input feature vector (default 900, matching the
        900-point spectra produced by preprocessing()).

    Returns
    -------
    A compiled tf.keras Sequential model with one `final_fun`-activated output
    unit, binary cross-entropy loss, and accuracy metric.
    """
    # Resolve sentinel / UI-placeholder arguments to sensible defaults.
    if neuron_num == -1:
        neuron_num = 32
    if ac_fun in ("", "please choose..."):
        ac_fun = "relu"
    if final_fun in ("", "please choose..."):
        final_fun = "sigmoid"

    model = models.Sequential()
    model.add(layers.Dense(neuron_num, activation=ac_fun, input_shape=(input_dim,)))
    model.add(Dropout(dropout))
    for _ in range(layers_num):
        model.add(layers.Dense(neuron_num, activation=ac_fun))
        model.add(Dropout(dropout))
    model.add(layers.Dense(1, activation=final_fun))
    model.summary()

    # A non-positive learning_rate selects Nadam with its built-in default rate.
    if learning_rate <= 0:
        model.compile(optimizer='nadam', loss='binary_crossentropy', metrics=['accuracy'])
    else:
        opt = optimizers.Nadam(learning_rate=learning_rate)
        model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model

def preprocessing(folder_1, folder_2, whitename):
    """Load spectra from two class folders and build a labelled dataset.

    Every file under `folder_1` / `folder_2` is a tab-separated two-column
    table whose column 1 holds intensity values. Files from `folder_1` are
    labelled 1, files from `folder_2` are labelled 0. Each spectrum is
    cropped to points 200..1099 (900 values), divided by the white-reference
    spectrum, and scaled by 0.97.

    Parameters
    ----------
    folder_1 : str
        Directory of positive-class spectra (label 1); walked recursively.
    folder_2 : str
        Directory of negative-class spectra (label 0); walked recursively.
    whitename : str
        Path to the white-reference spectrum file (same two-column format).

    Returns
    -------
    X : ndarray, shape (n_samples, 900)
        Normalised intensities.
    Y : ndarray, shape (n_samples,)
        0/1 class labels.
    """
    def _load_folder(folder, label):
        # One row per file: the intensity column with the class label appended last.
        rows = []
        for parent, _dirnames, filenames in os.walk(folder):
            for filename in filenames:
                file_path = os.path.join(parent, filename)
                data = pd.read_csv(file_path, sep='\t', header=None)
                rows.append(np.append(np.array(data[1]), label))
        return rows

    # `samples` (renamed from `all`, which shadowed the builtin) stacks both classes.
    samples = np.array(_load_folder(folder_1, 1) + _load_folder(folder_2, 0))

    w = pd.read_csv(whitename, sep='\t', header=None)
    white = np.array(w[1])[200:1100]

    X = samples[:, 200:1100]
    # The label was appended as the final element of each row, so index it as
    # -1 instead of the fragile hard-coded 2048 (which only worked for
    # spectra of exactly 2048 points).
    Y = samples[:, -1]
    X = X / white * 0.97
    return X, Y

def train_process(X, Y, test_size, layers_num=1, learning_rate=-0.01, ac_fun="", final_fun="", dropout=0.3, neuron_num=-1,
                  epochs=10, batch_size=10, model_path="mo1.h5"):
    """Split (X, Y), train a model from build_model(), evaluate and save it.

    Parameters
    ----------
    X, Y : array-like
        Feature matrix and 0/1 labels; labels are cast to float32 for Keras.
    test_size : float
        Fraction of the data held out for evaluation (sklearn train_test_split).
    layers_num, learning_rate, ac_fun, final_fun, dropout, neuron_num
        Forwarded unchanged to build_model().
    epochs : int
        Passes over the full training set (previously hard-coded to 10).
    batch_size : int
        Samples per gradient update (previously hard-coded to 10).
    model_path : str
        Where the trained model is saved (default "mo1.h5", as before).

    Returns
    -------
    The [loss, accuracy] list from model.evaluate on the held-out split.
    """
    # Fixed seeds keep both the data split and the weight init reproducible.
    tf.random.set_seed(1)
    x_train, x_test, train_labels, test_labels = train_test_split(X, Y, test_size=test_size, random_state=1)
    y_train = np.asarray(train_labels).astype('float32')
    y_test = np.asarray(test_labels).astype('float32')

    model = build_model(layers_num, learning_rate, ac_fun, final_fun, dropout, neuron_num)
    model.fit(x_train,
              y_train,
              epochs=epochs,
              batch_size=batch_size)
    results = model.evaluate(x_test, y_test, batch_size=1)
    model.save(model_path)
    return results

