import datetime
import math
import os
import time

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.callbacks import LearningRateScheduler
from tensorflow.python.keras.layers import Conv1D, Dropout, MaxPool1D, Flatten, Dense

# Log GPU availability so the training device is visible in the run log.
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))

# LEN: fixed number of rows per trajectory fed to the Conv1D model.
LEN = 400
# N: number of trailing rows averaged to synthesize each padding row.
N = 5
# MIN_LEN: trajectories shorter than this many rows are discarded.
MIN_LEN = 300

def read_feat(path, test_mode=False, *, target_len=400, n_avg=5, min_len=300):
    """Read one per-vessel trajectory CSV and build a fixed-length feature matrix.

    Parameters
    ----------
    path : str
        CSV with columns ``time``, ``x``, ``y``, ``速度`` (speed), ``方向``
        (direction), and — for labelled files — ``type``.
    test_mode : bool
        When True, map the ``type`` column to an integer class and return it
        as Y.  NOTE(review): despite the name, the caller passes True for the
        *training* files (which carry labels) — confirm intent.
    target_len : int
        Number of rows in the returned matrix; shorter tracks are padded.
        Default mirrors the module-level ``LEN``.
    n_avg : int
        Trailing-row window averaged to synthesize each padding row
        (mirrors module-level ``N``).
    min_len : int
        Tracks with fewer usable rows than this are rejected
        (mirrors module-level ``MIN_LEN``).

    Returns
    -------
    tuple
        ``(X, Y)`` where X is a ``(target_len, 4)`` ndarray of z-scored
        features (or None when the track is too short) and Y is the integer
        label in test_mode (None otherwise).
    """
    df = pd.read_csv(path)
    # Files are stored newest-first; reverse into chronological order.
    df = df.iloc[::-1]

    if test_mode:
        # 拖网 = trawl, 围网 = purse seine, 刺网 = gill net.
        df['type'] = df['type'].map({'拖网': 0, '围网': 1, '刺网': 2})
        Y = np.array(df['type'].iloc[0])  # one label per file
    else:
        Y = None

    # Parsed mainly to validate the timestamp format; 'time' is not a feature.
    df['time'] = df['time'].apply(lambda x: datetime.datetime.strptime(x, "%m%d %H:%M:%S"))
    # Column-wise z-score normalization (population std, ddof=0).
    X = df[["x", "y", "速度", '方向']].apply(lambda x: (x - np.mean(x)) / np.std(x))
    # Fill residual NaNs with the column mean.  Assign back instead of chained
    # in-place fillna, which silently stops working under pandas copy-on-write.
    for column in X.columns[X.isnull().sum() > 0]:
        X[column] = X[column].fillna(X[column].mean())
    # A constant column normalizes to all-NaN (std == 0); its mean is NaN too,
    # so those rows survive fillna and are dropped here.
    X = X.dropna(axis=0)
    X = np.asarray(X)
    rows, cols = X.shape
    if rows < min_len:
        # Track too short to be usable.  Bug fix: previously returned a
        # 3-tuple here, which broke the 2-value unpacking at the call site.
        return None, None
    # Pad up to target_len by appending the mean of the last n_avg rows.
    for i in range(rows, target_len):
        b = np.zeros((1, cols))
        for j in range(n_avg):
            b += X[i - j - 1]
        X = np.vstack((X, b / n_avg))  # vstack: row_stack is deprecated in NumPy 2.x
    return X[:target_len], Y


def load_data(X_file="./npy/data_x.npy", Y_file="./npy/data_y.npy", new=False):
    """Return the (X, Y) training arrays, using the .npy cache when present.

    When both cache files exist (and ``new`` is False) they are loaded
    directly; otherwise every CSV under the raw-data directory is parsed via
    ``read_feat``, the arrays are saved to the cache paths, and returned.
    """
    cache_hit = not new and os.path.exists(X_file) and os.path.exists(Y_file)
    if cache_hit:
        X = np.load(X_file)
        Y = np.load(Y_file)
        print('X.shape =', X.shape, ', Y.shape =', Y.shape)
        return np.array(X), np.array(Y)

    path = './hy_round1_train_20200102'
    features, labels = [], []
    for idx, fname in enumerate(os.listdir(path)):
        if not idx % 1000:
            print(idx)
        full_path = os.path.join(path, fname)
        print(full_path)
        x, y = read_feat(full_path, True)
        if x is not None:
            features.append(x)
            labels.append(y)
    X, Y = np.array(features), np.array(labels)
    np.save(X_file, X)
    np.save(Y_file, Y)
    return X, Y


# Load the dataset and hold out 20% as a test split (fixed seed for
# reproducibility).
X, Y = load_data()
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
batchsize = 512

# Model: two Conv1D -> Dropout -> MaxPool stages, then a 3-way softmax.
# Built from the public tf.keras API: the original mixed layers from the
# private tensorflow.python.keras package with a tf.keras optimizer at
# compile time, a combination known to fail on some TF versions.
model = keras.Sequential()
model.add(keras.layers.Conv1D(filters=32, kernel_size=5, activation='relu',
                              input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.MaxPool1D(pool_size=2))
model.add(keras.layers.Conv1D(filters=32, kernel_size=3, activation='relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.MaxPool1D(pool_size=2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(3, activation='softmax'))
# summary() prints the table itself; wrapping it in print() adds a stray None.
model.summary()

# Optimizer: SGD with Nesterov momentum.  (Earlier commented-out experiments
# with LearningRateScheduler / ExponentialDecay removed as dead code.)
opt = keras.optimizers.SGD(learning_rate=0.002, momentum=0.9, nesterov=True)
# Labels are integer classes (0/1/2), so sparse categorical cross-entropy
# is used rather than the one-hot variant.
model.compile(optimizer=opt,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Fit the model, keeping 10% of the training split for validation, and
# report the wall-clock training time.
t0 = time.time()
history = model.fit(X_train, y_train, batch_size=batchsize, epochs=100,
                    validation_split=0.1)
mins, secs = divmod(time.time() - t0, 60)
print("train finished in %d min %d s" % (mins, secs))

# Plot training vs. validation accuracy over epochs.
curves = pd.DataFrame({
    'accuracy': history.history['accuracy'],
    'val_accuracy': history.history['val_accuracy'],
})
curves.plot(figsize=(8, 5))
plt.grid(True)
plt.axis([0, 100, 0, 1])
plt.show()

# Plot training vs. validation loss over epochs.
curves = pd.DataFrame({
    'loss': history.history['loss'],
    'val_loss': history.history['val_loss'],
})
curves.plot(figsize=(8, 5))
plt.grid(True)
plt.axis([0, 100, 0, 1])
plt.show()

# Evaluate on the held-out split: take the argmax over class probabilities
# (vectorized; equivalent to list(x).index(max(x)) including first-max ties).
predicted = model.predict(X_test)
predicted = np.argmax(predicted, axis=1)
accuracy = accuracy_score(y_test, predicted)
print("accuracy", accuracy)
print("f1_score", f1_score(y_test, predicted, labels=[0, 1, 2], average='macro'))
