import os
# Silence TensorFlow's C++ startup logs: '2' hides INFO and WARNING,
# keeping only ERROR messages. Must be set before importing tensorflow.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import pandas as pd
import  numpy as np
import tensorflow as tf
from tensorflow.keras import layers, optimizers, Sequential

# Small MLP classifier: 6 base-stat features in, 18 raw logits out
# (one per primary type; no softmax here — the loss uses from_logits=True).
model = Sequential()
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(18))  # output logits, one per type class
def preposs(x, y):
    """Per-sample preprocessing for tf.data pipelines.

    Scales the stat features by 1/50 into float32 and casts the
    labels to int32 (required later by tf.one_hot / tf.equal).
    """
    return tf.cast(x, dtype=tf.float32) / 50.0, tf.cast(y, dtype=tf.int32)

def get_Data(mode='train'):
    """Load Pokemon.csv and return one split as (features, labels) tensors.

    Features are the six base stats; labels are integer ids derived from
    the 'Type 1' column. Rows are split 80/20 by file order.

    Args:
        mode: 'train' returns the first 80% of rows; any other value
            returns the remaining 20%.

    Returns:
        (x, y): x is a float64 tensor of shape [n, 6], y a float64
        tensor of shape [n] (cast to int32 later by `preposs`).
    """
    data = pd.read_csv('Pokemon.csv')

    # Map each primary-type name to an integer id. sorted() makes the
    # mapping reproducible across runs — plain set iteration order
    # depends on per-process hash randomization.
    type1 = data['Type 1']
    class_dict = {name: i for i, name in enumerate(sorted(set(type1)))}

    # Vectorized label lookup instead of a per-row Python loop.
    label = type1.map(class_dict).to_numpy(dtype=np.float64)

    # Feature matrix from the six base-stat columns. Deriving the row
    # count from the frame (instead of the hard-coded 800 in the old
    # loop) avoids leaving filler values if the CSV has more rows.
    stat_cols = ['HP', 'Attack', 'Defense', 'Sp. Atk', 'Sp. Def', 'Speed']
    x = data[stat_cols].to_numpy(dtype=np.float64)

    # 80/20 train/test split by row order.
    split = int(0.8 * len(x))
    if mode == 'train':
        x, label = x[:split], label[:split]
    else:
        x, label = x[split:], label[split:]

    return tf.convert_to_tensor(x), tf.convert_to_tensor(label)

def main():
    """Train the type classifier and print test accuracy after each epoch."""
    batchsz = 32
    x_train, y_train = get_Data(mode='train')
    x_test, y_test = get_Data(mode='test')

    db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    db_train = db_train.map(preposs).shuffle(1000).batch(batchsz)

    # No shuffle on the evaluation split — order does not affect accuracy.
    db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    db_test = db_test.map(preposs).batch(batchsz)

    # `lr` is deprecated (and removed in recent Keras); use `learning_rate`.
    optimizer = optimizers.Adam(learning_rate=0.001)
    for epoch in range(2000):
        for step, (x, y) in enumerate(db_train):
            with tf.GradientTape() as tape:
                # out: [b, 18] raw logits
                out = model(x)
                y_onehot = tf.one_hot(y, depth=18)
                loss = tf.reduce_mean(
                    tf.losses.categorical_crossentropy(
                        y_onehot, out, from_logits=True))
            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if step % 5 == 0:
                print(epoch, step, float(loss))

        # Evaluate on the held-out split.
        total_num = 0
        total_acc = 0
        for x, y in db_test:
            out = model(x)                 # [b, 18] logits
            pred = tf.argmax(out, axis=1)  # [b] predicted class ids
            pred = tf.cast(pred, dtype=tf.int32)
            correct = tf.reduce_sum(
                tf.cast(tf.equal(pred, y), dtype=tf.int32))

            total_num += x.shape[0]
            total_acc += int(correct)

        acc = total_acc / total_num
        print('acc:', acc)

    # Ensure the checkpoint directory exists before saving.
    os.makedirs('saved', exist_ok=True)
    model.save_weights('saved/weight.ckpt')
    print('saving weight!')

# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()


