import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers, optimizers, activations, losses, metrics, \
    callbacks, utils
import sys
import os
from python_ai.common.xcommon import *
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt

# Reproducibility: seed both RNG sources from a single constant so the two
# values can never drift apart (the original duplicated the literal 777).
SEED = 777
np.random.seed(SEED)
tf.random.set_seed(SEED)

# Basename of this script (presumably used for logging/identification).
filename = os.path.basename(__file__)

LEN_DICT = 1000      # vocabulary size: keep only the most frequent words
N_STEPS = 80         # sequence length after padding/truncation
N_EMBEDDING = 300    # embedding dimension (not used in this visible chunk)
N_BATCH_SIZE = 128   # mini-batch size for the tf.data pipeline

# Fetch the IMDB sentiment dataset; reviews arrive as variable-length
# integer sequences restricted to the LEN_DICT most frequent words.
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=LEN_DICT)

# Report the raw (un-padded) shapes of all four splits.
for arr, label in ((x_train, 'x_train'), (y_train, 'y_train'),
                   (x_test, 'x_test'), (y_test, 'y_test')):
    check_shape(arr, label)

print('PAD')
# Force every review to exactly N_STEPS tokens: longer sequences are
# truncated, shorter ones zero-padded (Keras default: from the left).
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=N_STEPS)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=N_STEPS)

# Shapes after padding — x_* are now rectangular (num_samples, N_STEPS).
for arr, label in ((x_train, 'x_train'), (y_train, 'y_train'),
                   (x_test, 'x_test'), (y_test, 'y_test')):
    check_shape(arr, label)

print('DS')
# Build the training input pipeline step by step rather than as one chain.
ds_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
ds_train = ds_train.shuffle(buffer_size=1000)
# drop_remainder=True guarantees every batch holds exactly N_BATCH_SIZE
# examples (the incomplete final batch is discarded).
ds_train = ds_train.batch(N_BATCH_SIZE, drop_remainder=True)
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)

# Walk the pipeline once and print the shape of every batch; with
# drop_remainder=True above, each should be (N_BATCH_SIZE, N_STEPS).
batch_idx = 0
for batch_x, batch_y in ds_train:
    sep(batch_idx)
    check_shape(batch_x, 'bx')
    check_shape(batch_y, 'by')
    batch_idx += 1

print('OVER')
