import cv2
import os
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
import numpy as np


class Data:
    """Abstract interface for dataset providers.

    Subclasses must implement ``train_data_set`` and ``val_data_set``;
    ``test_data_set`` is an optional hook and does nothing by default.
    """

    def train_data_set(self, *args, **kwargs):
        # Subclass responsibility: build and return the training data set.
        raise NotImplementedError("you must implement train_data fc")

    def val_data_set(self, *args, **kwargs):
        # Subclass responsibility: build and return the validation data set.
        raise NotImplementedError("you must implement val_data fc")

    def test_data_set(self):
        # Optional hook; the default implementation is a no-op.
        return None


class Customter(Data):
    """Image-classification data provider backed by a directory tree.

    ``path`` is expected to contain one sub-directory per class label, with
    the sub-directory name being a key of ``class_map``. Images are loaded
    with OpenCV (BGR order), resized, scaled to [0, 1] and paired with a
    one-hot label and a per-sample weight.
    """

    # Maps directory name (the label string) -> integer class index.
    class_map = {"1": 0, "2": 1, "3": 2, "4": 3}

    def __init__(self, path, input_size=22, augment=None):
        """Scan *path* and split samples into train/test sets.

        Args:
            path: root directory with one sub-directory per class.
            input_size: images are resized to (input_size, input_size).
            augment: optional callable applied to training images only.
        """
        x, y = self._get_path_label(path)
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            x, y, test_size=0.33, random_state=42, shuffle=True)
        self.train_num = len(self.X_train)
        self.test_num = len(self.X_test)
        self.class_num = len(self.class_map)
        self.input_size = input_size
        self.augment = augment
        # Label strings ordered by their integer class index.
        self.class_names = sorted(self.class_map, key=self.class_map.get)

    def _get_path_label(self, path):
        """Collect (image path, label) lists; the label is the directory name."""
        x = []
        y = []
        for label in os.listdir(path):
            label_dir = os.path.join(path, label)
            for img_name in os.listdir(label_dir):
                x.append(os.path.join(label_dir, img_name))
                y.append(label)
        return x, y

    def _genrate_warp(self, X, y, weights=None, train=False):
        """Return a generator factory yielding (image, one-hot label, weight).

        Explicit ``weights`` are honored only for training data; otherwise
        every sample gets weight 1.
        """
        if weights is None or not train:
            weights = [1] * len(y)

        def genrate():
            for img_path, label, weight in zip(X, y, weights):
                img = cv2.resize(cv2.imread(img_path), (self.input_size, self.input_size))
                if self.augment is not None and train:
                    img = self.augment(img)
                label_vec = to_categorical(self.class_map[label], len(self.class_map))
                # BUGFIX: np.float / np.int were deprecated aliases removed in
                # NumPy >= 1.24; use concrete dtypes that match the tf.data
                # output signature declared below (float32 / int8).
                yield (img / 255.).astype(np.float32), label_vec.astype(np.int8), weight

        return genrate

    def train_data_set(self, batch_size):
        """tf.data pipeline over the training split: shuffled, batched, 5 epochs."""
        # BUGFIX: the generator yields 3-tuples (image, label, weight), so the
        # output signature must declare all three components — the original
        # omitted the sample weight, which fails when the dataset is consumed.
        data_set = tf.data.Dataset.from_generator(
            self._genrate_warp(self.X_train, self.y_train, train=True),
            (tf.float32, tf.int8, tf.float32),
            (tf.TensorShape([self.input_size, self.input_size, 3]),
             tf.TensorShape([self.class_num]),
             tf.TensorShape([])))
        data_set = data_set.shuffle(buffer_size=100)
        data_set = data_set.batch(batch_size)
        data_set = data_set.repeat(5)
        return data_set

    def val_data_set(self, batch_size):
        """tf.data pipeline over the test split: shuffled and batched, one pass."""
        # Same 3-component signature fix as train_data_set (image, label, weight).
        data_set = tf.data.Dataset.from_generator(
            self._genrate_warp(self.X_test, self.y_test, train=False),
            (tf.float32, tf.int8, tf.float32),
            (tf.TensorShape([self.input_size, self.input_size, 3]),
             tf.TensorShape([self.class_num]),
             tf.TensorShape([])))
        data_set = data_set.shuffle(buffer_size=100)
        data_set = data_set.batch(batch_size)
        return data_set


class BNN_Data_tf14(Customter):
    """Variant of ``Customter`` that emits plain NumPy batches.

    Intended for TF 1.4-era code paths that cannot use ``tf.data``; the
    ``*_data_set`` methods return Python generators of
    (images, labels, weights) NumPy arrays instead of ``tf.data.Dataset``s.
    """

    def __init__(self, path, input_size=22, augment=None):
        super(BNN_Data_tf14, self).__init__(path=path, input_size=input_size, augment=augment)

    def _batch_warp(self, X, y, batch, repeat, train=False):
        """Return a generator fn yielding (images, labels, weights) batches.

        Iterates the per-sample generator ``repeat`` times. A trailing
        partial batch is dropped (only full batches of size ``batch`` are
        yielded).

        The original code defined two byte-identical inner generators
        (``train_batch`` / ``val_batch``) and picked one by ``train``; the
        duplication is collapsed into a single generator. ``train`` still
        controls augmentation via ``_genrate_warp``.
        """
        genrate = self._genrate_warp(X, y, train=train)

        def batch_gen():
            samples, targets, weights = [], [], []
            for _ in range(repeat):
                # NOTE: loop variables renamed so the ``y`` parameter is not
                # shadowed inside the closure.
                for img, label, weight in genrate():
                    samples.append(img)
                    targets.append(label)
                    weights.append(weight)
                    if len(samples) == batch:
                        yield np.array(samples), np.array(targets), np.array(weights)
                        samples, targets, weights = [], [], []

        return batch_gen

    def train_data_set(self, batch_size):
        """Batch generator over the training split (100 repeats, augmented)."""
        batch_gen = self._batch_warp(self.X_train, self.y_train, batch_size, repeat=100, train=True)
        return batch_gen()

    def val_data_set(self, batch_size):
        """Batch generator over the test split (100 repeats, no augmentation)."""
        batch_gen = self._batch_warp(self.X_test, self.y_test, batch_size, repeat=100, train=False)
        return batch_gen()
