import tensorflow as tf
import tensorflow.contrib.eager as tfe
from sklearn.metrics import r2_score
from .utils import mape_score
tfe.enable_eager_execution()


def l2_loss(predictions, labels):
    """Return the mean halved squared error between predictions and labels."""
    halved_squared_error = (predictions - labels) ** 2 / 2.
    return tf.reduce_mean(halved_squared_error)


class OneLayerRegression(tfe.Network):
    """Plain linear regression over a single feature tensor.

    One dense layer with a single output unit and identity activation,
    reading its input from ``inputs[feature_key]``.
    """

    def __init__(self, feature_key):
        super(OneLayerRegression, self).__init__(name='')
        # Single linear unit: identity activation keeps this a pure linear model.
        self.fc1 = self.track_layer(tf.layers.Dense(1, activation=tf.identity))
        self.feature_key = feature_key

    def call(self, inputs, training):
        """Forward pass; `training` is accepted for interface parity but unused."""
        out = self.fc1(inputs[self.feature_key])
        # Predictions must have shape (batch_size,) rather than (batch_size, 1).
        return tf.squeeze(out)

    def loss(self, inputs, labels, training=True):
        """Half mean-squared-error of the predictions against `labels`."""
        predictions = self(inputs, training=training)
        return l2_loss(predictions, labels)

    def score(self, inputs, labels, metrics="r2"):
        """Evaluate the model; `metrics` is either "r2" or "mape"."""
        predictions = self(inputs, training=False)
        if metrics == "mape":
            return mape_score(labels, predictions)
        if metrics == "r2":
            return r2_score(labels, predictions)
        raise NotImplementedError


class TypicalNetwork(tfe.Network):
    """Two-layer MLP regressor: Dense(hidden, relu) -> Dense(1), optional dropout.

    Reads a single feature tensor from ``inputs[feature_key]``.
    """

    def __init__(self, feature_key, hidden_size=10, use_dropout=True):
        super(TypicalNetwork, self).__init__(name='')
        self.fc1 = self.track_layer(tf.layers.Dense(hidden_size, activation=tf.nn.relu))
        self.fc2 = self.track_layer(tf.layers.Dense(1, activation=tf.identity))
        # NOTE: the original assigned use_dropout twice; once is enough.
        self.use_dropout = use_dropout
        if self.use_dropout:
            self.dropout = self.track_layer(tf.layers.Dropout(0.5))
        self.feature_key = feature_key
        self.hidden_size = hidden_size

    def call(self, inputs, training):
        x = self.fc1(inputs[self.feature_key])
        # BUG FIX: the original called `self.dropout(x)` after fc2 and discarded
        # the result, so dropout was a silent no-op. It is now applied to the
        # hidden representation (between fc1 and fc2) and its output is kept,
        # matching the dropout placement used by ProposedTwoLayers below.
        if training and self.use_dropout:
            x = self.dropout(x)
        x = self.fc2(x)
        # Predictions must have shape (batch_size,) rather than (batch_size, 1).
        return tf.squeeze(x)

    def loss(self, inputs, labels, training=True):
        """Half mean-squared-error of the predictions against `labels`."""
        return l2_loss(
            self(inputs, training=training),
            labels
        )

    def score(self, inputs, labels, metrics="r2"):
        """Evaluate the model; `metrics` is either "r2" or "mape"."""
        predictions = self(inputs, training=False)
        if metrics == "r2":
            return r2_score(labels, predictions)
        elif metrics == "mape":
            return mape_score(labels, predictions)
        else:
            raise NotImplementedError


class Proposed(tfe.Network):
    """Gated combination of two single-layer experts, one per feature group.

    Each feature group ("feature_a" / "feature_b") produces a linear
    prediction (oa / ob) and a tanh gate (wa / wb); the combined output
    is the gate-weighted sum ``wa * oa + wb * ob``.
    """

    def __init__(self, use_dropout=False):
        super(Proposed, self).__init__(name='')
        # Gate (tanh) and prediction (linear) heads for feature group A, then B.
        self.fc_wa = self.track_layer(tf.layers.Dense(1, activation=tf.nn.tanh))
        self.fc_oa = self.track_layer(tf.layers.Dense(1, activation=tf.identity))
        self.fc_wb = self.track_layer(tf.layers.Dense(1, activation=tf.nn.tanh))
        self.fc_ob = self.track_layer(tf.layers.Dense(1, activation=tf.identity))
        self.use_dropout = use_dropout
        if self.use_dropout:
            # Dropout regularizes the gates only, not the predictions.
            self.dropouta = self.track_layer(tf.layers.Dropout(0.5))
            self.dropoutb = self.track_layer(tf.layers.Dropout(0.5))

    def call(self, inputs, training):
        apply_dropout = training and self.use_dropout
        wa = self.fc_wa(inputs['feature_a'])
        if apply_dropout:
            wa = self.dropouta(wa)
        oa = self.fc_oa(inputs['feature_a'])
        wb = self.fc_wb(inputs['feature_b'])
        if apply_dropout:
            wb = self.dropoutb(wb)
        ob = self.fc_ob(inputs['feature_b'])
        # The combined prediction must be (batch_size,), hence the squeeze.
        combined = tf.squeeze(wa * oa + wb * ob)
        return oa, ob, combined

    def loss(self, inputs, labels, training=True):
        """Half-MSE on the combined output plus half-MSE on each expert."""
        oa, ob, combined = self(inputs, training=training)
        return l2_loss(combined, labels) + l2_loss(oa, labels) + l2_loss(ob, labels)

    def score(self, inputs, labels, metrics="r2"):
        """Evaluate the combined output; `metrics` is either "r2" or "mape"."""
        _, _, combined = self(inputs, training=False)
        if metrics == "mape":
            return mape_score(labels, combined)
        if metrics == "r2":
            return r2_score(labels, combined)
        raise NotImplementedError


class ProposedTwoLayers(tfe.Network):
    """Gated combination of two two-layer experts, one per feature group.

    Each feature group ("feature_a" / "feature_b") feeds a two-layer MLP
    expert (relu hidden layer, linear output) and a tanh gate; the final
    output is the gate-weighted sum ``wa * oa + wb * ob``.
    """

    def __init__(self, hidden_a=20, hidden_b=20, use_dropout=False):
        super(ProposedTwoLayers, self).__init__(name='')
        # Gate and two-layer expert for feature group A, then B.
        self.fc_wa = self.track_layer(tf.layers.Dense(1, activation=tf.nn.tanh))
        self.fc_oa1 = self.track_layer(tf.layers.Dense(hidden_a, activation=tf.nn.relu))
        self.fc_oa2 = self.track_layer(tf.layers.Dense(1, activation=tf.identity))
        self.fc_wb = self.track_layer(tf.layers.Dense(1, activation=tf.nn.tanh))
        self.fc_ob1 = self.track_layer(tf.layers.Dense(hidden_b, activation=tf.nn.relu))
        self.fc_ob2 = self.track_layer(tf.layers.Dense(1, activation=tf.identity))
        self.use_dropout = use_dropout
        if self.use_dropout:
            # Dropout is applied to each expert's hidden representation.
            self.dropouta = self.track_layer(tf.layers.Dropout(0.9))
            self.dropoutb = self.track_layer(tf.layers.Dropout(0.9))

    def call(self, inputs, training):
        apply_dropout = training and self.use_dropout
        wa = self.fc_wa(inputs['feature_a'])
        hidden_a = self.fc_oa1(inputs['feature_a'])
        if apply_dropout:
            hidden_a = self.dropouta(hidden_a)
        oa = self.fc_oa2(hidden_a)
        wb = self.fc_wb(inputs['feature_b'])
        hidden_b = self.fc_ob1(inputs['feature_b'])
        if apply_dropout:
            hidden_b = self.dropoutb(hidden_b)
        ob = self.fc_ob2(hidden_b)
        # The combined prediction must be (batch_size,), hence the squeeze.
        combined = tf.squeeze(wa * oa + wb * ob)
        return wa, oa, wb, ob, combined

    def loss(self, inputs, labels, training=True):
        """Half-MSE on the combined output plus half-MSE on each expert."""
        _, oa, _, ob, combined = self(inputs, training=training)
        return l2_loss(combined, labels) + l2_loss(oa, labels) + l2_loss(ob, labels)

    def score(self, inputs, labels, metrics="r2"):
        """Evaluate the combined output; `metrics` is either "r2" or "mape"."""
        _, _, _, _, combined = self(inputs, training=False)
        if metrics == "mape":
            return mape_score(labels, combined)
        if metrics == "r2":
            return r2_score(labels, combined)
        raise NotImplementedError


class ProposedTwoLayersPlusOut(tfe.Network):
    """Variant of ProposedTwoLayers whose gates also see the expert outputs.

    Each gate receives its feature group concatenated with that expert's
    own prediction, letting the gate condition on how the expert behaves.
    The final output is the gate-weighted sum ``wa * oa + wb * ob``.
    """

    def __init__(self, hidden_a=20, hidden_b=20, use_dropout=False):
        super(ProposedTwoLayersPlusOut, self).__init__(name='')
        # Gate and two-layer expert for feature group A, then B.
        self.fc_wa = self.track_layer(tf.layers.Dense(1, activation=tf.nn.tanh))
        self.fc_oa1 = self.track_layer(tf.layers.Dense(hidden_a, activation=tf.nn.relu))
        self.fc_oa2 = self.track_layer(tf.layers.Dense(1, activation=tf.identity))
        self.fc_wb = self.track_layer(tf.layers.Dense(1, activation=tf.nn.tanh))
        self.fc_ob1 = self.track_layer(tf.layers.Dense(hidden_b, activation=tf.nn.relu))
        self.fc_ob2 = self.track_layer(tf.layers.Dense(1, activation=tf.identity))
        self.use_dropout = use_dropout
        if self.use_dropout:
            # Dropout is applied to each expert's hidden representation.
            self.dropouta = self.track_layer(tf.layers.Dropout(0.9))
            self.dropoutb = self.track_layer(tf.layers.Dropout(0.9))

    def call(self, inputs, training):
        apply_dropout = training and self.use_dropout
        hidden_a = self.fc_oa1(inputs['feature_a'])
        if apply_dropout:
            hidden_a = self.dropouta(hidden_a)
        oa = self.fc_oa2(hidden_a)
        # Feed feature_a together with its expert's output into the gate,
        # hoping for a better-informed weighting.
        wa = self.fc_wa(tf.concat([inputs['feature_a'], oa], axis=1))
        hidden_b = self.fc_ob1(inputs['feature_b'])
        if apply_dropout:
            hidden_b = self.dropoutb(hidden_b)
        ob = self.fc_ob2(hidden_b)
        # Same trick for feature_b and its expert's output.
        wb = self.fc_wb(tf.concat([inputs['feature_b'], ob], axis=1))
        # The combined prediction must be (batch_size,), hence the squeeze.
        combined = tf.squeeze(wa * oa + wb * ob)
        return wa, oa, wb, ob, combined

    def loss(self, inputs, labels, training=True):
        """Half-MSE on the combined output plus half-MSE on each expert."""
        _, oa, _, ob, combined = self(inputs, training=training)
        return l2_loss(combined, labels) + l2_loss(oa, labels) + l2_loss(ob, labels)

    def score(self, inputs, labels, metrics="r2"):
        """Evaluate the combined output; `metrics` is either "r2" or "mape"."""
        _, _, _, _, combined = self(inputs, training=False)
        if metrics == "mape":
            return mape_score(labels, combined)
        if metrics == "r2":
            return r2_score(labels, combined)
        raise NotImplementedError
