import tensorflow as tf


class Derivative(object):
    """
    Differentiation examples for the quadratic y = a * x**2 + b * x + c.
    """
    def __init__(self):
        # Coefficients of the quadratic (a=1, b=-2, c=1, i.e. y = (x-1)^2).
        self.a = tf.constant(1.0)
        self.b = tf.constant(-2.0)
        self.c = tf.constant(1.0)

    @tf.function
    def tensor_simple(self, x):
        """
        Differentiate y = a * x**2 + b * x + c with respect to the tensor x.
        :param x: scalar input, cast to float32
        :return: tuple (x, dy/dx)
        """
        x = tf.cast(x, dtype=tf.float32)
        with tf.GradientTape() as tape:
            # x is a plain tensor (not a Variable), so it must be watched
            # explicitly for the tape to record operations on it.
            tape.watch(x)
            y = self.a * tf.pow(x, 2) + self.b * x + self.c

        # Gradient with respect to the tensor x only.
        grad_x = tape.gradient(y, x)
        return x, grad_x

    @tf.function
    def tensor_constant_simple(self, x):
        """
        Differentiate y = a * x**2 + b * x + c with respect to the tensor x
        and the constants a, b, c.
        :param x: scalar input, cast to float32
        :return: None (the four gradients are printed)
        """
        x = tf.cast(x, dtype=tf.float32)
        targets = [x, self.a, self.b, self.c]
        with tf.GradientTape() as tape:
            # Constants are never tracked automatically; watch all targets.
            tape.watch(targets)
            y = self.a * tf.pow(x, 2) + self.b * x + self.c

        grads = tape.gradient(y, targets)
        # Gradient w.r.t. x
        tf.print('dy_dx: ', grads[0])
        # Gradient w.r.t. a
        tf.print('dy_da: ', grads[1])
        # Gradient w.r.t. b
        tf.print('dy_db: ', grads[2])
        # Gradient w.r.t. c
        tf.print('dy_dc: ', grads[3])

    @tf.function
    def derivative_2(self, x):
        """
        Second-order derivative of y = a * x**2 + b * x + c w.r.t. x.
        :param x: scalar input, cast to float32
        :return: tuple (x, d2y/dx2)
        """
        x = tf.cast(x, dtype=tf.float32)
        # Nested tapes: the inner tape produces dy/dx, the outer tape
        # differentiates that result once more.
        with tf.GradientTape() as outer_tape:
            outer_tape.watch(x)
            with tf.GradientTape() as inner_tape:
                inner_tape.watch(x)
                y = self.a * tf.pow(x, 2) + self.b * x + self.c
            first_order = inner_tape.gradient(y, x)
        second_order = outer_tape.gradient(first_order, x)
        return x, second_order


class Optimize(object):
    """
    Find the minimum of y = a * x**2 + b * x + c by gradient descent.
    """
    # NOTE(review): the public method names keep the original 'optmize'
    # spelling so existing callers are not broken.

    def __init__(self):
        # Coefficients of the quadratic; minimum is at x = 1 with y = 0.
        self.a = tf.constant(1.0)
        self.b = tf.constant(-2.0)
        self.c = tf.constant(1.0)

    def _loss(self, x):
        """Evaluate y = a * x**2 + b * x + c at x."""
        return self.a * tf.pow(x, 2) + self.b * x + self.c

    def optmize_simple_1(self, x, learning_rate=0.01, iterates=1000):
        """
        Minimize y using optimizer.apply_gradients.
        :param x: initial value for the variable x
        :param learning_rate: SGD step size
        :param iterates: number of gradient-descent steps
        :return: tuple (x, y) evaluated after the final update
        """
        x = tf.Variable(x, name="x", dtype=tf.float32)
        optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
        for _ in range(iterates):
            with tf.GradientTape() as tape:
                y = self._loss(x)
            dy_dx = tape.gradient(y, x)
            optimizer.apply_gradients(grads_and_vars=[(dy_dx, x)])
        # Re-evaluate the loss after the loop: the loop-local `y` is stale
        # (computed before the last gradient step) and would be undefined
        # entirely when iterates == 0.
        return x, self._loss(x)

    def optmize_simple_2(self, x, learning_rate=0.01, iterates=1000):
        """
        Minimize y using optimizer.minimize.
        :param x: initial value for the variable x
        :param learning_rate: SGD step size
        :param iterates: number of gradient-descent steps
        :return: tuple (x, y) evaluated after the final update
        """
        # Create the Variable before defining the closure so the capture is
        # unambiguous (the original defined `func` first and relied on
        # Python's late binding to pick up the rebound `x`).
        x = tf.Variable(x, name="x", dtype=tf.float32)

        # Zero-argument loss closure over the variable x, as required by
        # optimizer.minimize.
        @tf.function
        def func():
            return self._loss(x)

        optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
        for _ in range(iterates):
            optimizer.minimize(func, [x])
        return x, func()


def run():
    """Demo driver: run the differentiation examples and print results."""
    # Differentiation examples.
    derivative = Derivative()
    rs1 = derivative.tensor_simple(x=0)
    rs2 = derivative.tensor_simple(x=1)
    rs3 = derivative.tensor_simple(x=-1)
    # Each result is (x, dy_dx); the previous label "y" mislabeled the
    # derivative as the function value.
    print('x: {}, dy_dx: {}\nx: {}, dy_dx: {}\nx: {}, dy_dx: {}'.format(
        rs1[0], rs1[1], rs2[0], rs2[1], rs3[0], rs3[1]))

    derivative.tensor_constant_simple(x=0)
    derivative.derivative_2(x=0)

    # Optimization demo — intentionally disabled. The original used a bare
    # triple-quoted string (a no-op expression) to comment this out; real
    # comments make the intent explicit.
    # optimize = Optimize()
    #
    # opt_rs1 = optimize.optmize_simple_1(x=0)
    # print('x: {}， y: {}'.format(opt_rs1[0].numpy(), opt_rs1[1]))
    # print('x: ', opt_rs1[0].numpy(), '；y: ', opt_rs1[1].numpy())
    #
    # opt_rs2 = optimize.optmize_simple_2(x=0)
    # print('x: {}， y: {}'.format(opt_rs2[0].numpy(), opt_rs2[1]))


# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    run()
