import tensorflow as tf
import timeit

# Eager basics: build two 2x2 matrices and multiply them immediately.
x = tf.ones((2, 2), dtype=tf.float32)
y = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
z = tf.matmul(x, y)
print(z)
# tf.Tensor(
# [[4. 6.]
#  [4. 6.]], shape=(2, 2), dtype=float32)
# .numpy() pulls the concrete value out of the eager tensor.
print(z.numpy())
# [[4. 6.]
# [4. 6.]]


# Eager control flow: a plain Python `if` can branch on a tensor's
# concrete value because eager tensors are evaluated immediately.
random_value = tf.random.uniform([], 0, 1)
x = tf.reshape(tf.range(4), [2, 2])
print(random_value)
if random_value.numpy() > 0.5:
  y = tf.matmul(x, x)
else:
  y = x + x

# Automatic differentiation: record the forward pass on a tape, then ask
# for d(loss)/dw.  For loss = w^2 + 2w + 5 at w=1, grad = 2w + 2 = 4.
print("计算梯度")
w = tf.Variable([[1.0]])
with tf.GradientTape() as g:
  loss = w * w + 2. * w + 5.
grad = g.gradient(loss, w)
print(grad)  # => tf.Tensor([[ 4.]], shape=(1, 1), dtype=float32)


# Function
# Function
def f(x, y):
  """Print the inputs, then return mean(3 * x**2 + y)."""
  print(x, y)
  return tf.reduce_mean(x ** 2 * 3 + y)


g = tf.function(f)
x = tf.constant([[2.0, 3.0]])
y = tf.constant([[3.0, -2.0]])
# `f` and `g` will return the same value, but `g` will be executed as a
# TensorFlow graph.
assert f(x, y).numpy() == g(x, y).numpy()
# tf.Tensor([[2. 3.]], shape=(1, 2), dtype=float32) tf.Tensor([[ 3. -2.]], shape=(1, 2), dtype=float32)
# Tensor("x:0", shape=(1, 2), dtype=float32) Tensor("y:0", shape=(1, 2), dtype=float32)


# Benchmark a Conv2D layer eagerly vs. wrapped in tf.function.
conv_layer = tf.keras.layers.Conv2D(100, 3)


@tf.function
def conv_fn(image):
  """Graph-mode wrapper around `conv_layer`."""
  return conv_layer(image)


image = tf.zeros([1, 200, 200, 100])
# Warm up: build the layer's variables and trace the graph once so the
# timings below measure steady-state execution only.
conv_layer(image)
conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
# For a single convolution the gap is small:
# Eager conv: 0.44013839924952197
# Function conv: 0.3700763391782858
# Benchmark an LSTM cell eagerly vs. wrapped in tf.function.
lstm_cell = tf.keras.layers.LSTMCell(10)


@tf.function
def lstm_fn(inputs, state):
  """Graph-mode wrapper around `lstm_cell`.

  Args:
    inputs: batch of input vectors for the cell.
    state: list of two state tensors [h, c].
  """
  return lstm_cell(inputs, state)


# Renamed from `input`/`input` to avoid shadowing the Python builtin.
lstm_input = tf.zeros([10, 10])
state = [tf.zeros([10, 10])] * 2
# Warm up: build variables and trace the graph once before timing.
lstm_cell(lstm_input, state)
lstm_fn(lstm_input, state)
print("eager lstm:", timeit.timeit(lambda: lstm_cell(lstm_input, state), number=10))
print("function lstm:", timeit.timeit(lambda: lstm_fn(lstm_input, state), number=10))


# Graph execution is much faster for the heavier LSTM computation:
# eager lstm: 0.025562446062237565
# function lstm: 0.0035498656569271647


@tf.function
def compute_z1(x, y):
  """Graph function returning the elementwise sum of x and y."""
  total = tf.add(x, y)
  return total


@tf.function
def compute_z0(x):
  """Return x + x**2; a tf.function may call another tf.function."""
  squared = tf.square(x)
  return compute_z1(x, squared)


# Calling a tf.function from eager code returns concrete tensors.
z0 = compute_z0(2.)  # 2 + 2**2 = 6
z1 = compute_z1(2., 2.)  # 2 + 2 = 4
print("z0:", z0)
print("z1:", z1)


@tf.function
def double(a):
  """Return a + a; used below to demonstrate retracing."""
  # The Python-level print runs only while tracing, so it fires once per
  # new input signature (dtype/shape), not on every call.
  print("Tracing with", a)
  return a + a


# The input_signature pins the argument to float32 (any shape); calling
# with any other dtype raises an error instead of triggering a retrace.
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def testFloatAdd(a):
  """Double a float32 tensor; traced once for the fixed signature."""
  print("testFloatAdd Tracing with:", a)
  return a + a


# Each new dtype/shape combination triggers a fresh trace of `double`.
print("--->>>\t", double(tf.constant(1)))
print("--->>>\t", double(tf.constant(1.1)))
print("--->>>\t", double(tf.constant([1, 2])))
print("--->>>\t", double(tf.constant([1, 2, 3, 4.2, 5.1])))

# `testFloatAdd` accepts only float32 tensors; any shape fits its signature.
print("testFloatAdd--->>>\t", testFloatAdd(tf.constant(1.1)))
print("testFloatAdd--->>>\t", testFloatAdd(tf.constant([1, 2, 3, 4.2, 5.1])))


def sum_even(items):
  """Return the sum of the even entries of `items`.

  Kept as a plain loop with a conditional so AutoGraph can convert it
  when the function is wrapped in tf.function below.
  """
  total = 0
  for item in items:
    if item % 2 == 0:
      total += item
  return total


# AutoGraph rewrites Python control flow (for/if) into graph ops; with
# autograph=False the same loop cannot iterate over a symbolic tensor.
sum_even_autograph_on = tf.function(sum_even, autograph=True)
sum_even_autograph_off = tf.function(sum_even, autograph=False)
x = tf.constant([10, 12, 15, 20])
print(sum_even(x))
print(sum_even_autograph_on(x))


# TypeError: Tensor objects are only iterable when eager execution is enabled
# sum_even_autograph_off(x)


class ScalarModel(object):
  """Toy stateful model holding a single scalar tf.Variable."""

  def __init__(self):
    # Per-instance state: each ScalarModel owns its own variable.
    self.v = tf.Variable(0)

  @tf.function
  def increment(self, amount):
    """Add `amount` to the variable in place (graph-compatible)."""
    self.v.assign_add(amount)


# tf.function methods still mutate per-instance state correctly.
model1 = ScalarModel()
model1.increment(tf.constant(3))
assert int(model1.v) == 3
model1.increment(tf.constant(4))
assert int(model1.v) == 7
model2 = ScalarModel()  # model1 and model2 own separate variables
model2.increment(tf.constant(5))
assert int(model2.v) == 5


@tf.function
def print_element(items):
  """Print a rank-1 tensor element by element.

  The Python `print` calls run only at trace time; `tf.print` executes
  inside the graph, so it emits output on every call.
  """
  print(items)
  print(items.shape)
  # Idiomatic len()/iter() instead of calling the dunders directly.
  if len(items.shape) == 1:
    for c in items:
      tf.print(c)
  else:
    # Non-vector input: trace-time debug print of the shape iterator.
    print(iter(items.shape))


# Rank-1 input takes the tf.print branch inside print_element.
x = tf.constant([1, 5, 6, 8, 3])
print(x)
print_element(x)
print("------------------")


@tf.function
def f(x):
  """Return x squared; retraced once per input dtype."""
  return tf.square(x)


# One trace for int32, a separate trace for float32.
print(f(tf.constant(1, dtype=tf.int32)))
print(f(tf.constant(1.0, dtype=tf.float32)))


@tf.function
def f(x, use_multiply):
  # `use_multiply` is a Python bool, so it is a trace-time constant:
  # each boolean value produces its own traced graph with one branch baked in.
  return tf.multiply(x, x) if use_multiply else tf.square(x)


print(f(tf.constant(2.0), True))
print(f(tf.constant(2.0), False))


def t(num):
  """Return True when `num` equals 0, otherwise False."""
  # `True if cond else False` is redundant; return the comparison itself.
  return num == 0


print("测试return: ", t(1))

def weights_with_loss(shape=(3, 3), stddev=0.1, wl=None):
  """Create a truncated-normal weight Variable, optionally with L2 decay.

  Args:
    shape: shape of the weight tensor (defaults let the demo call below
      run without arguments, which previously raised a TypeError).
    stddev: standard deviation of the truncated-normal initializer.
    wl: weight-decay multiplier; when not None, wl * l2_loss(weights) is
      registered in the 'losses' collection.

  Returns:
    A tf.Variable initialized from the truncated-normal sample.
  """
  # TF2 moved tf.truncated_normal to tf.random.truncated_normal.
  var = tf.random.truncated_normal(stddev=stddev, shape=shape)
  if wl is not None:
    weight_loss = tf.multiply(tf.nn.l2_loss(var), wl, name='weight_loss')
    # Collections are a TF1 concept; in TF2 they live under tf.compat.v1.
    tf.compat.v1.add_to_collection('losses', weight_loss)
  return tf.Variable(var)


weights_with_loss()