# coding:utf8
import time

import tensorflow as tf
import numpy as np

# sess = tf.InteractiveSession()
# x = tf.constant([[11, 12, 13], [21, 22, 23]])
# y = tf.constant([[31, 32, 33], [41, 42, 43]])
#
# z = tf.stack([x, y])
# print("*" * 20)
# print(x)
# print(x.eval())
# print("*" * 20)
# print(z)
# print(z.eval())
# print("*" * 20)
#
# sess.close()
#########################################################
tf.enable_eager_execution()  # TF 1.x eager mode: ops execute immediately instead of building a graph
# print(tf.add(1, 2))
# print(tf.add([1, 2], [3, 4]))
# print(tf.square(5))
# print(tf.reduce_sum([1, 2, 3]))
# print(tf.encode_base64("hello world"))

# # Operator overloading is also supported
# print(tf.square(2) + tf.square(3))

# x = tf.matmul([[1]], [[2, 3]])
# print('x',x)
# print(x.shape)
# print(x.dtype)
#
# import numpy as np
#
# #
# ndarray = np.ones([3, 3])
#
# print("TensorFlow operations convert numpy arrays to Tensors automatically")
# tensor = tf.multiply(ndarray, 42)
# print(tensor)
#
# print("And NumPy operations convert Tensors to numpy arrays automatically")
# print(np.add(tensor, 1))
#
# print("The .numpy() method explicitly converts a Tensor to a numpy array")
# print(tensor.numpy())
# print('&'*50)
#
# def time_matmul(x):
#     s = time.time()
#     tf.matmul(x, x)
#     print(time.time() - s)
#
#
# # %timeit tf.matmul(x, x)
#
# # Force execution on CPU
# print("On CPU:")
# with tf.device("CPU:0"):
#     x = tf.random_uniform([1000, 1000])
#     assert x.device.endswith("CPU:0")
#     time_matmul(x)

# # Force execution on GPU #0 if available
# if tf.test.is_gpu_available():
#     with tf.device("GPU:0"):  # Or GPU:1 for the 2nd GPU, GPU:2 for the 3rd etc.
#         x = tf.random_uniform([1000, 1000])
#         assert x.device.endswith("GPU:0")
#         time_matmul(x)
# ######################################

# In-memory dataset of six scalar tensors.
ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6])

# Write a small text file to demonstrate a line-based dataset.
import tempfile

_, filename = tempfile.mkstemp()

sample_text = """Line 1
Line 2
Line 3
  """
with open(filename, 'w') as f:
    f.write(sample_text)

# Dataset that yields the file one line at a time.
ds_file = tf.data.TextLineDataset(filename)

# tf.contrib.eager bundles the eager-mode helpers (metrics, gradients_function, ...).
tfe = tf.contrib.eager
print(tfe)

from math import pi


def f(x):
    """Return sin(x)^2 as a tensor."""
    s = tf.sin(x)
    return tf.square(s)


# sin(pi/2)^2 == 1 exactly.
assert f(pi / 2).numpy() == 1.0, "pris"


# gradients_function returns a function producing the list of derivatives of f
# with respect to its arguments. Since f() has a single argument, that list
# has a single element.
grad_f = tfe.gradients_function(f)
# d/dx sin(x)^2 = 2*sin(x)*cos(x), which vanishes at x = pi/2.
assert tf.abs(grad_f(pi / 2)[0]).numpy() < 1e-7
#
#
# def f(x):
#     return tf.square(tf.sin(x))
#
#
# def grad(f):
#     return lambda x: tfe.gradients_function(f)(x)[0]
#
#
# x = tf.lin_space(-2 * pi, 2 * pi, 100)  # 100 points between -2π and +2π

# import matplotlib.pyplot as plt

# plt.plot(x, f(x), label="f")
# plt.plot(x, grad(f)(x), label="first derivative")
# plt.plot(x, grad(grad(f))(x), label="second derivative")
# plt.plot(x, grad(grad(grad(f)))(x), label="third derivative")
# plt.legend()
# plt.show()

# x = tf.ones((2, 2))

# # a single t.gradient() call when the bug is resolved.
# with tf.GradientTape(persistent=True) as t:
#     t.watch(x)
#     y = tf.reduce_sum(x)
#     z = tf.multiply(y, y)
# print('8'*20)
# # Use the same tape to compute the derivative of z with respect to the
# # intermediate value y.
# dz_dy = t.gradient(z, y)
# print('8',dz_dy.numpy())

# Derivative of z with respect to the original input tensor x
# dz_dx = t.gradient(z, x)
# for i in [0, 1]:
#     for j in [0, 1]:
#         print(dz_dx[i][j].numpy())
# # assert dz_dx[i][j].numpy() == 8.0
# print('*' * 20)

# x = tf.constant(3.0)
# with tf.GradientTape(persistent=True) as t:
#   t.watch(x)
#   y = x * x
#   z = y * y
# dz_dx = t.gradient(z, x)  # 108.0 (4*x^3 at x = 3)
# dy_dx = t.gradient(y, x)  # 6.0
# del t  # Drop the reference to the tape

# x = tf.constant(1.0)  # Convert the Python 1.0 to a Tensor object

# with tf.GradientTape() as t:
#     with tf.GradientTape() as t2:
#         y = x * x * x
#     # Compute the gradient inside the 't' context manager
#     # which means the gradient computation is differentiable as well.
#     dy_dx = t2.gradient(y, x)  # 直接计算梯度
# d2y_dx2 = t.gradient(dy_dx, x)  # 6

# assert dy_dx.numpy() == 3.0
# assert d2y_dx2.numpy() == 6.0

##############################################
# class Model(object):
#     def __init__(self):
#         self.W = tf.Variable(5.0)
#         self.b = tf.Variable(0.0)
#
#     def __call__(self, x, *args, **kwargs):
#         return self.W * x + self.b
#
#
# model = Model()

# assert model(3.0).numpy() == 15.0


# def loss(predicted_y, desired_y):
#     # 均方误差
#     return tf.reduce_mean(tf.square(predicted_y - desired_y))

# TRUE_W = 3.0
# TRUE_b = 2.0
# NUM_EXAMPLES = 1000

# inputs = tf.random_normal(shape=[NUM_EXAMPLES])
# noise = tf.random_normal(shape=[NUM_EXAMPLES])
# outputs = inputs * TRUE_W + TRUE_b + noise
# print(outputs.shape,type(outputs))

# import matplotlib.pyplot as plt

# plt.scatter(inputs, outputs, c='b')
# plt.scatter(inputs, model(inputs), c='r')
# plt.show()

# print('Current loss: '),
# print(loss(model(inputs), outputs).numpy())

# def train(model,inputs,outputs,learning_rate):
#     with tf.GradientTape() as t:
#         current_loss = loss(model(inputs),outputs)
#     dw,db=t.gradient(current_loss,[model.W,model.b]) #REW:会对元素进行微分
#     model.W.assign_sub(learning_rate*dw)
#     model.b.assign_sub(learning_rate*db)
# model = Model()
# Ws,bs = [],[]
# epochs = range(10)
# for epoch in epochs:
#     Ws.append(model.W.numpy())
#     bs.append(model.b.numpy())
#     current_loss = loss(model(inputs),outputs)
#     train(model,inputs,outputs,learning_rate=0.1)
#     print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' %
#           (epoch, Ws[-1], bs[-1], current_loss))
#
# # Let's plot it all
# plt.plot(epochs, Ws, 'r',
#          epochs, bs, 'b')
# plt.plot([TRUE_W] * len(epochs), 'r--',
#          [TRUE_b] * len(epochs), 'b--')
# plt.legend(['W', 'b', 'true W', 'true_b'])
# plt.show()

class MyDenseLayer(tf.keras.layers.Layer):
    """Custom dense layer: output = input @ kernel (no bias, no activation)."""

    def __init__(self, num_outputs):
        super(MyDenseLayer, self).__init__()
        # Output width; the kernel itself is created lazily in build().
        self.num_outputs = num_outputs

    def build(self, input_shape):  # invoked by Layer.__call__ on first use, before call()
        # print(f'input_shape:{input_shape},----------,jsf')
        # Kernel shape (input_features, num_outputs); add_variable registers
        # it as a trainable variable of this layer.
        self.kernel = self.add_variable("kernel",
                                        shape=[int(input_shape[-1]),
                                               self.num_outputs])
        print(f'self.kernel:{self.kernel},------')  # call() runs next

    def call(self, input):
        # Forward pass: plain matrix multiply with the lazily-built kernel.
        print('input',input,'----------------------------')
        return tf.matmul(input, self.kernel)


layer = MyDenseLayer(10)
# layer(tf.zeros([10, 5]))  # first call would trigger build() then call()
# print(layer.variables)

class ResnetIdentityBlock(tf.keras.Model):
    """Residual identity block: three conv+BN stages plus a skip connection."""

    def __init__(self, kernel_size, filters):
        super(ResnetIdentityBlock, self).__init__(name='')
        filters1, filters2, filters3 = filters

        # 1x1 -> kxk (same padding) -> 1x1 convolutions, each followed by
        # its own batch-normalization layer.
        self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))
        self.bn2a = tf.keras.layers.BatchNormalization()

        self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')
        self.bn2b = tf.keras.layers.BatchNormalization()

        self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))
        self.bn2c = tf.keras.layers.BatchNormalization()

    def call(self, input_tensor, training=False):
        """Forward pass; `training` toggles batch-norm train/inference behavior."""
        print('call call')
        out = tf.nn.relu(self.bn2a(self.conv2a(input_tensor), training=training))
        out = tf.nn.relu(self.bn2b(self.conv2b(out), training=training))
        out = self.bn2c(self.conv2c(out), training=training)
        # Residual add: requires filters3 to match the input channel count.
        return tf.nn.relu(out + input_tensor)


# Exercise the custom residual block on a dummy NHWC batch.
block = ResnetIdentityBlock(1, [1, 2, 3])
# print(block(tf.zeros([1,2,3,3])))
block(tf.zeros([1, 2, 3, 3]))
# print([x.name for x in block.variables])

# The same stack of layers expressed with the Sequential API.
my_seq = tf.keras.Sequential([
    tf.keras.layers.Conv2D(1, (1, 1)),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(2, 1, padding='same'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(3, (1, 1)),
    tf.keras.layers.BatchNormalization(),
])
my_seq(tf.zeros([1, 2, 3, 3]))

########################################
import os
import matplotlib.pyplot as plt

# Iris training data hosted by TensorFlow.
# BUG FIX: the host was written as "download.tensorflow_.org" (stray
# underscore), which cannot resolve; the download would fail whenever the
# local cached copy below is missing.
train_dataset_url = "http://download.tensorflow.org/data/iris_training.csv"

# get_file returns the local path; it only downloads if the file is absent.
train_dataset_fp = tf.keras.utils.get_file(fname=r"F:\Archive\files\iris_training.csv",
                                           origin=train_dataset_url)

print("Local copy of the dataset file: {}".format(train_dataset_fp))

# Column order in the CSV file; the last column is the label.
column_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']

feature_names, label_name = column_names[:-1], column_names[-1]
print("Features: {}".format(feature_names))
print("Label: {}".format(label_name))

# Label encoding: 0 = Iris setosa, 1 = Iris versicolor, 2 = Iris virginica.
class_names = ['Iris setosa', 'Iris versicolor', 'Iris virginica']
batch_size = 32

# Batched dataset of (feature-dict, label) pairs, one pass over the file.
train_dataset = tf.contrib.data.make_csv_dataset(
    train_dataset_fp,
    batch_size,
    column_names=column_names,
    label_name=label_name,
    num_epochs=1)

features, labels = next(iter(train_dataset))


# Repack the dict of per-column feature tensors into a single array of shape
# (batch_size, num_features). tf.stack takes a list of tensors and combines
# them along the given axis.
def pack_features_vector(features, labels):
    """Pack the per-column features into a single 2-D array."""
    columns = list(features.values())
    return tf.stack(columns, axis=1), labels


train_dataset = train_dataset.map(pack_features_vector)
features, labels = next(iter(train_dataset))
print(features)
# Small MLP classifier: 4 features in, 3 class logits out.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation=tf.nn.relu, input_shape=(4,)),  # input shape required
    tf.keras.layers.Dense(10, activation=tf.nn.relu),
    tf.keras.layers.Dense(3),
])

predictions = model(features)
# print(tf.nn.softmax(predictions[:5]))


def loss(model, x, y):
    """Return the batch-averaged sparse softmax cross-entropy loss."""
    logits = model(x)
    return tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits)


l = loss(model, features, labels)
print("Loss test: {}".format(l))
def grad(model, inputs, targets):
    """Return (loss, gradients of loss w.r.t. the model's trainable variables)."""
    with tf.GradientTape() as tape:
        loss_value = loss(model, inputs, targets)
    gradients = tape.gradient(loss_value, model.trainable_variables)
    return loss_value, gradients


optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

global_step = tf.train.get_or_create_global_step()
loss_value, grads = grad(model, features, labels)
print("Step: {}, Initial Loss: {}".format(global_step.numpy(),
                                          loss_value.numpy()))

# One optimizer step; apply_gradients also increments global_step.
optimizer.apply_gradients(zip(grads, model.variables), global_step)

print("Step: {},         Loss: {}".format(global_step.numpy(),
                                          loss(model, features, labels).numpy()))
## Note: Rerunning this cell uses the same model variables

# Per-epoch metrics kept for plotting below.
train_loss_results = []
train_accuracy_results = []

num_epochs = 201

for epoch in range(num_epochs):
    # Fresh metric accumulators each epoch.
    epoch_loss_avg = tfe.metrics.Mean()
    epoch_accuracy = tfe.metrics.Accuracy()

    # Training loop - using batches of 32.
    for batch_x, batch_y in train_dataset:
        # Compute gradients and take one optimizer step.
        loss_value, grads = grad(model, batch_x, batch_y)
        optimizer.apply_gradients(zip(grads, model.variables),
                                  global_step)
        # Track progress: accumulate batch loss ...
        epoch_loss_avg(loss_value)
        # ... and compare predicted labels against the actual labels.
        predicted = tf.argmax(model(batch_x), axis=1, output_type=tf.int32)
        epoch_accuracy(predicted, batch_y)

    # End of epoch: record the averaged metrics.
    train_loss_results.append(epoch_loss_avg.result())
    train_accuracy_results.append(epoch_accuracy.result())

    if epoch % 50 == 0:
        print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(epoch,
                                                                    epoch_loss_avg.result(),
                                                                    epoch_accuracy.result()))
# Loss and accuracy per epoch on stacked subplots sharing the x axis.
fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))
fig.suptitle('Training Metrics')

axes[0].plot(train_loss_results)
axes[0].set_ylabel("Loss", fontsize=14)

axes[1].plot(train_accuracy_results)
axes[1].set_ylabel("Accuracy", fontsize=14)
axes[1].set_xlabel("Epoch", fontsize=14)

plt.show()