import tensorflow as tf
# 强制tensor转换为该数据类型
# tf.cast(张量名, dtype=数据类型)
# 计算张量维度上元素的最小值
# tf.reduce_min(张量名)
# 计算张量维度上元素的最大值
# tf.reduce_max(张量名)

# 操作轴 axis=0为纵向（经度方向） axis=1为横向（纬度方向）

# 对应元素的四则运算:tf.add，tf.subtract，tf.multiply，tf.divide 只有维度相同的张量才能进行四则运算
# 平方、次方与开方:tf.square,tf.pow，tf.sqrt
# 矩阵乘:tf.matmul

# # 切分传入张量的第一维度，生成输入特征标签对，构建数据集（numpy能也用）
# # data = tf.data.Dataset.from_tensor_slices((输入特征, 标签))
# features = tf.constant([12,23,10,17])
# labels = tf.constant([0,1, 1,0])
# dataset = tf.data.Dataset.from_tensor_slices((features, labels))
# print(dataset)
# for element in dataset:
#     print(element)
# # <TensorSliceDataset shapes: ((), ()), types: (tf.int32, tf.int32)>
# # (<tf.Tensor: shape=(), dtype=int32, numpy=12>, <tf.Tensor: shape=(), dtype=int32, numpy=0>)
# # (<tf.Tensor: shape=(), dtype=int32, numpy=23>, <tf.Tensor: shape=(), dtype=int32, numpy=1>)
# # (<tf.Tensor: shape=(), dtype=int32, numpy=10>, <tf.Tensor: shape=(), dtype=int32, numpy=1>)
# # (<tf.Tensor: shape=(), dtype=int32, numpy=17>, <tf.Tensor: shape=(), dtype=int32, numpy=0>)

# with tf.GradientTape() as tape:
#     w=tf.Variable(tf.constant(3.0))
#     loss= tf.pow(w,2)
# grad = tape.gradient(loss,w)
# print(grad)
# # tf.Tensor(6.0, shape=(), dtype=float32)

# seq=['one', 'two', 'three']
# for i, element in enumerate(seq):
#     print(i, element)

# classes =3
# labels = tf.constant([1,0,2])#输入的元素值最小为0，最大为2
# output = tf.one_hot(labels, depth=classes)
# print(output)
# tf.Tensor(
# [[0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 1.]], shape=(3, 3), dtype=float32)

# y = tf.constant ([1.01,2.01, -0.66])
# y_pro= tf.nn.softmax(y)
# print("After softmax, y_pro is:",y_pro)
# After softmax, y_pro is: tf.Tensor([0.25598174 0.69583046 0.0481878], shape=(3,), dtype=float32)

# w = tf.Variable(4)
# w.assign_sub(1)  # 自减1，4-1=3
# print(w)
# <tf.Variable 'Variable:0' shape=() dtype=int32, numpy=3>

# import numpy as np
# test = np.array([[1,2,3],[2,3,4],[5, 4,3],[8,7,2]])
# print(test)
# print(tf.argmax(test, axis=0)) #返回每一列（经度)最大值的索引
# print(tf.argmax(test, axis=1)) #返回每一行（纬度）最大值的索引


