import tensorflow as tf
import numpy as np 
import matplotlib.pyplot as plt 

# Pair each feature value with its label, element-wise, via tf.data.
features = tf.constant([12, 23, 10, 17])
labels = tf.constant([0, 1, 1, 0])
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
print(dataset)
for sample in dataset:
    print(sample)


# Automatic differentiation: d(w^2)/dw = 2w, so the gradient at w = 3.0 is 6.0.
# Best practice (TF autodiff guide): create the Variable BEFORE entering the
# tape context — trainable variables are watched automatically, and creating
# them inside the tape re-allocates a fresh Variable on every pass.
w = tf.Variable(tf.constant(3.0))
with tf.GradientTape() as tape:
    loss = tf.pow(w, 2)
grad = tape.gradient(loss, w)  # tf.Tensor(6.0)
print(grad)
print('______________________')

# enumerate: iterate with a running index alongside each item.
seq = ['one', 'two', 'three']
for idx, word in enumerate(seq):
    print(idx, word)

# Classification labels are commonly one-hot encoded:
# 1 marks the true class, 0 marks every other class.
print('______________________')
num_classes = 3
labels = tf.constant([1, 0, 2])
print(tf.one_hot(labels, depth=num_classes))
print('______________________')
# softmax maps raw scores to a probability distribution (entries sum to 1).
logits = tf.constant([1.0, 2.0, 3.0])
probs = tf.nn.softmax(logits)
print(probs.numpy())
# In-place parameter update: assign_sub subtracts a value from a tf.Variable.
var = tf.Variable(5.0)
decrement = 2.0  # amount to subtract
var.assign_sub(decrement)
print(var.numpy())  # prints 3.0

# tf.argmax returns the index of the maximum along a given axis.
x = tf.constant([[1, 3, 2],
                 [4, 2, 1]])
# axis=1 scans across each row.
row_max_idx = tf.argmax(x, axis=1)
print(row_max_idx.numpy())  # prints [1 0]