import sys
import os

'''
0：显示所有日志（默认）。
1：屏蔽 INFO 日志。
2：屏蔽 INFO 和 WARNING 日志。
3：屏蔽所有日志（包括 ERROR）。
'''
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import tensorflow as tf
import numpy as np

import sklearn as sl
import pandas as pd

### Show whether the toolchain is working correctly
# Mapping of human-readable labels to the version / device info to report.
variables = {
    "TensorFlow Version": tf.__version__,
    "GPU available": tf.config.list_physical_devices('GPU'),
    "Python Version": sys.version,
    "Numpy Version": np.__version__,
    "Sklearn Version": sl.__version__,
    "pandas Version": pd.__version__,
}

# Pad every label to the longest key (+1) so the colons line up.
# A generator expression avoids building a throwaway list inside max().
max_width = max(len(key) for key in variables) + 1
for key, value in variables.items():
    print(f"{key:<{max_width}}: {value}")

### 以下为测试

## 1.监测tensor是否能正常使用
# a = tf.constant([1.0, 2.0], name="a")
# b = tf.constant([1.0, 2.0], name="b")
# result = tf.add(a, b, name="add")
# print(result)

## 2.降维操作，更新数据类型，找出最大/最小张量 tf.case  tf.reduce_min  tf.reduce_max
# x1 = tf.constant([1.,2.,3.],
#                  dtype=tf.float64)
# print(x1)
#
# x2 = tf.cast(x1, tf.int32)
# print(x2)
#
# print(tf.reduce_min(x1),
#       tf.reduce_max(x2))

## 3.降维操作，使用维度方向
# x1 = tf.constant([[1,2,3], [2,2,3]])
# x = tf.cast(x1, tf.float64)
#
# print(x)
# print(tf.reduce_sum(x))
# print(tf.reduce_sum(x, axis=0))
# print(tf.reduce_sum(x, axis=1))
# print(tf.reduce_mean(x)) #全局平均值
# print(tf.reduce_mean(x, axis=0)) #纵轴平均值(列)
# print(tf.reduce_mean(x, axis=1)) #横轴平均值(行)

## 4.标记为待训练参数 tf.Variable
# r = tf.random.normal([2,2], mean=0, stddev=1) #生成服从正态分布的随机数的函数(正态分布的均值为0，标准差为1)
# w = tf.Variable(r) #初始化权重参数w
# print(r)
# print(w)

## 5.数学运算(复习线代)
'''
1、四则运算: tf.add  tf.subtract  tf.multiply  tf.divide
2、平方、次方、开方: tf.square  tf.pow  tf.sqrt
3、矩阵乘: tf.matmul
'''

## 6.创建 tf.data.Dataset 对象，特征和标签配对
## dataset = tf.data.Dataset.from_tensor_slices((输入特征, 标签)) #Numpy\Tensor格式都可用该语句读入数据
# features = tf.constant([12,23,10,17])
# labels = tf.constant([0,1,1,0])
# dataset = tf.data.Dataset.from_tensor_slices((features, labels))
# print(dataset)
# for element in dataset:
#     print(element)

## 7.自动计算梯度 tf.GradientTape(tape.gradient、tape.watch)
# x = tf.Variable(3.0)
# y = tf.Variable(2.0)
#
# with tf.GradientTape() as tape:
#     z = tf.pow(x, 2) + tf.pow(y, 2)
# dz_dx, dz_dy = tape.gradient(z, [x, y])
# print(dz_dx)
# print(dz_dy)
# print(dz_dx.numpy())
# print(dz_dy.numpy())

## 8.遍历 enumerate
# seq = ['one', 'two', 'three']
# for i, element in enumerate(seq):
#     print(i, element)
## 9.独热编码 tf.one_hot
# classes = 3
# labels = tf.constant([1,0,2])
# output = tf.one_hot(labels, depth=classes)
# print(output)

## 10.归一化,将得分变成概率 tf.nn.softmax
# y = tf.constant([1.01, 2.01, -0.66])
# y_pro = tf.nn.softmax(y)
# print(y_pro)

## 11.更新参数的值 assign_sub\assign_add
# w = tf.Variable(4)
# print(w)
# w.assign_sub(1)
# print(w)

## 12.返回张量沿指定维度最大值的索引 tf.argmax
# test = np.array([[1,12,3],[4,5,16],[7,8,9],[10,11,12]])
# print(tf.argmax(test, axis = 0)) #[3, 0, 1]
# print(tf.argmax(test, axis = 1)) #[1, 2, 2, 2]