import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import tensorflow as tf
import pandas as pd


# Matplotlib configuration: use the SimHei font so the Chinese axis labels
# and titles below render correctly, and keep the minus sign displayable
# when that font is active.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Download the iris training/test CSVs (get_file caches them locally);
# the cache file name is the last path component of each URL.
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)

# Column layout of the CSVs, kept for reference (read_csv below takes the
# names from the header row inside the files themselves).
column_names = ['SepalLength','SepalWidth','PetalLength','PetalWidth','Species']

# ---- Load the CSVs and convert them to NumPy arrays ----
df_iris_train = pd.read_csv(train_path, header=0)
df_iris_test = pd.read_csv(test_path, header=0)

iris_train = df_iris_train.to_numpy()
iris_test = df_iris_test.to_numpy()
print(iris_train.shape,iris_test.shape)

# Columns 0-3 are the four flower measurements; column 4 is the class label.
x_train, y_train = iris_train[:, :4], iris_train[:, 4]
x_test, y_test = iris_test[:, :4], iris_test[:, 4]

num_train = len(x_train)
num_test = len(x_test)

# Center each feature: subtract the per-column mean of each split.
x_train = x_train - x_train.mean(axis=0)
x_test = x_test - x_test.mean(axis=0)

# Float32 feature tensors plus 3-way one-hot label tensors for training.
X_train = tf.cast(x_train, dtype=tf.float32)
X_test = tf.cast(x_test, dtype=tf.float32)
Y_train = tf.one_hot(tf.constant(y_train, dtype=tf.int32), 3)
Y_test = tf.one_hot(tf.constant(y_test, dtype=tf.int32), 3)

# ---- Step 3: hyperparameters ----
learn_rate = 0.5    # gradient-descent step size
iter = 100          # number of training iterations (NOTE: shadows the builtin `iter`)
display_step = 10   # print progress every `display_step` iterations

# ---- Step 4: initial model parameters ----
np.random.seed(612)  # fixed seed so every run draws the same initial weights
# Weight matrix maps 4 input features -> 3 class scores; standard-normal init.
w = tf.Variable(np.random.randn(4, 3), dtype=tf.float32)
# Per-class bias, initialized to zero.
b = tf.Variable(np.zeros(3), dtype=tf.float32)

# ---- Step 5: history buffers, appended to once per training iteration ----
ce_train = list()   # cross-entropy loss on the training set
ce_test = list()    # cross-entropy loss on the test set
acc_train = list()  # accuracy on the training set
acc_test = list()   # accuracy on the test set


# Train with full-batch gradient descent for `iter`+1 steps, logging loss and
# accuracy on both splits at every step.
for i in range(0, iter + 1):
  # Only the training forward pass needs gradient tracking; the tape
  # automatically watches the tf.Variables w and b.
  with tf.GradientTape() as tape:
    pred_train = tf.nn.softmax(tf.matmul(X_train, w) + b)
    loss_train = tf.reduce_mean(
        tf.keras.losses.categorical_crossentropy(y_true=Y_train, y_pred=pred_train))

  # Evaluate the test split OUTSIDE the tape: its loss is never
  # differentiated, so recording it would only waste memory and compute.
  pred_test = tf.nn.softmax(tf.matmul(X_test, w) + b)
  loss_test = tf.reduce_mean(
      tf.keras.losses.categorical_crossentropy(y_true=Y_test, y_pred=pred_test))

  # Accuracy = fraction of samples whose argmax class equals the label.
  # Cast the (float64) labels to int64 so they compare cleanly against the
  # int64 output of tf.argmax instead of relying on implicit conversion.
  accuracy_train = tf.reduce_mean(tf.cast(tf.equal(
      tf.argmax(pred_train, axis=1), tf.cast(y_train, tf.int64)), tf.float32))
  accuracy_test = tf.reduce_mean(tf.cast(tf.equal(
      tf.argmax(pred_test, axis=1), tf.cast(y_test, tf.int64)), tf.float32))

  # Store plain Python floats rather than live tensors: cheaper to hold and
  # just as easy to plot.
  ce_train.append(loss_train.numpy())      # training-set loss history
  ce_test.append(loss_test.numpy())        # test-set loss history
  acc_train.append(accuracy_train.numpy()) # training-set accuracy history
  acc_test.append(accuracy_test.numpy())   # test-set accuracy history

  # Gradients of the training loss w.r.t. the parameters, then one plain
  # gradient-descent update: w <- w - lr*dL/dw, b <- b - lr*dL/db.
  dl_dw, dl_db = tape.gradient(loss_train, [w, b])
  w.assign_sub(learn_rate * dl_dw)
  b.assign_sub(learn_rate * dl_db)

  if i % display_step == 0:
    print("i:",i," ,trainacc:",accuracy_train.numpy()," ,trainloss:",loss_train.numpy(),
       " ,testacc:",accuracy_test.numpy(), " ,testloss:",loss_test.numpy())

# Draw the two diagnostic panels side by side: loss curves on the left,
# accuracy curves on the right, each with train (blue) vs test (red).
plt.figure(figsize=(10,4))

panels = [(121, ce_train, ce_test, "loss"),
          (122, acc_train, acc_test, "accuracy")]
for position, train_series, test_series, y_label in panels:
    plt.subplot(position)
    plt.plot(train_series, color="blue", label="train")
    plt.plot(test_series, color="red", label="test")
    plt.xlabel("迭代次数")
    plt.ylabel(y_label)
    plt.legend()

plt.suptitle("训练集和测试集的损失曲线和迭代率曲线",fontsize=14)

plt.show()