import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import tensorflow as tf
import pandas as pd


plt.rcParams['font.sans-serif'] = ['SimHei'] # use the SimHei (黑体) font so Chinese text renders in plots
plt.rcParams['axes.unicode_minus'] = False # keep minus signs rendering correctly under a non-ASCII font
# Download the Iris training/test CSVs; get_file caches them locally and
# returns the local path.
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1],TRAIN_URL)
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1],TEST_URL)
# Four features: sepal length, sepal width, petal length, petal width,
# e.g. a row looks like [5. 3. 1.6 0.2 0.]
# One label: 0 = setosa, 1 = versicolor, 2 = virginica
column_names = ['SepalLength','SepalWidth','PetalLength','PetalWidth','Species']  # NOTE: defined but not used below
df_iris_train = pd.read_csv(train_path,header=0)
df_iris_test = pd.read_csv(test_path,header=0)
iris_train = np.array(df_iris_train)  # (n_train, 5) float64 array: 4 features + label
iris_test = np.array(df_iris_test)    # (n_test, 5) float64 array

def data(x, y):
    """Split the train/test arrays into petal features and labels.

    Parameters
    ----------
    x : np.ndarray
        Training set, shape (n, 5): sepal length/width, petal
        length/width, species label.
    y : np.ndarray
        Test set with the same column layout.

    Returns
    -------
    tuple
        (x_train, x_test, y_train, y_test, num_train, num_test) where the
        feature arrays are columns 2:4 (petal length/width), the label
        arrays are column 4, and the counts are the number of rows.
    """
    # BUG FIX: the original body ignored its parameters and read the
    # module-level iris_train/iris_test globals; use the arguments instead
    # (the call site passes those same arrays, so results are unchanged).
    x_train = x[:, 2:4]  # petal length, petal width
    y_train = x[:, 4]    # species label (0/1/2)
    x_test = y[:, 2:4]
    y_test = y[:, 4]
    num_train = len(x_train)
    num_test = len(x_test)
    return x_train, x_test, y_train, y_test, num_train, num_test

x_train,x_test,y_train,y_test,num_train,num_test=data(iris_train,iris_test)

def dataDeal(x, y, num):
    """Build the design matrix and one-hot label matrix for softmax regression.

    Prepends a bias column of ones to the feature matrix ``x`` (``num`` rows)
    and one-hot encodes the integer labels ``y`` over the 3 iris classes.

    Returns:
        (X, Y): X is a float32 tensor of shape (num, 3) — bias + 2 features;
        Y is a (num, 3) one-hot label tensor.
    """
    # Bias column of ones, shaped (num, 1) for concatenation.
    bias_column = np.ones((num, 1))
    design = tf.concat((bias_column, x), axis=1)
    X = tf.cast(design, dtype=tf.float32)
    # One-hot encoding: label 0 -> [1,0,0], 1 -> [0,1,0], 2 -> [0,0,1].
    labels = tf.constant(y, dtype=tf.int32)
    Y = tf.one_hot(labels, 3)
    return X, Y

# Build design matrices (bias + petal features) and one-hot labels.
X_train,Y_train=dataDeal(x_train,y_train,num_train)
X_test,Y_test=dataDeal(x_test,y_test,num_test)

# Step 3: hyperparameters
learn_rate = 0.2  # learning rate
iter = 700        # number of gradient-descent iterations (NOTE: shadows builtin iter)
display_step = 100  # print progress every this many iterations
# Step 4: initial model parameters
np.random.seed(612)  # fixed seed for reproducible initial weights
w = tf.Variable(np.random.randn(3,3),dtype=tf.float32)  # (3,3) weights: bias + 2 features -> 3 classes, normal init
# Step 5: training history buffers
ce_train = []  # cross-entropy loss on the training set, per iteration
ce_test = []   # cross-entropy loss on the test set, per iteration
acc_train = [] # training accuracy, per iteration
acc_test = []  # test accuracy, per iteration

for i in range(0,iter+1):
    # Automatic differentiation: the tape watches the tf.Variable w so we
    # can take d(loss)/d(w) after the forward pass.
    with tf.GradientTape() as tape:
        # tf.matmul: matrix product X @ w gives per-sample class logits;
        # softmax turns the logits into class probabilities.
        pred_train = tf.nn.softmax(tf.matmul(X_train,w))
        # Average cross-entropy between one-hot labels and predictions.
        loss_train = -tf.reduce_sum(Y_train * tf.math.log(pred_train))/num_train
        pred_test = tf.nn.softmax(tf.matmul(X_test,w))
        loss_test = -tf.reduce_sum(Y_test * tf.math.log(pred_test))/num_test
    # Accuracy: fraction of samples whose argmax class equals the label.
    # tf.argmax returns the index of the largest probability along axis 1.
    # NOTE(review): argmax yields int64 while y_train/y_test are float64;
    # this relies on tf.equal's implicit conversion — confirm it holds for
    # the installed TF version.
    accuracy_train = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred_train.numpy(),axis=1),y_train),tf.float32))
    accuracy_test = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred_test.numpy(),axis=1),y_test),tf.float32))
    ce_train.append(loss_train)  # record training cross-entropy for this iteration
    ce_test.append(loss_test)    # record test cross-entropy for this iteration
    acc_train.append(accuracy_train)
    acc_test.append(accuracy_test)
    dl_dw = tape.gradient(loss_train,w)  # gradient of the TRAINING loss w.r.t. w

    # Gradient-descent update: w <- w - learn_rate * gradient.
    w.assign_sub(learn_rate*dl_dw)
    # Both train and test losses decrease monotonically on this dataset.
    if i % display_step == 0:
        print("i:",i," ,trainacc:",accuracy_train.numpy()," ,trainloss:",loss_train.numpy()," ,testacc:",accuracy_test.numpy(), " ,testloss:",loss_test.numpy())

cm_bg = mpl.colors.ListedColormap(['#A0FFA0','#FFA0A0','#A0A0FF'])
def draw_classification(x, y):
    """Plot the model's decision regions with the samples of ``x`` overlaid.

    Evaluates the trained global weights ``w`` on a dense grid spanning the
    range of the two feature columns of ``x``, colors each grid cell by the
    predicted class, then scatters the samples colored by their true label
    ``y``. Shows the figure; returns nothing.
    """
    grid_size = 500
    feat_min = x.min(axis=0)  # per-column minima of the two features
    feat_max = x.max(axis=0)  # per-column maxima
    axis1 = np.linspace(feat_min[0], feat_max[0], grid_size)
    axis2 = np.linspace(feat_min[1], feat_max[1], grid_size)
    g1, g2 = np.meshgrid(axis1, axis2)

    # Flatten the grid and prepend a bias column of ones, matching the
    # layout of the training design matrix.
    bias = np.ones(grid_size * grid_size)
    grid_points = np.stack((bias, g1.reshape(-1), g2.reshape(-1)), axis=1)
    grid_tensor = tf.cast(grid_points, tf.float32)
    probs = tf.nn.softmax(tf.matmul(grid_tensor, w))
    pred_class = tf.argmax(probs.numpy(), axis=1)
    region = tf.reshape(pred_class, g1.shape)
    plt.pcolormesh(g1, g2, region, cmap=cm_bg)
    plt.scatter(x[:, 0], x[:, 1], marker='.', c=y, cmap="brg")
    plt.show()

# Visualize the learned decision boundaries on both splits.
draw_classification(x_train,y_train)
draw_classification(x_test,y_test)