import tensorflow as tf
import csv
import numpy as np


class IrisClassfiy:
    """Softmax classifier for the Iris CSV dataset (TensorFlow 1.x graph API).

    The CSV is read once at construction time: leading id columns are
    skipped, the header row is dropped, string labels are mapped to integer
    ids, and the resulting numeric matrix is kept in ``self.datas``
    (features in the leading columns, encoded label in the last one).
    """

    def __init__(self,
                 filepath="Iris.csv",  # path to the CSV data file
                 encoding="utf-8",     # character set used when reading the file
                 startRow=1,           # 1-based index of the header row (it is deleted)
                 startCol=1,           # first column holding data; columns before it are skipped
                 labelRow=-1):         # index of the label column within each row
        """Load the CSV, encode labels as ints, and define the input placeholders."""
        with open(filepath, "r", encoding=encoding) as datacsv:
            reader = csv.reader(datacsv)
            rows = [row[startCol:] for row in reader]  # skip leading id column(s)
            del rows[startRow - 1]  # drop the header ROW (original comment said "column")
            # Numeric copy of the CSV: feature columns plus the encoded label column.
            self.datas = np.zeros([len(rows), len(rows[0])])
            # Map each distinct label string to a stable integer id.
            self.key_dict = {}
            labels = set(row[labelRow] for row in rows)  # de-duplicate label strings
            self.output_size = len(labels)
            for idx, label in enumerate(labels):
                self.key_dict[label] = idx
            for i, row in enumerate(rows):
                row[labelRow] = self.key_dict[row[labelRow]]  # replace label text with its id
                self.datas[i] = row  # numpy coerces the remaining strings to float
            # Placeholders: feature vectors in, one-hot targets out.
            self.input = tf.placeholder(tf.float32, [None, len(self.datas[0]) - 1])
            # NOTE: attribute name "taget" (sic) kept for backward compatibility.
            self.taget = tf.placeholder(tf.float32, [None, self.output_size])

    def train(self):
        """Build the model graph.

        Returns:
            (layer, cross_entropy, train): prediction tensor (per-class
            probabilities), scalar loss tensor, and the SGD training op.
        """
        with tf.variable_scope('layer'):
            # Single dense layer with softmax so the output reads directly
            # as per-class probabilities.
            layer = tf.layers.dense(self.input, self.output_size,
                                    activation=tf.nn.softmax)
        with tf.variable_scope('loss'):
            # Manual cross-entropy.  Clip the probabilities away from zero so
            # tf.log never produces -inf/NaN when the softmax saturates.
            safe_probs = tf.clip_by_value(layer, 1e-10, 1.0)
            cross_entropy = tf.reduce_mean(
                -tf.reduce_sum(self.taget * tf.log(safe_probs), reduction_indices=[1]))
            # tf.losses.softmax_cross_entropy would also work; the manual form
            # makes the computation explicit.
        with tf.variable_scope('train'):
            # Plain gradient descent with a fixed learning rate of 0.5.
            train = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
        return layer, cross_entropy, train

    def cross_date(self, test_rate=0.1):
        """Shuffle the dataset and split it into test and train parts.

        Args:
            test_rate: fraction of rows held out for testing.

        Returns:
            (test_data, test_label, train_data, train_label) — label arrays
            are one-hot encoded.
        """
        test_num = int(len(self.datas) * test_rate)
        np.random.shuffle(self.datas)
        sample_index = np.random.choice(len(self.datas), test_num, replace=False)
        test_data = self.datas[sample_index, :-1]
        test_label = np.zeros([test_num, self.output_size])
        for i, label_id in enumerate(self.datas[sample_index, -1]):
            test_label[i, int(label_id)] = 1
        # Build the membership set once: O(1) lookups instead of scanning the
        # index array for every row (was O(n^2) overall).
        test_index_set = set(sample_index.tolist())
        train_num = len(self.datas) - test_num
        train_data = np.zeros([train_num, len(self.datas[0]) - 1])
        train_label = np.zeros([train_num, self.output_size])
        index = 0
        for i, row in enumerate(self.datas):
            if i not in test_index_set:
                train_data[index] = row[:-1]
                train_label[index, int(row[-1])] = 1
                index += 1
        return test_data, test_label, train_data, train_label

    def main(self, test_times=10, every_train_times=1000):
        """Run repeated train/test rounds, print per-sample predictions, and
        report the overall accuracy at the end.

        Args:
            test_times: number of shuffle/split/train/test rounds.
            every_train_times: gradient steps per round.
        """
        layer, loss, train = self.train()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            test_total_times = 0
            test_right_times = 0
            for runtimes in range(test_times):
                test_data, test_label, train_data, train_label = self.cross_date()
                for _ in range(every_train_times):
                    losses, _ = sess.run(
                        [loss, train],
                        feed_dict={self.input: train_data, self.taget: train_label})
                for i in range(len(test_data)):
                    flat = sess.run(layer, feed_dict={self.input: test_data[i][np.newaxis, :]})
                    index = np.argmax(flat)
                    rate = ' ['
                    for j in range(len(flat[0])):
                        rate += '%.2f%% ' % (flat[0][j] * 100)
                    rate += '] \t'
                    # Prediction is correct when the argmax hits the one-hot slot.
                    t_f = test_label[i, index] == 1
                    test_total_times += 1
                    if t_f:
                        test_right_times += 1
                    print('times:%d \t| rate:%s| flat:%s \t| label: %s \t| T&F:%s' % (
                        runtimes + 1, rate, index, test_label[i], t_f))
            # Guard against division by zero when no samples were evaluated
            # (e.g. test_times=0 or an empty test split).
            if test_total_times:
                print('test total times: %d \t test wrong times: %d \t right rate: %.2f%%' % (
                    test_total_times, test_total_times - test_right_times,
                    test_right_times / test_total_times * 100))


if __name__ == '__main__':
    # Build the classifier from the default Iris.csv and run the full
    # train/evaluate loop.
    classifier = IrisClassfiy()
    classifier.main(test_times=10, every_train_times=1000)
