"""
1.把csv文件名生成dataset数据集  tf.data.Dataset.list_files(filenames)
2.到文件里取数据 然后合成大的数据集 tf.data.TextLineDataset(filename)   .interleave()
3.把取到的数据集(一行csv数据) 解析成我们要的数据x和标签y

"""
import tensorflow as tf 
import numpy as np
from tensorflow import keras

# CSV shard file paths: 20 training shards, 10 validation shards, 10 test
# shards, all living in the same generate/ directory.  Built with a helper
# instead of hand-typed lists so a shard-count change is a one-number edit.
_DATA_DIR = 'E:\\Microsoft VS Code\\code\\tensorflow_learning\\tf_data\\generate'

def _shard_paths(prefix, count):
    """Return [<dir>\\<prefix>_00.csv, ..., <prefix>_<count-1>.csv] (2-digit zero-padded)."""
    return ['{}\\{}_{:02d}.csv'.format(_DATA_DIR, prefix, i) for i in range(count)]

train_filenames = _shard_paths('train', 20)
valid_filenames = _shard_paths('valid', 10)
test_filenames = _shard_paths('test', 10)

# Parse one CSV text line with tf.io.decode_csv().
def parse_csv_line(line, n_fields=9):
    """Parse a single CSV row into a feature tensor and its label.

    Args:
        line: scalar string tensor holding one CSV row.
        n_fields: total number of columns; the last one is the label
            (housing data: 8 features + 1 label, hence the default 9).

    Returns:
        (x, y): x is a rank-1 float32 tensor with the n_fields-1 feature
        columns; y is a length-1 float32 tensor with the label.
    """
    # One default per column; tf.constant(1.) makes every column parse as float32.
    record_defaults = [tf.constant(1.)] * n_fields
    parsed_fields = tf.io.decode_csv(line, record_defaults=record_defaults)
    # All columns but the last are features.  (Was hard-coded [:8], which
    # silently ignored n_fields; [:-1] follows the parameter.)
    x = tf.stack(parsed_fields[:-1])
    # Slice with [-1:] so tf.stack receives a list and the label keeps shape
    # (1,); after batching that becomes (batch, 1), matching the Dense(1)
    # prediction shape in the MSE loss.  The original passed the bare scalar.
    y = tf.stack(parsed_fields[-1:])
    return x, y

# Load a list of CSV shard files into a batched, repeating tf.data pipeline.
def load_csv_to_datasets(filenames, cycle_length=5, batch_size=32, num_parallel_calls=5, shuffle=False):
    """Build a tf.data.Dataset of (features, label) batches from CSV shards.

    Args:
        filenames: list of CSV file paths (each with a one-line header).
        cycle_length: number of shard files read concurrently by interleave.
        batch_size: examples per batch.
        num_parallel_calls: parallelism for the parsing map step.
        shuffle: when True, shuffle individual examples before batching.

    Returns:
        An infinitely repeating dataset of (x, y) batches.
    """
    dataset = tf.data.Dataset.list_files(filenames)  # dataset of shard file names
    dataset = dataset.interleave(
        lambda filename: tf.data.TextLineDataset(filename).skip(1),  # skip header row
        cycle_length=cycle_length,
    )
    if shuffle:
        # Shuffle BEFORE batching so individual examples are mixed; the
        # original shuffled after .batch(), which only reorders whole batches.
        dataset = dataset.shuffle(10000)
    dataset = dataset.map(parse_csv_line, num_parallel_calls=num_parallel_calls)
    dataset = dataset.batch(batch_size)
    dataset = dataset.repeat()  # endless stream so fit() can run many epochs
    # Overlap preprocessing with training for free throughput.
    return dataset.prefetch(tf.data.experimental.AUTOTUNE)

# Build the three datasets with the custom loader and train a Keras model.
# Only the training set is shuffled: shuffling validation/test data has no
# benefit and makes evaluation order non-deterministic.
train_dataset = load_csv_to_datasets(train_filenames, shuffle=True)
valid_dataset = load_csv_to_datasets(valid_filenames)
test_dataset = load_csv_to_datasets(test_filenames)

model = keras.Sequential([
    keras.layers.Dense(10, activation='selu', input_shape=(8,)),
    keras.layers.Dense(1),
])

model.compile(loss='mse', optimizer='sgd')
# The datasets repeat forever, so Keras cannot infer the steps per epoch (the
# progress bar would show "unknown") -- supply them explicitly from the known
# sample counts: 11160 train / 3870 valid / 5160 test rows, batch size 32.
model.fit(
    train_dataset, validation_data=valid_dataset, epochs=10,
    steps_per_epoch=11160 // 32, validation_steps=3870 // 32)
model.evaluate(test_dataset, steps=5160 // 32)

