'''
import pandas as pd
import numpy as np
import tensorflow as tf


def generate_data():
    num = 20
    label = np.asarray(range(0, num))
    images = np.random.random([num, 4])
    print((type(images)))
    print('label size :{}, image size {}'.format(label.shape, images.shape))
    return images,label

def get_batch_data():
    images, label, = generate_data()
    input_queue = tf.train.slice_input_producer([images, label], shuffle=False,num_epochs=1)
    image_batch, label_batch = tf.train.batch(input_queue, batch_size=2, num_threads=1, capacity=64,allow_smaller_final_batch=False)
    return image_batch,label_batch


images,label = get_batch_data()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())#就是这一行
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess,coord)
try:
    while not coord.should_stop():
        i,l = sess.run([images,label])
        print(i)
        # print((type(i)))
        print(l)
except tf.errors.OutOfRangeError:
    print('Done training')
finally:
    coord.request_stop()
coord.join(threads)
sess.close()
'''

''' import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np

path = '/media/wuqi/ubuntu/dataset/kitti/data_odometry_color/dataset/sequences/00/image_2/000000.png'  # 绝对路径
file_queue = tf.train.string_input_producer([path])  # 创建输入队列
image_reader = tf.WholeFileReader()  # reader
_, image = image_reader.read(file_queue)  # reader读取序列
image = tf.image.decode_jpeg(image)  # 解码，tensor
src_image_1 = tf.slice(image, [0, 0, 0], [200, -1 , -1])
with tf.Session() as sess:
    coord = tf.train.Coordinator()  # 协同启动的线程
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # 启动线程运行队列
    image1 = sess.run(image)
    image2 = sess.run(src_image_1)
    # sess.run(file_queue)
    print(type(image))  # tensor
    coord.request_stop()  # 停止所有的线程
    coord.join(threads)
    print(type(image.eval()))  # ndarray
    print(src_image_1.eval().shape)  # 240×320×3
    print(image.eval().dtype)  # uint8
    plt.figure(1)
    plt.imshow(image.eval())
    plt.show()
 '''
# import numpy as np
# t1 = [[[1, 2, 3], [4, 5, 6]]]
# t2 = np.array(t1)
# t3 = t2[0,:,:]
# print(t2.shape)
# print(t3.shape)
#
# t4 = [[1, 2, 3], [4, 5, 6]]

'''
import tensorflow as tf

images = ['img1', 'img2', 'img3', 'img4', 'img5']
labels= [1,2,3,4,5]

epoch_num=8

f = tf.train.slice_input_producer([images, labels],num_epochs=None,shuffle=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for i in range(epoch_num):
        k = sess.run(f)
        print('-----------------')
        print(i,k)

    coord.request_stop()
    coord.join(threads)
'''

'''
# print(2**3)
# for s in range(4):
#     print(s)

# tmp = np.array([0, 1])
# print(tmp.shape)
# ref_exp_mask = np.tile(tmp,
#                        (4,
#                         8,
#                         2))
# print(ref_exp_mask.shape)
# print(ref_exp_mask)
'''

# #our NN's output
# logits=tf.constant([[1.0,2.0,3.0],[1.0,2.0,3.0],[1.0,2.0,3.0]])
# #step1:do softmax
# y=tf.nn.softmax(logits)
# #true label
# y_=tf.constant([[0.0,0.0,1.0],[0.0,0.0,1.0],[0.0,0.0,1.0]])
# #step2:do cross_entropy
# cross_entropy = -tf.reduce_sum(y_*tf.log(y))
# #do cross_entropy just one step
# cross_entropy2=tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels= y_))#dont forget tf.reduce_sum()!!
#
# with tf.Session() as sess:
#     softmax=sess.run(y)
#     c_e = sess.run(cross_entropy)
#     c_e2 = sess.run(cross_entropy2)
#     c_e2 = sess.run(y_*tf.log(y))
#
#     print("step1:softmax result=")
#     print(softmax)
#     print("step2:cross_entropy result=")
#     print(c_e2)
#     print(c_e)
#     print("Function(softmax_cross_entropy_with_logits) result=")


# import pycuda.driver as drv
# import pycuda.tools
# import pycuda.autoinit
# import numpy
# import numpy.linalg as la
# from pycuda.compiler import SourceModule
# mod = SourceModule(
#     """
#     __global__ void multiply_them(float * dest, float * a, float * b)
#     {
#         const int m = threadIdx.x + blockIdx.x * blockDim.x;
#         const int n = threadIdx.y + blockIdx.y * blockDim.y;
#         const int i = 5 *n + m;
#         dest[i] = a[i] * b[i];
#         printf("i = %d des = %f\\n", i,dest[i]);
#     }
#      """)
# multiply_them = mod.get_function("multiply_them")
# # a = numpy.random.randn(50).astype(numpy.float32)
# # b = numpy.random.randn(50).astype(numpy.float32)
# a = numpy.random.randn(50).astype(numpy.float32)
# b = numpy.random.randn(50).astype(numpy.float32)
# dest = numpy.zeros_like(a)
# multiply_them(drv.Out(dest), drv.In(a), drv.In(b), block=(1, 10, 1), grid = (5,1 ,1))
# print(dest)

#
# from timeit import default_timer as timer
#
# import numpy as np
# import pycuda.driver as drv
# from pycuda.compiler import SourceModule
#
# mod = SourceModule("""
# __global__ void func(float *a, float *b, size_t N)
# {
#   const int i = blockIdx.x * blockDim.x + threadIdx.x;
#   if (i >= N)
#   {
#     return;
#   }
#   float temp_a = a[i];
#   float temp_b = b[i];
#   a[i] = (temp_a * 10 + 2 ) * ((temp_b + 2) * 10 - 5 ) * 5;
#   // a[i] = a[i] + b[i];
# }
# """)
#     #  int xindex = threadIdx.x + blockIdx.x * blockDim.x;
#     #  int xindex =  = atomicSub(&imgth, 1) -1;
#     #  int index = xindex + imgwidth * yindex;
# # mod_image = SourceModule("""
# # __global void imagge_sample(float *a, float *b,int ingheight, int imgwidth, int x)
# # {
# #      int yindex = threadIdx.y + blockIdx.y * blockDim.y;
#
# #      if (yIndex > 0 && yIndex < imgHeight)
# #      {
# #          for(int xindex = imgwidth -1; xindex >=0; xindex --)
# #          {
# #              int index = xindex + imgwidth * yindex;
# #          }
# #      }
# # }
# # """)
#
# func = mod.get_function("func")
#
#
# def test(N):
#     # N = 1024 * 1024 * 90   # float: 4M = 1024 * 1024
#
#     print("N = %d" % N)
#
#     N = np.int32(N)
#
#     a = np.random.randn(N).astype(np.float32)
#     b = np.random.randn(N).astype(np.float32)
#     # copy a to aa
#     aa = np.empty_like(a)
#     aa[:] = a
#     print("run")
#     # GPU run
#     nTheads = 256
#     nBlocks = int((N + nTheads - 1) / nTheads)
#     start = timer()
#     func(
#         drv.InOut(a), drv.In(b), N,
#         block=(nTheads, 1, 1), grid=(nBlocks, 1))
#     run_time = timer() - start
#     print("gpu run time %f seconds " % run_time)
#     # cpu run
#     start = timer()
#     aa = (aa * 10 + 2) * ((b + 2) * 10 - 5) * 5
#     run_time = timer() - start
#
#     print("cpu run time %f seconds " % run_time)
#
#     # check result
#     r = a - aa
#     print(min(r), max(r))
#
#
# def read_calib_file(path):
#     # taken from https://github.com/hunse/kitti
#     float_chars = set("0123456789.e+- ")
#     data = {}
#     with open(path, 'r') as f:
#         for line in f.readlines():
#             key, value = line.split(':', 1)
#             value = value.strip()
#             data[key] = value
#             if float_chars.issuperset(value):
#                 # try to cast to float array
#                 try:
#                     # data[key] = np.array(map(float, value.split(' ')))
#                     data[key] = np.array([float(v) for v in value.split(' ')])
#                 except ValueError:
#                     # casting error: data[key] already eq. value, so pass
#                     pass
#
#     return data
#
# def main():
#     # velo2cam = read_calib_file('/media/wuqi/ubuntu/dataset/2011_09_26/calib_velo_to_cam.txt')
#     # print('end')
#     for n in range(1, 4, 4):
#         N = 128 * 128 * (n * 1)
#         print("------------%d---------------" % n)
#         test(N)
#
#
# if __name__ == '__main__':
#     main()


# import tensorflow as tf
# import numpy as np
#
# print(tf.__version__)
#
# image_value = tf.read_file('/media/wuqi/ubuntu/dataset/kitti/resulting_odometry_data/train.txt' )

import tensorflow as tf
import numpy as np
import os
import random

def format_file_list(data_root, split):
    """Build parallel image / camera / relative-pose path lists for a split.

    Reads '<data_root>/<split>.txt', where each line is expected to look
    like '<subfolder> <frame_id>', and derives one path per line in each
    of the three lists.

    Args:
        data_root: Dataset root directory containing the split file and
            the per-sequence subfolders.
        split: Split name without extension, e.g. 'train'.

    Returns:
        Dict with keys 'image_file_list', 'cam_file_list' and
        'rel_pose__file_list', each a list of file paths parallel to the
        lines of the split file.
    """
    with open(os.path.join(data_root, '%s.txt' % split), 'r') as f:
        frames = f.readlines()
    subfolders = [x.split(' ')[0] for x in frames]
    # strip() is robust to a missing trailing newline on the last line;
    # the previous [:-1] slice silently chopped the final digit off the
    # frame id whenever the file did not end with '\n'.
    frame_ids = [x.split(' ')[1].strip() for x in frames]
    image_file_list = [os.path.join(data_root, subfolders[i],
                                    frame_ids[i] + '.jpg')
                       for i in range(len(frames))]
    cam_file_list = [os.path.join(data_root, subfolders[i],
                                  frame_ids[i] + '_cam.txt')
                     for i in range(len(frames))]
    rel_pose__file_list = [os.path.join(data_root, subfolders[i],
                                        frame_ids[i] + '_gt_rel_pose.txt')
                           for i in range(len(frames))]
    all_list = {
        'image_file_list': image_file_list,
        'cam_file_list': cam_file_list,
        'rel_pose__file_list': rel_pose__file_list,
    }
    return all_list

def main():
    """Scratch pad: commented-out TensorFlow input-pipeline experiments,
    followed by a small live string-splitting demo."""

    # --- experiment: queue-based loading of images / intrinsics / poses ---
    # file_list = format_file_list('/media/wuqi/ubuntu/dataset/kitti/resulting_odometry_data', 'train')
    # seed = random.randint(0, 2 ** 31 - 1)
    # image_paths_queue = tf.train.string_input_producer(
    #     file_list['image_file_list'],
    #     seed=seed,
    #     shuffle=True)
    # cam_paths_queue = tf.train.string_input_producer(
    #     file_list['cam_file_list'],
    #     seed=seed,
    #     shuffle=True)
    # rel_pose_path_quene = tf.train.string_input_producer(
    #     file_list['rel_pose__file_list'],
    #     seed=seed,
    #     shuffle=True)
    #
    # # Load images
    # img_reader = tf.WholeFileReader()
    # _, image_contents = img_reader.read(image_paths_queue)  # reads one whole file at a time?
    # image_seq = tf.image.decode_jpeg(image_contents)
    #
    # # Load camera intrinsics
    # cam_reader = tf.TextLineReader()
    # _, raw_cam_contents = cam_reader.read(cam_paths_queue)
    # rec_def = []
    # for i in range(9):
    #     rec_def.append([1.])
    # raw_cam_vec = tf.decode_csv(raw_cam_contents,
    #                             record_defaults=rec_def)
    # raw_cam_vec = tf.stack(raw_cam_vec)
    # intrinsics = tf.reshape(raw_cam_vec, [3, 3])
    #
    # # Load relative poses
    # pose_reader = tf.WholeFileReader()
    # _, pose_contents = pose_reader.read(rel_pose_path_quene)
    # rec_def2 = []
    # for i in range(24):
    #     rec_def2.append([1.])
    # raw_pose_vec = tf.decode_csv(pose_contents, record_defaults=rec_def2)
    # raw_pose_vec = tf.stack(raw_pose_vec)
    # rel_pose = tf.reshape(raw_pose_vec, [4, 6])
    #
    # img_batch = tf.train.batch([image_seq, intrinsics], batch_size=20, capacity=100)

    # --- experiment: deriving per-line paths from a listing file ---
    # filename_queue = tf.train.string_input_producer(['/media/wuqi/ubuntu/dataset/kitti/resulting_odometry_data/train.txt'], num_epochs=1)
    # reader = tf.TextLineReader()
    # key, value = reader.read(filename_queue)
    # split_line = tf.string_split([value]).values
    # pose_path = tf.string_join(['/media/wuqi/ubuntu/dataset/kitti/resulting_odometry_data/', split_line[1]])
    # pose_path = tf.convert_to_tensor(pose_path)
    #
    # reader_2 = tf.TextLineReader(pose_path)
    # _, pose_content = reader_2.read((pose_path))
    #
    # image_path = tf.string_join(['/media/wuqi/ubuntu/dataset/kitti/resulting_odometry_data/', split_line[0]])

    # --- experiment: tf.stack along different axes ---
    # x = tf.constant([1,])
    # y = tf.constant([2,])
    # z = tf.constant([3,])
    # a = tf.stack([x, y, z])          # pack along first dim
    # b = tf.stack([x, y, z], axis=1)  # pack along second dim

    # --- experiment: draining a queue inside a session ---
    # with tf.Session() as sess:
    #     sess.run(tf.initialize_local_variables())
    #     a = sess.run(a)
    #     print(a)
    #     tf.train.start_queue_runners()
    #     num_examples = 0
    #     try:
    #         while True:
    #             # s_key, s_value = sess.run([key, value])
    #             s_pose_path = sess.run(img_batch)
    #             print(s_pose_path)
    #             num_examples += 1
    #     except tf.errors.OutOfRangeError:
    #         print("There are", num_examples, "examples")

    # Live demo: ',' is the only separator here, so the space and the
    # newline inside the literal survive within the second element.
    sample = 'a,b c\nd e'
    whitespace = set(' \n')
    print(sample.split(','))
    print(whitespace)

if __name__ == "__main__":
    main()


# # coding=utf-8
# import csv
# #要保存后csv格式的文件名
# file_name_string="file.csv"
# with open(file_name_string, 'wb') as csvfile:
#     #编码风格，默认为excel方式，也就是逗号(,)分隔
#     spamwriter = csv.writer(csvfile, dialect='excel')
#     # 读取txt文件，每行按逗号分割提取数据
#     with open('ex1data1.txt', 'rb') as file_txt:
#         for line in file_txt:
#             line_datas= line.strip('\n').split(',')
#             spamwriter.writerow(line_datas)


