'''import cv2.cv as cv
import numpy as np
import Image

MAX_CORNERS = 500;
# Initialize, load two images from the file system, and
# allocate the images and other structures we will need for
# results.
#
imgA = cv.LoadImage("OpticalFlow0.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE);
imgB = cv.LoadImage("OpticalFlow1.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE);
img_sz = cv.GetSize(imgA);
win_size = 10;
imgC = cv.LoadImage("OpticalFlow1.jpg", cv.CV_LOAD_IMAGE_UNCHANGED);

# The first thing we need to do is get the features
# we want to track.
#
eig_image = cv.CreateImage(img_sz, cv.IPL_DEPTH_32F, 1);
tmp_image = cv.CreateImage(img_sz, cv.IPL_DEPTH_32F, 1);
corner_count = MAX_CORNERS;

cornersA = []
# CvPoint2D32f* cornersA        = new CvPoint2D32f[ MAX_CORNERS ];
# cornersA =cvPointTo32f(MAX_CORNERS)

cornersA = cv.GoodFeaturesToTrack(
    imgA,  # image
    eig_image,  # Temporary floating-point 32-bit image
    tmp_image,  # Another temporary image
    #       cornersA,#number of coners to detect
    corner_count,  # number of coners to detect
    0.01,  # quality level
    5.0,  # minDistace
    useHarris=0,
);
cornerA = cv.FindCornerSubPix(
    imgA,
    cornersA,
    #   corner_count,
    (win_size, win_size),
    (-1, -1),
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03)
);
# Call the Lucas Kanade algorithm
#
# features_found = [ MAX_CORNERS ];
# feature_errors = [ MAX_CORNERS ];
pyr_sz = (imgA.width + 8, imgB.height / 3);
pyrA = cv.CreateImage(pyr_sz, cv.IPL_DEPTH_32F, 1);
pyrB = cv.CreateImage(pyr_sz, cv.IPL_DEPTH_32F, 1);
cornersB = [];
cornersB, features_found, feature_errors = cv.CalcOpticalFlowPyrLK(
    imgA,
    imgB,
    pyrA,
    pyrB,

    cornersA,

    # corner_count,
    (win_size, win_size),
    5,
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03),
    0
);
# Now make some image of what we are looking at:
#
for i in range(100):
    if (features_found[i] == 0 or feature_errors[i] > 550):
    # printf("Error is %f/n",feature_errors[i]);
    continue;

    print("Got it");

    p0 = (
        cv.Round(cornersA[i][1]),  # how ot get the (x, y)
        cv.Round(cornersA[i][1])
    )
    p1 = (
        cv.Round(cornersB[i][1]),
        cv.Round(cornersB[i][1])
    )
    cv.Line(imgC, p0, p1, cv.CV_RGB(255, 0, 0), 2);

cv.NamedWindow("ImageA", 0);
cv.NamedWindow("ImageB", 0);
cv.NamedWindow("LKpyr_OpticalFlow", 0);
cv.ShowImage("ImageA", imgA);
cv.ShowImage("ImageB", imgB);
cv.ShowImage("LKpyr_OpticalFlow", imgC);
cv.WaitKey(0);'''
#
# import numpy as np
# import cv2
#
# step=10
#
# if __name__ == '__main__':
#     cam = cv2.VideoCapture("/home/wuqi/Videos/zed1.avi")
#     ret, prev = cam.read()
#     prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
#
#     while True:
#         ret, img = cam.read()
#         gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#         # 使用Gunnar Farneback算法计算密集光流
#         flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 21, 5, 7, 1.2, 0)
#         prevgray = gray
#         # 绘制线
#         h, w = gray.shape[:2]
#         y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1).astype(int)
#         fx, fy = flow[y, x].T
#         print((x))
#         print((fx))
#         lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
#         lines = np.int32(lines)
#
#         line = []
#         for l in lines:
#             if l[0][0]-l[1][0]>3 or l[0][1]-l[1][1]>3:
#                 line.append(l)
#
#         cv2.polylines(img, line, 0, (0,255,255))
#         cv2.imshow('flow', img)
#
#         ch = cv2.waitKey(5)
#         if ch == 27:
#             break
#     cv2.destroyAllWindows()
# ---------------------
# 作者：阿卡蒂奥
# 来源：CSDN
# 原文：https://blog.csdn.net/akadiao/article/details/80548875
# 版权声明：本文为博主原创文章，转载请附上博文链接！

# import cv2
# import numpy as np
# cap = cv2.VideoCapture("/home/wuqi/Videos/zed1.avi")
#
# ret, frame1 = cap.read()
# prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
# hsv = np.zeros_like(frame1)
# hsv[...,1] = 255
# num = 1
# while(1):
#     ret, frame2 = cap.read()
#     next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
#     flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 6, 3, 5, 1.25, 0)
#     mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
#     hsv[...,0] = ang*180/np.pi
#     hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
#
#     rgb = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
#     print(hsv[...,2])
#     cv2.imshow("frame",frame2)
#     cv2.imshow("flow",rgb)
#     k = cv2.waitKey(30) & 0xff
#     if k == 27:
#         break
#     elif k == ord('s'):
#         cv2.imwrite('opticalfb.png',frame2)
#         cv2.imwrite('opticalhsv.png',rgb)
#     prvs = next
#
# cap.release()
# cv2.destroyAllWindows()

# import cv2
# import numpy as np
# def optical_flow(one, two):
#     """
#     method taken from (https://chatbotslife.com/autonomous-vehicle-speed-estimation-from-dashboard-cam-ca96c24120e4)
#     """
#     one_g = cv2.cvtColor(one, cv2.COLOR_RGB2GRAY)
#     two_g = cv2.cvtColor(two, cv2.COLOR_RGB2GRAY)
#     hsv = np.zeros((376, 1241, 3))
#     # set saturation
#     hsv[:,:,1] = cv2.cvtColor(two, cv2.COLOR_RGB2HSV)[:,:,1]
#     # obtain dense optical flow paramters
#     flow = cv2.calcOpticalFlowFarneback(one_g, two_g, flow=None,
#                                         pyr_scale=0.5, levels=1, winsize=15,
#                                         iterations=2,
#                                         poly_n=5, poly_sigma=1.1, flags=0)
#     # convert from cartesian to polar
#     mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
#     # hue corresponds to direction
#     hsv[:,:,0] = ang * (180/ np.pi / 2)
#     # value corresponds to magnitude
#     hsv[:,:,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
#     print(hsv[:,:,2])
#     # convert HSV to int32's
#     hsv = np.asarray(hsv, dtype= np.float32)
#     rgb_flow = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
#     return rgb_flow
#
#
# im1 = cv2.imread('/media/wuqi/ubuntu/dataset/kitti/data_odometry_color/dataset/sequences/00/image_2/000000.png')
# im2 = cv2.imread('/media/wuqi/ubuntu/dataset/kitti/data_odometry_color/dataset/sequences/00/image_2/000001.png')
#
# im3 = optical_flow(im1, im2)
#
# cv2.imshow("im1",im1)
# cv2.imshow("flow",im3)
# cv2.waitKey(0)





# from scipy import ndimage
# import scipy.misc
# import skimage.io as im_io
# import numpy as np
#
#
# im = scipy.misc.imread('/media/wuqi/ubuntu/dataset/kitti/data_odometry_color/dataset/sequences/01/image_2/000002.png')
# im2 = scipy.misc.imread('/media/wuqi/ubuntu/dataset/kitti/data_odometry_color/dataset/sequences/01/image_2/000003.png')
# im_concat = np.concatenate((im,im2),axis=2)
# im_concat_gray = np.stack((np.mean(im_concat[:,:,:3],axis=2,dtype=np.float32), np.mean(im_concat[:,:,3:],axis=2,dtype=np.float32)),axis=2)
# im_gray = np.mean(im,axis=2,dtype=np.float32)
# # im_gray_3 = np.tile(im_gray,(1,1,3))
# # scipy.misc.imshow(im_gray_3)
#
# sx_g = ndimage.sobel(im_gray, axis=0)
# sy_g = ndimage.sobel(im_gray, axis=1)
# sob_g = np.hypot(sx_g, sy_g)
# scipy.misc.imshow(im_concat_gray[:,:,0])
# scipy.misc.imshow(im_concat_gray[:,:,1])
#
# # im_io.use_plugin('gtk', 'imshow')
# # im_io.imshow(im_gray)
# # im_io.show()
#
# # sx0 = ndimage.sobel(im[:,:,0], axis=0, mode='constant')
# # sy0 = ndimage.sobel(im[:,:,0], axis=1, mode='constant')
# # sob0 = np.hypot(sx0, sy0)
# #
# # sx1 = ndimage.sobel(im[:,:,1], axis=0, mode='constant')
# # sy1 = ndimage.sobel(im[:,:,1], axis=1, mode='constant')
# # sob1 = np.hypot(sx1, sy1)
# #
# # sx2 = ndimage.sobel(im[:,:,2], axis=0, mode='constant')
# # sy2 = ndimage.sobel(im[:,:,2], axis=1, mode='constant')
# # sob2 = np.hypot(sx2, sy2)
# #
# # # sob = np.mean([sob0,sob1,sob2],axis=)
# #
# # sob = np.stack((sob0,sob1,sob2),axis=2)
# # sob = np.mean(sob,axis=2)
# # scipy.misc.imshow(sob2)




#
import matplotlib.pyplot as plt  # plt 用于显示图片
import matplotlib.image as mpimg  # mpimg 用于读取图片
import numpy as np
import tensorflow as tf
import cv2

# Path to one KITTI odometry frame; adjust to an image present on this machine.
IMG_PATH = '/media/wuqi/ubuntu/dataset/kitti/data_odometry_color/dataset/sequences/06/image_2/000006.png'

# Load the image. NOTE: matplotlib's imread signature is imread(fname, format=None);
# the original second argument ``0`` was a cv2.imread-style flag passed as the
# *format* parameter, which is invalid — it has been dropped.
myimg = mpimg.imread(IMG_PATH)

# matplotlib returns PNG data as float32 in [0, 1]; casting straight to uint8
# would truncate almost every pixel to 0, so scale to [0, 255] first.
# (If the source were already uint8 — e.g. a JPEG on newer matplotlib — the
# scaling would need to be skipped; TODO confirm the input format.)
img_u8 = np.uint8(myimg * 255)

# 3x3 Laplacian edge detector; CV_16S output depth keeps the negative
# responses instead of clipping them to zero.
edges_opencv = cv2.Laplacian(img_u8, cv2.CV_16S, ksize=3)

plt.imshow(edges_opencv)  # display the edge map
plt.axis('off')           # hide the axes
plt.show()
print(myimg.shape)
#
# full = np.reshape(myimg, [1, *myimg.shape])
# inputfull = tf.Variable(tf.constant(1.0, shape=[1, *myimg.shape]))
#
# filter = tf.Variable(tf.constant([[-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0],
#                                   [-1.0, -1.0, -1.0], [8.0, 8.0, 8.0],    [-1.0, -1.0, -1.0],
#                                   [-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]],
#                                  shape=[3, 3, 3, 1]))
#
# op = tf.nn.conv2d(inputfull, filter, strides=[1, 1, 1, 1], padding='SAME')  # 3个通道输入，生成1个feature ma
# o = tf.cast(tf.minimum(tf.nn.relu(op), 1), tf.uint8)
# # o = tf.cast(((op - tf.reduce_min(op)) / (tf.reduce_max(op) - tf.reduce_min(op))) * 255, tf.uint8)
#
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#
#     t, f = sess.run([o, filter], feed_dict={inputfull: full})
#     print(t.shape)  # (1, 512, 512, 1)
#     t = np.reshape(t, myimg.shape[:2])
#     print(t.shape)  # (512, 512)
#     plt.imshow(t, cmap='gray')  # 显示图片
#     plt.axis('off')  # 不显示坐标轴
#     plt.show()
#
# # #







# import tensorflow as tf
# import os
# import matplotlib.pyplot as plt
#
# filename='/media/wuqi/ubuntu/dataset/kitti/data_odometry_color/dataset/sequences/06/image_2/000006.png'
# img = tf.gfile.FastGFile(filename, 'rb').read() #读取三通道图片
# image_data = tf.image.decode_png(img)#进行编码
# image_show = tf.image.convert_image_dtype(image_data, dtype = tf.float32)#tensorflow中操作多为浮点型，而图片多为int型，故作此转化
# image_batch = tf.expand_dims(image_show, 0)#原来的单张图片为三维（height,width,channel）而下文中卷积核定义为四维（batchsize，height,width,channel），因此通过该方法在0维添加一维，相当于将原来的单张图片三维数据放在一个另一个集合中从而形成四维
#
# #边缘检测滤波器
#
# kernel=tf.constant([
# [
# [[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],
# [[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],
# [[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]
# ],
# [
# [[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],
# [[8.,0.,0.],[0.,8.,0.],[0.,0.,8.]],
# [[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]
# ],
# [
# [[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],
# [[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],
# [[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]
# ]
# ])
# with tf.Session() as sess:
#     sess.run(image_show)
#     # plt.imshow(image_show.eval())
#     # plt.show()  # 读入tensorflow后画出原图
#     conv2d = tf.nn.conv2d(image_batch, kernel, [1, 1, 1, 1], padding='SAME')
#     activation_map = sess.run(tf.minimum(tf.nn.relu(conv2d), 1))  # 激活措施加均值操作将颜色值置于（0~255）以内的区间
#     # activation_map = sess.run(tf.cast(((conv2d - tf.reduce_min(conv2d)) / (tf.reduce_max(conv2d) - tf.reduce_min(conv2d))) * 255, tf.uint8))
#     print(activation_map.shape)
#     encoded_image = activation_map.reshape([376, 1241, 3])  # 注意该步骤将得到的四维np.array数据还原为三维。plt显示的前提   
#
#      # encoded_image = tf.image.encode_jpeg(activation_map)
#      # with tf.gfile.GFile('/home/ubuntu/images/output.jpg', 'wb') as f:
#      # plt(encoded_image.eval())
#      # plt.show()
#      # f.write(encoded_image.eval())
#
#     active = tf.image.convert_image_dtype(encoded_image, dtype=tf.uint8)
#     a_uint8 = active.eval()
#     plt.imshow(active.eval())
#     plt.show()





import numpy as np

# Laplacian edge-detection kernel replicated over three input channels, laid
# out for conv2d as [height, width, in_channels, out_channels] = (3, 3, 3, 1).
# In flat row-major order every tap is -1.0 except the three centre taps
# (flat positions 12..14, i.e. element [1, 1, c, 0] for each channel c),
# which are 8.0.
_taps = np.full(27, -1.0)
_taps[12:15] = 8.0
filter_lap = _taps.reshape([3, 3, 3, 1])

# Print the slice for input channel 1 — the classic 3x3 Laplacian mask.
print(filter_lap[:,:,1,0])



