#以下为基于python的opencv视觉识别基础
#安装Opencv-python库: pip install opencv-python
#安装matplotlib库：sudo apt-get install python3-matplotlib  https://matplotlib.org/stable/plot_types/index.html
#windows系统下安装matplotlib库:python -m pip install matplotlib


import cv2 #opencv读取的格式是BGR
import matplotlib.pyplot as plt
import numpy as np 

# 快速注释与取消注释 同时按住 Ctrl 和 / (VS Code 快捷键 Ctrl+/)

# 数据读取-图像
# cv2.IMREAD_COLOR：彩色图像
# cv2.IMREAD_GRAYSCALE：灰度图像
# img=cv2.imread('.\cat.jpg', cv2.IMREAD_GRAYSCALE) 
# #E:/课件资料/仿人机器人基础实践/calss2/opencv basic/ocr_a_reference.png
# #图像的显示,也可以创建多个窗口
# cv2.imshow('image',img) 
# #保存
# cv2.imwrite('mycat1.png',img)
# #查看图片大小
# print('img shape:',img.shape)
# print('img size:',img.size)
# # 等待时间，毫秒级，0表示任意键终止
# cv2.waitKey(0) 
# cv2.destroyAllWindows()


# #数据读取-视频 适用于nanocamera
# camera = nano.Camera(width=640,height=480,fps=30,enforce_fps=True)
# print (' CSI camera is ready?', camera.isReady())
# try: 
#     while camera.isReady():
#         frame = camera.read()
#         cv2.imshow('video frame', frame)
#         if cv2.waitKey(100) & 0xFF == ord('q'):
#             break

# finally:
#     camera.release()
#     del camera
#     cv2.destroyAllWindows()


# #截取部分图像数据
# img=cv2.imread('cat.jpg')
# cat1=img[0:50,0:100] 
# cv2.imshow('cat',img)
# cv2.imshow('cat1',cat1)
# # 等待时间，毫秒级，0表示任意键终止
# cv2.waitKey(0) 
# cv2.destroyAllWindows()



# #颜色通道提取
# img=cv2.imread('cat.jpg')
# b,g,r=cv2.split(img)
# print('r',r)
# print('r shape:',r.shape)
# print('b',b)
# img2= cv2.merge((b,g,r))
# cv2.imshow('img2',img2)
# cv2.waitKey(0) 
# cv2.destroyAllWindows()


#边界填充
# img=cv2.imread('cat.jpg')
# top_size,bottom_size,left_size,right_size = (50,50,50,50)
# replicate = cv2.copyMakeBorder(img, top_size, bottom_size, left_size, right_size, borderType=cv2.BORDER_REPLICATE)
# reflect = cv2.copyMakeBorder(img, top_size, bottom_size, left_size, right_size,cv2.BORDER_REFLECT)
# reflect101 = cv2.copyMakeBorder(img, top_size, bottom_size, left_size, right_size, cv2.BORDER_REFLECT_101)
# wrap = cv2.copyMakeBorder(img, top_size, bottom_size, left_size, right_size, cv2.BORDER_WRAP)
# constant = cv2.copyMakeBorder(img, top_size, bottom_size, left_size, right_size,cv2.BORDER_CONSTANT, value=0)

# plt.subplot(231), plt.imshow(img, 'gray'), plt.title('ORIGINAL')
# plt.subplot(232), plt.imshow(replicate, 'gray'), plt.title('REPLICATE')
# plt.subplot(233), plt.imshow(reflect, 'gray'), plt.title('REFLECT')
# plt.subplot(234), plt.imshow(reflect101, 'gray'), plt.title('REFLECT_101')
# plt.subplot(235), plt.imshow(wrap, 'gray'), plt.title('WRAP')
# plt.subplot(236), plt.imshow(constant, 'gray'), plt.title('CONSTANT')

# plt.show()
# cv2.waitKey(0) 
# cv2.destroyAllWindows()



# #改变图片大小 
# img=cv2.imread('cat.jpg')
# img2=cv2.resize(img,(0,0),fx=4,fy=4)
# cv2.imshow('img2', img2)
# cv2.waitKey(0) 
# cv2.destroyAllWindows()



# 转灰度图及HSV
# img=cv2.imread('cat.jpg')
# img_gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# hs= cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# cv2.imshow('img_gray', img_gray)
# cv2.imshow('hsv',hs)
# cv2.waitKey(0) 
# cv2.destroyAllWindows()



#图像阈值
# ret, dst = cv2.threshold(src, thresh, maxval, type)
# src： 输入图，只能输入单通道图像，通常来说为灰度图
# dst： 输出图
# thresh： 阈值
# maxval： 当像素值超过了阈值（或者小于阈值，根据type来决定），所赋予的值
# type：二值化操作的类型，包含以下5种类型： cv2.THRESH_BINARY； cv2.THRESH_BINARY_INV； cv2.THRESH_TRUNC； cv2.THRESH_TOZERO；cv2.THRESH_TOZERO_INV
# cv2.THRESH_BINARY 超过阈值部分取maxval（最大值），否则取0
# cv2.THRESH_BINARY_INV THRESH_BINARY的反转
# cv2.THRESH_TRUNC 大于阈值部分设为阈值，否则不变
# cv2.THRESH_TOZERO 大于阈值部分不改变，否则设为0
# cv2.THRESH_TOZERO_INV THRESH_TOZERO的反转

# img=cv2.imread('cat.jpg')
# img_gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# ret, thresh1 = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY)
# ret, thresh2 = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY_INV)
# ret, thresh3 = cv2.threshold(img_gray, 127, 255, cv2.THRESH_TRUNC)
# ret, thresh4 = cv2.threshold(img_gray, 127, 255, cv2.THRESH_TOZERO)
# ret, thresh5 = cv2.threshold(img_gray, 127, 255, cv2.THRESH_TOZERO_INV)
# titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']
# images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
# for i in range(6):
#     plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
#     plt.title(titles[i])
#     plt.xticks([]), plt.yticks([])
# plt.show()
# cv2.waitKey(0) 
# cv2.destroyAllWindows()



#图像平滑
# img=cv2.imread('cat.jpg')
# blur = cv2.blur(img,(3,3))  # 均值滤波，简单的平均卷积操作
# box1 = cv2.boxFilter(img, -1,(3,3),normalize=True) # 方框滤波，基本和均值一样，可以选择归一化
# box2 = cv2.boxFilter(img, -1,(3,3),normalize=False) # 方框滤波，基本和均值一样，可以选择归一化
# aussian = cv2.GaussianBlur(img, (5, 5), 1) # 高斯滤波，高斯模糊的卷积核里的数值是满足高斯分布，相当于更重视中间的
# median = cv2.medianBlur(img, 5) # 中值滤波，相当于用中值代替
# res = np.hstack((img,blur,box1,box2,aussian,median))
# cv2.imshow('d', res)
# cv2.waitKey(0) 
# cv2.destroyAllWindows()


# #形态学-腐蚀操作,膨胀操作

# img=cv2.imread('cat.jpg')
# kernel = np.ones((3,3),np.uint8) 
# erosion = cv2.erode(img,kernel,iterations = 1) #腐蚀
# dilate = cv2.dilate(img,kernel,iterations = 1)  #膨胀
# res1 = np.hstack((erosion,dilate))
# pie = cv2.imread('pie.png')
# kerne2 = np.ones((30,30),np.uint8) 
# dilate_1 = cv2.dilate(pie,kerne2,iterations = 1)
# dilate_2 = cv2.dilate(pie,kerne2,iterations = 2)
# dilate_3 = cv2.dilate(pie,kerne2,iterations = 3)
# res2 = np.hstack((dilate_1,dilate_2,dilate_3))
# cv2.imshow('res1', res1)
# cv2.imshow('res2', res2)
# cv2.waitKey(0) 
# cv2.destroyAllWindows()



#开运算与闭运算
# 先腐蚀后膨胀叫开运算（因为先腐蚀会分开物体，这样容易记住），其作用是：分离物体，消除小区域。
# 先膨胀后腐蚀（先膨胀会使白色的部分扩张，以至于消除/"闭合"物体里面的小黑洞，所以叫闭运算）
# img = cv2.imread('dige.png')
# cv2.imshow('original', img)
# kernel = np.ones((5,5),np.uint8) 
# opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
# cv2.imshow('opening', opening)
# kernel = np.ones((5,5),np.uint8) 
# closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
# cv2.imshow('closing', closing)
# cv2.waitKey(0) 
# cv2.destroyAllWindows()




# # 礼帽与黑帽
# # 礼帽 = 原始输入-开运算结果
# # 黑帽 = 闭运算-原始输入

# img = cv2.imread('dige.png')
# kernel = np.ones((7,7),np.uint8) 
# tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
# blackhat  = cv2.morphologyEx(img,cv2.MORPH_BLACKHAT, kernel)
# plt.subplot(121), plt.imshow(tophat, 'gray'), plt.title('tophat')
# plt.subplot(122), plt.imshow(blackhat, 'gray'), plt.title('blackhat')
# plt.show()
# cv2.waitKey(0) 
# cv2.destroyAllWindows()


#图像梯度-Sobel算子
# img = cv2.imread('pie.png',cv2.IMREAD_GRAYSCALE)
# cv2.imshow("img",img)
# sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=3)
# sobelx2 = cv2.convertScaleAbs(sobelx)
# cv2.imshow("sobelx",sobelx)
# cv2.imshow("sobelx2",sobelx2)
# sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)
# sobely = cv2.convertScaleAbs(sobely)  
# sobelxy = cv2.addWeighted(sobelx2,0.5,sobely,0.5,0)
# cv2.imshow("sobelxy",sobelxy)
# cv2.waitKey(0)
# cv2.destroyAllWindows()


# #图像梯度-Sobel算子
# img = cv2.imread('lena.jpg',cv2.IMREAD_GRAYSCALE)
# cv2.imshow("img",img)
# sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=3)
# sobelx = cv2.convertScaleAbs(sobelx)
# sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)
# sobely = cv2.convertScaleAbs(sobely)
# sobelxy = cv2.addWeighted(sobelx,0.5,sobely,0.5,0)
# cv2.imshow("sobelxy",sobelxy)
# cv2.waitKey(0)
# cv2.destroyAllWindows()



# #图像梯度-laplacian算子,Scharr算子

# img = cv2.imread('lena.jpg',cv2.IMREAD_GRAYSCALE)
# cv2.imshow("img",img)
# sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=3)
# sobelx = cv2.convertScaleAbs(sobelx)
# sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)
# sobely = cv2.convertScaleAbs(sobely)
# sobelxy = cv2.addWeighted(sobelx,0.5,sobely,0.5,0)

# scharrx = cv2.Scharr(img,cv2.CV_64F,1,0)
# scharry = cv2.Scharr(img,cv2.CV_64F,0,1)
# scharrx = cv2.convertScaleAbs(scharrx)   
# scharry = cv2.convertScaleAbs(scharry)  
# scharrxy =  cv2.addWeighted(scharrx,0.5,scharry,0.5,0) 

# laplacian = cv2.Laplacian(img,cv2.CV_64F)
# laplacian = cv2.convertScaleAbs(laplacian) 

# res = np.hstack((sobelxy,scharrxy,laplacian))

# cv2.imshow("rex",res)
# cv2.waitKey(0)
# cv2.destroyAllWindows()




# # Canny边缘检测
# # 1) 使用高斯滤波器，以平滑图像，滤除噪声。
# # 2) 计算图像中每个像素点的梯度强度和方向。
# # 3) 应用非极大值（Non-Maximum Suppression）抑制，以消除边缘检测带来的杂散响应。
# # 4) 应用双阈值（Double-Threshold）检测来确定真实的和潜在的边缘。
# # 5) 通过抑制孤立的弱边缘最终完成边缘检测
# img=cv2.imread("lena.jpg",cv2.IMREAD_GRAYSCALE)
# v1=cv2.Canny(img,80,150)
# v2=cv2.Canny(img,50,100)
# res1 = np.hstack((v1,v2))
# cv2.imshow("rex",res1)
# cv2.waitKey(0)
# cv2.destroyAllWindows()


#图像金字塔
# img=cv2.imread("AM.png")
# up=cv2.pyrUp(img)
# down=cv2.pyrDown(img)
# up2=cv2.pyrUp(up)
# cv2.imshow("up",up)
# cv2.imshow("down",down)
# cv2.imshow("up2",up2)
# cv2.waitKey(0)
# cv2.destroyAllWindows()


# # 图像轮廓
# # cv2.findContours(img,mode,method)
# # mode:轮廓检索模式
# # RETR_EXTERNAL ：只检索最外面的轮廓；
# # RETR_LIST：检索所有的轮廓，并将其保存到一条链表当中；
# # RETR_CCOMP：检索所有的轮廓，并将他们组织为两层：顶层是各部分的外部边界，第二层是空洞的边界;
# # RETR_TREE：检索所有的轮廓，并重构嵌套轮廓的整个层次;
# # method:轮廓逼近方法
# # CHAIN_APPROX_NONE：以Freeman链码的方式输出轮廓，所有其他方法输出多边形（顶点的序列）。
# # CHAIN_APPROX_SIMPLE:压缩水平的、垂直的和斜的部分，也就是，函数只保留他们的终点部分。
# img = cv2.imread('contours.png')
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# ret,thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
# #  dst,
#contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# # img, 
# res = img.copy()
# res2 = img.copy()
# res = cv2.drawContours(res, contours, -1, (0, 0, 255), 5)
# res2 = cv2.drawContours(res2, contours, 0, (0, 0, 255), 5)
# res3 = np.hstack((img,res,res2))
# cv2.imshow("rex",res3)
# cnt = contours[1]
# area=cv2.contourArea(cnt)
# print(area)
# cv2.waitKey(0)
# cv2.destroyAllWindows()



# #轮廓近似
# img = cv2.imread('contours2.png')
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
# contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# cnt = contours[0]
# draw_img1 = img.copy()
# res = cv2.drawContours(draw_img1, [cnt], -1, (0, 0, 255), 2)
# cv2.imshow('res',res)

# epsilon = 0.15*cv2.arcLength(cnt,True) 
# approx = cv2.approxPolyDP(cnt,epsilon,True)
# draw_img2 = img.copy()
# res2 = cv2.drawContours(draw_img2, [approx], -1, (0, 0, 255), 2)
# cv2.imshow('res2',res2)

# draw_img3 = img.copy()
# x,y,w,h = cv2.boundingRect(cnt)
# res3 = cv2.rectangle(draw_img3,(x,y),(x+w,y+h),(0,255,0),2)
# cv2.imshow('res3',res3)
# cv2.waitKey(0)
# cv2.destroyAllWindows()


# # 傅里叶变换的作用
# # 高频：变化剧烈的灰度分量，例如边界
# # 低频：变化缓慢的灰度分量，例如一片大海
# # 滤波
# # 低通滤波器：只保留低频，会使得图像模糊
# # 高通滤波器：只保留高频，会使得图像细节增强
# # - opencv中主要就是cv2.dft()和cv2.idft()，输入图像需要先转换成np.float32 格式。
# # - 得到的结果中频率为0的部分会在左上角，通常要转换到中心位置，可以通过shift变换来实现。
# # - cv2.dft()返回的结果是双通道的（实部，虚部），通常还需要转换成图像格式才能展示（0,255）。
# img = cv2.imread('lena.jpg',0)
# img_float32 = np.float32(img)
# dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
# dft_shift = np.fft.fftshift(dft)
# # 得到灰度图能表示的形式
# magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
# plt.subplot(121),plt.imshow(img, cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
# plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
# plt.show()

# #低通滤波
# img = cv2.imread('lena.jpg',0)

# img_float32 = np.float32(img)

# dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
# dft_shift = np.fft.fftshift(dft)

# rows, cols = img.shape
# crow, ccol = int(rows/2) , int(cols/2)     # 中心位置

# # 低通滤波
# mask = np.zeros((rows, cols, 2), np.uint8)
# mask[crow-30:crow+30, ccol-30:ccol+30] = 1

# # IDFT
# fshift = dft_shift*mask
# f_ishift = np.fft.ifftshift(fshift)
# img_back = cv2.idft(f_ishift)
# img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])

# plt.subplot(121),plt.imshow(img, cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(img_back, cmap = 'gray')
# plt.title('Result'), plt.xticks([]), plt.yticks([])
# plt.show() 



# #高通滤波
# img = cv2.imread('lena.jpg',0)

# img_float32 = np.float32(img)

# dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
# dft_shift = np.fft.fftshift(dft)

# rows, cols = img.shape
# crow, ccol = int(rows/2) , int(cols/2)     # 中心位置

# # 高通滤波
# mask = np.ones((rows, cols, 2), np.uint8)
# mask[crow-30:crow+30, ccol-30:ccol+30] = 0

# # IDFT
# fshift = dft_shift*mask
# f_ishift = np.fft.ifftshift(fshift)
# img_back = cv2.idft(f_ishift)
# img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])

# plt.subplot(121),plt.imshow(img, cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(img_back, cmap = 'gray')
# plt.title('Result'), plt.xticks([]), plt.yticks([])

# plt.show()   





# 直方图
# cv2.calcHist(images,channels,mask,histSize,ranges)
# images: 原图像图像格式为 uint8 或 ﬂoat32。当传入函数时应 用中括号 [] 括来例如[img]
# channels: 同样用中括号括来它会告函数我们统幅图 像的直方图。如果入图像是灰度图它的值就是 [0]如果是彩色图像 的传入的参数可以是 [0][1][2] 它们分别对应着 BGR。 
# mask: 掩模图像。统整幅图像的直方图就把它为 None。但是如 果你想统图像某一分的直方图的你就制作一个掩模图像并 使用它。
# histSize:BIN 的数目。也应用中括号括来
# ranges: 像素值范围常为 [0,256]


# img = cv2.imread('cat.jpg',0) #0表示灰度图
# hist = cv2.calcHist([img],[0],None,[256],[0,256])
# hist.shape
# plt.hist(img.ravel(),256); 
# plt.show()
##有bug
# img2= img.copy()
# color = ('b','g','r')
# for i,col in enumerate(color): 
#     histr = cv2.calcHist([img2],[i],None,[256],[0,256]) 
#     plt.plot(histr,color = col) 
#     plt.xlim([0,256]) 



# 模板匹配
# 模板匹配和卷积原理很像，模板在原图像上从原点开始滑动，计算模板与（图像被模板覆盖的地方）的差别程度，这个差别程度的计算方法在opencv里有6种，然后将每次计算的结果放入一个矩阵里，作为结果输出。假如原图形是AxB大小，而模板是axb大小，则输出结果的矩阵是(A-a+1)x(B-b+1)

# TM_SQDIFF：计算平方不同，计算出来的值越小，越相关 
# TM_CCORR：计算相关性，计算出来的值越大，越相关
# TM_CCOEFF：计算相关系数，计算出来的值越大，越相关
# TM_SQDIFF_NORMED：计算归一化平方不同，计算出来的值越接近0，越相关
# TM_CCORR_NORMED：计算归一化相关性，计算出来的值越接近1，越相关
# TM_CCOEFF_NORMED：计算归一化相关系数，计算出来的值越接近1，越相关
# https://docs.opencv.org/3.3.1/df/dfb/group__imgproc__object.html#ga3a7850640f1fe1f58fe91a2d7583


# img = cv2.imread('lena.jpg', 0)
# template = cv2.imread('face.jpg', 0)
# h, w = template.shape[:2] 
# print(img.shape)
# print(template.shape)
# res = cv2.matchTemplate(img, template, cv2.TM_SQDIFF)
# min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# cv2.imshow('res',res)
# cv2.waitKey(0)
# cv2.destroyAllWindows()


#图像特征-harris角点检测
# cv2.cornerHarris()
# img： 数据类型为 ﬂoat32 的入图像
# blockSize： 角点检测中指定区域的大小
# ksize： Sobel求导中使用的窗口大小
# k： 取值参数为 [0.04,0.06]

# img = cv2.imread('contours2.png')
# print ('img.shape:',img.shape)
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# # gray = np.float32(gray)
# dst = cv2.cornerHarris(gray, 2, 3, 0.04)
# print ('dst.shape:',dst.shape)
# img[dst>0.01*dst.max()]=[0,0,255]
# cv2.imshow('dst',img) 
# cv2.waitKey(0) 
# cv2.destroyAllWindows()



#图像特征-Scale Invariant Feature Transform（SIFT） .xfeatures2d
#注：SIFT专利已于2020年到期。OpenCV >= 4.4 可直接使用 cv2.SIFT_create()（下方代码即此写法）；
#旧版本需降级 pip install opencv-python==3.4.1.15 opencv-contrib-python==3.4.1.15 并改用 cv2.xfeatures2d.SIFT_create()
# SIFT demo: detect keypoints on a grayscale image, visualize them,
# then compute the 128-dim descriptors and report the result shapes.
img = cv2.imread('contours2.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# cv2.SIFT_create() is the OpenCV >= 4.4 API (the SIFT patent expired in
# 2020); older contrib builds exposed it as cv2.xfeatures2d.SIFT_create().
sift = cv2.SIFT_create()

# Step 1: detect keypoints only (descriptors computed separately below).
kp = sift.detect(gray, None)
img = cv2.drawKeypoints(gray, kp, img)
cv2.imshow('drawKeypoints', img)
cv2.waitKey(0)  # 0 = wait indefinitely for any key press
cv2.destroyAllWindows()

# Step 2: compute descriptors for the previously detected keypoints.
kp, des = sift.compute(gray, kp)
print('keypoints:', np.array(kp).shape)
# was a bare `des.shape` expression — a no-op outside a notebook; print it
print('descriptors:', des.shape)
# removed trailing duplicate waitKey(0)/destroyAllWindows(): the windows
# were already destroyed above, and a second waitKey(0) with no open
# window can block the script indefinitely on some backends.
