import cv2 #opencv读取的格式是BGR
import matplotlib.pyplot as plt
import numpy as np 
get_ipython().run_line_magic("matplotlib", " inline ")

# Read the image from disk; OpenCV returns a numpy uint8 array in BGR order.
img=cv2.imread('images/cat.jpg')


# Inspect the raw pixel array.
img


# Display the image in a native OpenCV window (several windows may coexist).
cv2.imshow('image',img) 
# Wait time in milliseconds; 0 blocks until any key is pressed.
cv2.waitKey(0) 
cv2.destroyAllWindows()


def cv_show(name,img):
    """Show `img` in an OpenCV window titled `name`; block until a key is pressed."""
    cv2.imshow(name,img) 
    cv2.waitKey(0) 
    cv2.destroyAllWindows()

def plt_show(img):
    """Render an OpenCV BGR image with matplotlib.

    matplotlib expects RGB, so the channel axis is reversed before plotting.
    """
    rgb = img[:, :, ::-1]
    plt.imshow(rgb)


# Show the color image via matplotlib (channels converted inside plt_show).
plt_show(img)


# (height, width, channels)
img.shape


img=cv2.imread('images/cat.jpg',cv2.IMREAD_GRAYSCALE) #read as a grayscale image
img


# Grayscale images have no channel axis: (height, width)
img.shape


# Save the image to disk (output format inferred from the .png extension).
cv2.imwrite('images/mycat.png',img)


# numpy.ndarray
type(img)


# Total number of array elements (pixels).
img.size


# uint8
img.dtype


# Open a video file for frame-by-frame reading.
vc = cv2.VideoCapture('images/test.mp4')


# Check that the capture opened correctly.
# NOTE: the original bound the flag to `open`, shadowing the builtin open();
# renamed to `opened`.
if vc.isOpened():
    opened, frame = vc.read()  # read one frame to confirm decoding works
else:
    opened = False


# Play the video as grayscale until it ends or Esc is pressed.
while opened:
    ret, frame = vc.read()
    if frame is None:  # end of stream
        break
    if ret:
        gray = cv2.cvtColor(frame,  cv2.COLOR_BGR2GRAY)  # convert to grayscale
        cv2.imshow('result', gray)
        # waitKey returns the pressed key code (or -1 on timeout); 27 == Esc.
        if cv2.waitKey(100) & 0xFF == 27:
            break
vc.release()
cv2.destroyAllWindows()


img=cv2.imread('images/cat.jpg')
# ROI (region of interest): rows 0-49, columns 0-199.
cat=img[0:50,0:200] 
plt_show(cat)


# Split the image into its color channels (OpenCV order: B, G, R).
b,g,r=cv2.split(img)


r


# A single channel is a 2-D array: (height, width)
r.shape


# Merge the channels back into a 3-channel BGR image.
img=cv2.merge((b,g,r))
img.shape


# Keep only the R channel (zero out B and G).
cur_img = img.copy()
cur_img[:,:,0] = 0
cur_img[:,:,1] = 0
plt_show(cur_img)


# Keep only the G channel (zero out B and R).
cur_img = img.copy()
cur_img[:,:,0] = 0
cur_img[:,:,2] = 0
plt_show(cur_img)


# Keep only the B channel (zero out G and R).
cur_img = img.copy()
cur_img[:,:,1] = 0
cur_img[:,:,2] = 0
plt_show(cur_img)


top_size,bottom_size,left_size,right_size = (50,50,50,50)  #top, bottom, left, right padding (pixels)

replicate = cv2.copyMakeBorder(img, top_size, bottom_size, left_size, right_size, borderType=cv2.BORDER_REPLICATE)  # repeat the edge pixel
reflect = cv2.copyMakeBorder(img, top_size, bottom_size, left_size, right_size,cv2.BORDER_REFLECT)  # mirror, edge pixel included
reflect101 = cv2.copyMakeBorder(img, top_size, bottom_size, left_size, right_size, cv2.BORDER_REFLECT_101)  # mirror, edge pixel excluded
wrap = cv2.copyMakeBorder(img, top_size, bottom_size, left_size, right_size, cv2.BORDER_WRAP)  # tile the image
constant = cv2.copyMakeBorder(img, top_size, bottom_size, left_size, right_size,cv2.BORDER_CONSTANT, value=0)  # fill with constant 0 (black)


import matplotlib.pyplot as plt
# Compare the five border-padding modes in a 2x3 grid.
plt.subplot(231), plt.imshow(img, 'gray'), plt.title('ORIGINAL')
plt.subplot(232), plt.imshow(replicate, 'gray'), plt.title('REPLICATE')
plt.subplot(233), plt.imshow(reflect, 'gray'), plt.title('REFLECT')
plt.subplot(234), plt.imshow(reflect101, 'gray'), plt.title('REFLECT_101')
plt.subplot(235), plt.imshow(wrap, 'gray'), plt.title('WRAP')
plt.subplot(236), plt.imshow(constant, 'gray'), plt.title('CONSTANT')

plt.show()


img_cat=cv2.imread('images/cat.jpg')
img_dog=cv2.imread('images/dog.jpg')


# Adding a scalar shifts every pixel (uint8 arithmetic wraps modulo 256).
img_cat2= img_cat +10 
img_cat[:5,:,0]


img_cat2[:5,:,0]


# numpy '+' on uint8 overflows and wraps around (equivalent to % 256)
(img_cat + img_cat2)[:5,:,0] 


# cv2.add saturates instead: values are clipped at 255.
cv2.add(img_cat,img_cat2)[:5,:,0]


# Images of different shapes cannot be added directly
# img_cat + img_dog


img_cat.shape


# Resize the dog image to the cat image's (width, height) so the shapes match.
img_dog = cv2.resize(img_dog, (500, 414))
img_dog.shape


res = cv2.addWeighted(img_cat, 0.4, img_dog, 0.6, 0)  # image blending: dst = a*x + b*y + c


plt_show(res)


res = cv2.resize(img, (0, 0), fx=4, fy=4)  # scale by factors instead of a fixed size
plt_show(res)


res = cv2.resize(img, (0, 0), fx=1, fy=3)
plt_show(res)


import cv2 #opencv读取的格式是BGR
import numpy as np
import matplotlib.pyplot as plt #Matplotlib是RGB
get_ipython().run_line_magic("matplotlib", " inline ")

img=cv2.imread('images/cat.jpg')
img_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)  # BGR -> single-channel grayscale
img_gray.shape


def plt_show(img):
    """Display an OpenCV BGR image with matplotlib.

    OpenCV stores channels as BGR while matplotlib expects RGB, so the last
    axis is reversed before handing the array to imshow.
    """
    rgb = img[:, :, ::-1]
    plt.imshow(rgb)


cv2.imshow('img_gray',img_gray) 
cv2.waitKey(0) 
cv2.destroyAllWindows()


# matplotlib needs an explicit gray colormap for single-channel images.
plt.imshow(img_gray,cmap='gray')


# Convert to the HSV (hue / saturation / value) color space.
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
plt_show(hsv)


# Global thresholding at 127 with each of the five built-in modes.
ret, thresh1 = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY)      # >127 -> 255, else 0
ret, thresh2 = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY_INV)  # inverse of BINARY
ret, thresh3 = cv2.threshold(img_gray, 127, 255, cv2.THRESH_TRUNC)       # >127 -> 127, else unchanged
ret, thresh4 = cv2.threshold(img_gray, 127, 255, cv2.THRESH_TOZERO)      # >127 -> unchanged, else 0
ret, thresh5 = cv2.threshold(img_gray, 127, 255, cv2.THRESH_TOZERO_INV)  # inverse of TOZERO

titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']
images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]

for i in range(6):
    plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()


# A noisy test image for the smoothing filters below.
img = cv2.imread('images/lenaNoise.png')
plt_show(img)


# Mean filter:
# simple averaging convolution over a 3x3 neighborhood.
blur = cv2.blur(img, (3, 3))
plt_show(blur)


# Box filter:
# same as the mean filter when normalize=True.
box = cv2.boxFilter(img,-1,(3,3), normalize=True)  
plt_show(box)


# Box filter without normalization:
# sums instead of averages, so values easily saturate at 255.
box = cv2.boxFilter(img,-1,(3,3), normalize=False)  
plt_show(box)


# Gaussian filter:
# kernel weights follow a Gaussian, giving more weight to the center pixel.
aussian = cv2.GaussianBlur(img, (5, 5), 1)  
plt_show(aussian)


# Median filter:
# replaces each pixel with the neighborhood median (good for salt-and-pepper noise).
median = cv2.medianBlur(img, 5)  # median blur
plt_show(median)


# Show the three results side by side.
res = np.hstack((blur,aussian,median))
plt_show(res)


img = cv2.imread('images/dige.png')
plt_show(img)


# Erosion shrinks white regions: a pixel stays white only if its whole
# 3x3 neighborhood is white, so thin strokes and speckle noise disappear.
kernel = np.ones((3,3),np.uint8) 
erosion = cv2.erode(img,kernel,iterations = 1)
plt_show(erosion)


pie = cv2.imread('images/pie.png')
plt_show(pie)


# A larger kernel and/or more iterations produce stronger erosion.
kernel = np.ones((30,30),np.uint8) 
erosion_1 = cv2.erode(pie,kernel,iterations = 1)
erosion_2 = cv2.erode(pie,kernel,iterations = 2)
erosion_3 = cv2.erode(pie,kernel,iterations = 3)
res = np.hstack((erosion_1,erosion_2,erosion_3))
plt_show(res)


img = cv2.imread('images/dige.png')
plt_show(img)


kernel = np.ones((3,3),np.uint8) 
dige_erosion = cv2.erode(img,kernel,iterations = 1)
plt_show(dige_erosion)


# Dilation is the inverse operation: it grows white regions back.
kernel = np.ones((3,3),np.uint8) 
dige_dilate = cv2.dilate(dige_erosion,kernel,iterations = 1)
plt_show(dige_dilate)


pie = cv2.imread('images/pie.png')

kernel = np.ones((30,30),np.uint8) 
dilate_1 = cv2.dilate(pie,kernel,iterations = 1)
dilate_2 = cv2.dilate(pie,kernel,iterations = 2)
dilate_3 = cv2.dilate(pie,kernel,iterations = 3)
res = np.hstack((dilate_1,dilate_2,dilate_3))
plt_show(res)


# Opening: erode first, then dilate — removes small white noise while
# keeping the main shapes at roughly their original size.
img = cv2.imread('images/dige.png')
kernel = np.ones((5,5),np.uint8) 
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)

plt_show(opening)


# Closing: dilate first, then erode — fills small dark holes/gaps in shapes.
img = cv2.imread('images/dige.png')
kernel = np.ones((5,5),np.uint8)
closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
plt_show(closing)  # bug fix: the original displayed the untouched input `img`


# Morphological gradient = dilation - erosion (leaves the shape outline).
pie = cv2.imread('images/pie.png')
kernel = np.ones((7,7),np.uint8) 
dilate = cv2.dilate(pie,kernel,iterations = 5)
erosion = cv2.erode(pie,kernel,iterations = 5)

res = np.hstack((dilate,erosion))
plt_show(res)


gradient = cv2.morphologyEx(pie, cv2.MORPH_GRADIENT, kernel)
plt_show(gradient)


# Top hat = original - opening (keeps only the small bright details).
# NOTE: reuses the 7x7 `kernel` defined above.
img = cv2.imread('images/dige.png')
tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
plt_show(tophat)


# Black hat = closing - original (keeps only small dark details/outlines).
img = cv2.imread('images/dige.png')
blackhat  = cv2.morphologyEx(img,cv2.MORPH_BLACKHAT, kernel)
plt_show(blackhat)


img = cv2.imread('images/pie.png',cv2.IMREAD_GRAYSCALE)
plt.imshow(img,cmap='gray')


sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=3) #horizontal gradient (d/dx); CV_64F keeps negative values
plt.imshow(sobelx,plt.cm.gray)


sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=3)
sobelx = cv2.convertScaleAbs(sobelx)  #take absolute values so edges of both signs show
plt.imshow(sobelx,plt.cm.gray)


# Vertical gradient (d/dy).
sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)
sobely = cv2.convertScaleAbs(sobely)  
plt.imshow(sobely,plt.cm.gray)


# Combine the two directions with equal weights.
sobelxy = cv2.addWeighted(sobelx,0.5,sobely,0.5,0)
plt.imshow(sobelxy,plt.cm.gray)


# For comparison: dx=1, dy=1 in a single Sobel call.
sobelxy=cv2.Sobel(img,cv2.CV_64F,1,1,ksize=3)
sobelxy = cv2.convertScaleAbs(sobelxy) 
plt.imshow(sobelxy,plt.cm.gray)


img = cv2.imread('images/lena.jpg',cv2.IMREAD_GRAYSCALE)
plt.imshow(img,cmap='gray')


img = cv2.imread('images/lena.jpg',cv2.IMREAD_GRAYSCALE)
sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=3)
sobelx = cv2.convertScaleAbs(sobelx)
sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)
sobely = cv2.convertScaleAbs(sobely)
sobelxy = cv2.addWeighted(sobelx,0.5,sobely,0.5,0)
plt.imshow(sobelxy,plt.cm.gray)


img = cv2.imread('images/lena.jpg',cv2.IMREAD_GRAYSCALE)
sobelxy=cv2.Sobel(img,cv2.CV_64F,1,1,ksize=3)
sobelxy = cv2.convertScaleAbs(sobelxy) 
plt.imshow(sobelxy,plt.cm.gray)


# Differences between gradient operators: Sobel vs Scharr vs Laplacian.
img = cv2.imread('images/lena.jpg',cv2.IMREAD_GRAYSCALE)
sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=3)
sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)
sobelx = cv2.convertScaleAbs(sobelx)   
sobely = cv2.convertScaleAbs(sobely)  
sobelxy =  cv2.addWeighted(sobelx,0.5,sobely,0.5,0)  

# Scharr: 3x3 kernel with larger weights than Sobel, more sensitive to detail.
scharrx = cv2.Scharr(img,cv2.CV_64F,1,0)
scharry = cv2.Scharr(img,cv2.CV_64F,0,1)
scharrx = cv2.convertScaleAbs(scharrx)   
scharry = cv2.convertScaleAbs(scharry)  
scharrxy =  cv2.addWeighted(scharrx,0.5,scharry,0.5,0) 

# Laplacian: second-order derivative, direction-independent.
laplacian = cv2.Laplacian(img,cv2.CV_64F)
laplacian = cv2.convertScaleAbs(laplacian)   

res = np.hstack((img,sobelxy,scharrxy,laplacian))
plt.imshow(res,plt.cm.gray)


img = cv2.imread('images/lena.jpg',cv2.IMREAD_GRAYSCALE)
plt.imshow(img,plt.cm.gray)


# Canny edge detection with two (low, high) hysteresis threshold pairs;
# a higher pair keeps fewer, stronger edges.
img=cv2.imread("images/lena.jpg",cv2.IMREAD_GRAYSCALE)

v1=cv2.Canny(img,80,150)
v2=cv2.Canny(img,50,100)

res = np.hstack((img,v1,v2))
plt.imshow(res,plt.cm.gray)


# Same comparison on a different image: lower thresholds admit more edges.
img=cv2.imread("images/car.png",cv2.IMREAD_GRAYSCALE)

v1=cv2.Canny(img,120,250)
v2=cv2.Canny(img,50,100)

res = np.hstack((v1,v2))
plt.imshow(res,plt.cm.gray)


img=cv2.imread("images/AM.png")
plt_show(img)
print (img.shape)


# Gaussian pyramid up: doubles width and height.
up=cv2.pyrUp(img)
plt_show(up)
print (up.shape)


# Gaussian pyramid down: halves width and height.
down=cv2.pyrDown(img)
plt_show(down)
print (down.shape)


up2=cv2.pyrUp(up)
plt_show(up2)
print (up2.shape)


# Up then down returns to the original size but loses detail.
up=cv2.pyrUp(img)
up_down=cv2.pyrDown(up)
plt_show(up_down)


plt_show(np.hstack((img,up_down)))


# Difference between the original and the round-tripped image.
up=cv2.pyrUp(img)
up_down=cv2.pyrDown(up)
plt_show(img-up_down)


# One level of a Laplacian pyramid: original minus (down then up).
down=cv2.pyrDown(img)
down_up=cv2.pyrUp(down)
l_1=img-down_up
plt_show(l_1)


img = cv2.imread('images/car.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  #转换成灰度图
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY) #二值处理
plt.imshow(thresh,plt.cm.gray)


'''
contours中每个元素都是图像中的一个轮廓，用numpy中的ndarray表示
每个轮廓contours[i]对应4个hierarchy元素hierarchy[i][0] ~hierarchy[i][3]，分别表示后一个轮廓、前一个轮廓、父轮廓、内嵌轮廓的索引编号，如果没有对应项，则该值为负数。
'''
contours, hierarchy= cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
print('len(contours) : {}'.format(len(contours)))
print('hierarchy.shape : {}'.format(hierarchy.shape))


plt_show(img)


#传入绘制图像，轮廓，轮廓索引，颜色模式，线条厚度
# 注意需要copy,要不原图会变。。。
draw_img = img.copy()
res = cv2.drawContours(draw_img, contours, -1, (0, 0, 255), 2)
plt_show(res)


draw_img = img.copy()
res = cv2.drawContours(draw_img, contours, 0, (0, 0, 255), 2)
plt_show(res)


cnt = contours[0]


#面积
cv2.contourArea(cnt)


#周长，True表示闭合的
cv2.arcLength(cnt,True)


img = cv2.imread('images/contours2.png')

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = contours[0]

draw_img = img.copy()
res = cv2.drawContours(draw_img, [cnt], -1, (0, 0, 255), 2)

plt_show(np.hstack([img,res]))


# Contour approximation: epsilon is the maximum allowed deviation between the
# contour and its simplified polygon (here 15% of the arc length).
epsilon = 0.15*cv2.arcLength(cnt,True) 
approx = cv2.approxPolyDP(cnt,epsilon,True)

draw_img = img.copy()
res = cv2.drawContours(draw_img, [approx], -1, (0, 0, 255), 2)
plt_show(res)


img = cv2.imread('images/contours.png')

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = contours[0]

# Axis-aligned bounding rectangle of the contour.
x,y,w,h = cv2.boundingRect(cnt)
img = cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
plt_show(img)


# "Extent": ratio of the contour area to its bounding-rectangle area.
area = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
rect_area = w * h
extent = float(area) / rect_area
print ('轮廓面积与边界矩形比',extent)


# Minimum enclosing circle of the contour.
(x,y),radius = cv2.minEnclosingCircle(cnt) 
center = (int(x),int(y)) 
radius = int(radius) 
img = cv2.circle(img,center,radius,(0,255,0),2)
plt_show(img)


import numpy as np
import cv2
from matplotlib import pyplot as plt

# 2-D DFT of the grayscale image; cv2.dft needs float32 input and, with
# DFT_COMPLEX_OUTPUT, returns a 2-channel (real, imaginary) array.
img = cv2.imread('images/lena.jpg',0)
img_float32 = np.float32(img)

dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)  # move the zero-frequency term to the center
# Log magnitude, scaled into a range a grayscale plot can display
magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))

plt.subplot(121),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])

plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])

plt.show()


import numpy as np
import cv2
from matplotlib import pyplot as plt

img = cv2.imread('images/lena.jpg',0)

img_float32 = np.float32(img)

dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)

rows, cols = img.shape
crow, ccol = int(rows/2) , int(cols/2)     # center of the shifted spectrum

# Low-pass filter: keep only a 60x60 window of low frequencies at the center.
mask = np.zeros((rows, cols, 2), np.uint8)
mask[crow-30:crow+30, ccol-30:ccol+30] = 1

# Inverse DFT of the masked spectrum -> smoothed (blurred) image.
fshift = dft_shift*mask
f_ishift = np.fft.ifftshift(fshift)
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])

plt.subplot(121),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(img_back, cmap = 'gray')
plt.title('Result'), plt.xticks([]), plt.yticks([])

plt.show()                


img = cv2.imread('images/lena.jpg',0)

img_float32 = np.float32(img)

dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)

rows, cols = img.shape
crow, ccol = int(rows/2) , int(cols/2)     # center of the shifted spectrum

# High-pass filter: zero out the central 60x60 low-frequency window.
mask = np.ones((rows, cols, 2), np.uint8)
mask[crow-30:crow+30, ccol-30:ccol+30] = 0

# Inverse DFT of the masked spectrum -> only edges/fine detail remain.
fshift = dft_shift*mask
f_ishift = np.fft.ifftshift(fshift)
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])

plt.subplot(121),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(img_back, cmap = 'gray')
plt.title('Result'), plt.xticks([]), plt.yticks([])

plt.show()    


import cv2 #opencv读取的格式是BGR
import numpy as np
import matplotlib.pyplot as plt#Matplotlib是RGB
get_ipython().run_line_magic("matplotlib", " inline ")


# Grayscale histogram: 256 bins over the value range [0, 256).
img = cv2.imread('images/cat.jpg',0) #0 means read as grayscale
hist = cv2.calcHist([img],[0],None,[256],[0,256])
hist.shape


plt.hist(img.ravel(),256); 
plt.show()


# Per-channel histograms of the color image (OpenCV order: b, g, r).
img = cv2.imread('images/cat.jpg') 
color = ('b','g','r')
for i,col in enumerate(color): 
    histr = cv2.calcHist([img],[i],None,[256],[0,256]) 
    plt.plot(histr,color = col) 
    plt.xlim([0,256]) 


# Build a mask: white (255) marks the region to keep.
mask = np.zeros(img.shape[:2], np.uint8)
print (mask.shape)
mask[100:300, 100:400] = 255
plt.imshow(mask,cmap=plt.cm.gray)


img = cv2.imread('images/cat.jpg', 0)
plt.imshow(img,plt.cm.gray)


masked_img = cv2.bitwise_and(img, img, mask=mask)#keep pixels only where the mask is set
plt.imshow(masked_img,plt.cm.gray)


# Histograms of the full image vs. only the masked region.
hist_full = cv2.calcHist([img], [0], None, [256], [0, 256])
hist_mask = cv2.calcHist([img], [0], mask, [256], [0, 256])


plt.subplot(221), plt.imshow(img, 'gray')
plt.subplot(222), plt.imshow(mask, 'gray')
plt.subplot(223), plt.imshow(masked_img, 'gray')
plt.subplot(224), plt.plot(hist_full), plt.plot(hist_mask)
plt.xlim([0, 256])
plt.show()


img = cv2.imread('images/clahe.jpg',0) #0 means read as grayscale
plt.hist(img.ravel(),256); 
plt.show()


# Global histogram equalization: spreads intensities over the full range.
equ = cv2.equalizeHist(img) 
plt.hist(equ.ravel(),256)
plt.show()


res = np.hstack((img,equ))
plt.imshow(res,cmap='gray')


# CLAHE: contrast-limited adaptive equalization applied per 8x8 tile,
# with clipping to limit noise amplification.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) 


res_clahe = clahe.apply(img)
res = np.hstack((img,equ,res_clahe))
plt.imshow(res,cmap='gray')


# Template matching: slide the template over the image and score each position.
img = cv2.imread('images/lena.jpg', 0)
template = cv2.imread('images/face.jpg', 0)
h, w = template.shape[:2] 


img.shape


template.shape


# The six built-in matching score functions, as attribute-name strings.
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
           'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']


# Result is (H-h+1, W-w+1): one score per candidate template position.
res = cv2.matchTemplate(img, template, cv2.TM_SQDIFF)
res.shape


min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)


min_val


max_val


min_loc


max_loc


for meth in methods:
    img2 = img.copy()

    # Resolve the method-name string (e.g. 'cv2.TM_CCOEFF') to the cv2
    # constant. getattr replaces the original eval(), which is unnecessary
    # and risky for a plain attribute lookup.
    method = getattr(cv2, meth.split('.')[-1])
    print (meth)
    res = cv2.matchTemplate(img, template, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # For TM_SQDIFF / TM_SQDIFF_NORMED the best match is the MINIMUM score;
    # for all other methods it is the maximum.
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)

    # Draw the matched rectangle (255 = white on the grayscale image).
    cv2.rectangle(img2, top_left, bottom_right, 255, 2)

    plt.subplot(121), plt.imshow(res, cmap='gray')
    plt.xticks([]), plt.yticks([])  # hide the axes
    plt.subplot(122), plt.imshow(img2, cmap='gray')
    plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)
    plt.show()


# Multi-object matching: keep every position whose score exceeds a threshold.
img_rgb = cv2.imread('images/mario.jpg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('images/mario_coin.jpg', 0)
h, w = template.shape[:2]

res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.8
# Keep coordinates whose match score is at least 80%.
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):  # np.where returns (rows, cols); reverse to (x, y)
    bottom_right = (pt[0] + w, pt[1] + h)
    cv2.rectangle(img_rgb, pt, bottom_right, (0, 0, 255), 2)

plt.imshow(img_rgb[:,:,[2,1,0]])  # BGR -> RGB for display