from __future__ import print_function
import cv2 as cv
from matplotlib import pyplot as plt
import numpy as np
import time
import argparse
import glob
import math

# s_1_人脸检测
# face_cascade = cv.CascadeClassifier('haarcascade_frontalface_alt.xml')
# eyes_cascade = cv.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')
# cap = cv.VideoCapture(0)
# while(1):
#     # 读取帧
#     _, frame = cap.read()
#     frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
#     frame_gray = cv.equalizeHist(frame_gray)
#     #-- 检测面部
#     faces = face_cascade.detectMultiScale(frame_gray)
#     for (x,y,w,h) in faces:
#         center = (x + w//2, y + h//2)
#         frame = cv.ellipse(frame, center, (w//2, h//2), 0, 0, 360, (255, 0, 255), 4)
#         faceROI = frame_gray[y:y+h,x:x+w]
#         #-- 在每张面部上检测眼睛
#         eyes = eyes_cascade.detectMultiScale(faceROI)
#         for (x2,y2,w2,h2) in eyes:
#             eye_center = (x + x2 + w2//2, y + y2 + h2//2)
#             radius = int(round((w2 + h2)*0.25))
#             frame = cv.circle(frame, eye_center, radius, (255, 0, 0 ), 4)

#     cv.imshow('frame',frame)
    
#     k = cv.waitKey(5) & 0xFF
#     if k == 27:
#         break
# cv.destroyAllWindows()


# 9_1_图像去噪

# img = cv.imread('IMG_3829.jpg')
# dst = cv.fastNlMeansDenoisingColored(img,None,10,10,7,21)
# plt.subplot(121),plt.imshow(img)
# plt.subplot(122),plt.imshow(dst)
# plt.show()

# cap = cv.VideoCapture(0)
# # 创建5个帧的列表
# img = [cap.read()[1] for i in range(5)]
# # 将所有转化为灰度
# gray = [cv.cvtColor(i, cv.COLOR_BGR2GRAY) for i in img]
# # 将所有转化为float64
# gray = [np.float64(i) for i in gray]
# # 创建标准差为10的噪声（randn 乘以 10）
# noise = np.random.randn(*gray[1].shape)*10
# # 在图像上添加噪声
# noisy = [i+noise for i in gray]
# # 转化为uint8
# noisy = [np.uint8(np.clip(i,0,255)) for i in noisy]
# # 对第三帧进行降噪
# dst = cv.fastNlMeansDenoisingMulti(noisy, 2, 5, None, 4, 7, 35)
# plt.subplot(131),plt.imshow(gray[2],'gray')
# plt.subplot(132),plt.imshow(noisy[2],'gray')
# plt.subplot(133),plt.imshow(dst,'gray')
# plt.show()

# 8_6_OpenCV中的K均值
# X = np.random.randint(25,50,(25,2))
# Y = np.random.randint(60,85,(25,2))
# Z = np.vstack((X,Y))
# # 将数据转换为 np.float32
# Z = np.float32(Z)
# # 定义停止标准，应用K均值
# criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# ret,label,center=cv.kmeans(Z,2,None,criteria,10,cv.KMEANS_RANDOM_CENTERS)
# # 现在分离数据, Note the flatten()
# A = Z[label.ravel()==0]
# B = Z[label.ravel()==1]
# # 绘制数据
# plt.scatter(A[:,0],A[:,1])
# plt.scatter(B[:,0],B[:,1],c = 'r')
# plt.scatter(center[:,0],center[:,1],s = 80,c = 'y', marker = 's')
# plt.xlabel('Height'),plt.ylabel('Weight')
# plt.show()

# 3.颜色量化
# img = cv.imread('IMG_3829.jpg')
# Z = img.reshape((-1,3))
# Z = np.float32(Z)
# # 定义终止标准 聚类数并应用k均值
# criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# K = 3
# ret,label,center=cv.kmeans(Z,K,None,criteria,10,cv.KMEANS_RANDOM_CENTERS)
# center = np.uint8(center)
# res = center[label.flatten()]
# res2 = res.reshape((img.shape))
# cv.imshow('res2',res2)
# cv.waitKey(0)
# cv.destroyAllWindows()


# 8_2_使用OCR手写数据集运行KNN
# img = cv.imread('digits.png')
# gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# # 现在我们将图像分割为5000个单元格，每个单元格为20x20
# cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]
# x = np.array(cells)
# # 现在我们准备train_data和test_data。
# train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400)
# test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400)
# # 为训练和测试数据创建标签
# k = np.arange(10)
# train_labels = np.repeat(k,250)[:,np.newaxis]
# test_labels = train_labels.copy()
# # 初始化kNN，训练数据，然后使用k = 1的测试数据对其进行测试
# knn = cv.ml.KNearest_create()
# knn.train(train, cv.ml.ROW_SAMPLE, train_labels)
# ret,result,neighbours,dist = knn.findNearest(test,k=5)
# # 现在，我们检查分类的准确性
# #为此，将结果与test_labels进行比较，并检查哪个错误
# matches = result==test_labels
# correct = np.count_nonzero(matches)
# accuracy = correct*100.0/result.size
# print( accuracy )

# 8_1_理解KNN
# 包含(x,y)值的25个已知/训练数据的特征集
# trainData  = np.random.randint(0, 100, (25, 2)).astype(np.float32)
# # 用数字0和1分别标记红色或蓝色
# responses = np.random.randint(0, 2, (25, 1)).astype(np.float32)
# # 取红色族并绘图
# red = trainData[responses.ravel() == 0]
# plt.scatter(red[:,0],red[:,1],80,'r','^')
# # 取蓝色族并绘图
# blue = trainData[responses.ravel()==1]
# plt.scatter(blue[:,0],blue[:,1],80,'b','s')

# newcomer = np.random.randint(0,100,(3,2)).astype(np.float32)
# plt.scatter(newcomer[:,0],newcomer[:,1],80,'g','o')
# knn = cv.ml.KNearest_create()
# knn.train(trainData, cv.ml.ROW_SAMPLE, responses)
# ret, results, neighbours ,dist = knn.findNearest(newcomer, 3)
# print( "ret:  {}\n".format(ret) )
# print( "result:  {}\n".format(results) )
# print( "neighbours:  {}\n".format(neighbours) )
# print( "distance:  {}\n".format(dist) )
# plt.show()


# 7_2_姿态估计
# cap = cv.VideoCapture(0)
# img = cv.imread('ca.jpg')

# criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# objp = np.zeros((6*7,3), np.float32)
# objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
# while(1):
#     # ret, img = cap.read()
#     gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
#     ret, corners = cv.findChessboardCorners(gray, (7,6),None)
#     if ret == True:
#         corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
#         # 找到旋转和平移矢量。
#         # 注意：mtx（相机内参矩阵）和 dist（畸变系数）在本片段中未定义，
#         # 需先用 cv.calibrateCamera 标定得到，否则此处会抛 NameError。
#         ret,rvecs, tvecs = cv.solvePnP(objp, corners2, mtx, dist)
#         # 将3D点投影到图像平面
#         imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, mtx, dist)
#         img = draw(img,corners2,imgpts)
#     cv.imshow('img',img)
#     k = cv.waitKey(30) & 0xff
#     if k == 27:
#         break

# cv.waitKey(0)
# cv.destroyAllWindows()

# 6_3_光流
# cap = cv.VideoCapture(0)
# ret, frame1 = cap.read()
# prvs = cv.cvtColor(frame1,cv.COLOR_BGR2GRAY)
# hsv = np.zeros_like(frame1)
# hsv[...,1] = 255
# while(1):
#     ret, frame2 = cap.read()
#     next = cv.cvtColor(frame2,cv.COLOR_BGR2GRAY)
#     flow = cv.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
#     mag, ang = cv.cartToPolar(flow[...,0], flow[...,1])
#     hsv[...,0] = ang*180/np.pi/2
#     hsv[...,2] = cv.normalize(mag,None,0,255,cv.NORM_MINMAX)
#     bgr = cv.cvtColor(hsv,cv.COLOR_HSV2BGR)
#     cv.imshow('frame2',bgr)
#     k = cv.waitKey(30) & 0xff
#     if k == 27:
#         break
#     prvs = next

# cap = cv.VideoCapture(0)
# # 用于ShiTomasi拐点检测的参数
# feature_params = dict( maxCorners = 100,
#                        qualityLevel = 0.3,
#                        minDistance = 7,
#                        blockSize = 7 )
# # lucas kanade光流参数
# lk_params = dict( winSize  = (15,15),
#                   maxLevel = 2,
#                   criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
# # 创建一些随机的颜色
# color = np.random.randint(0,255,(100,3))
# # 拍摄第一帧并在其中找到拐角
# ret, old_frame = cap.read()
# old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
# p0 = cv.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
# # 创建用于作图的掩码图像
# mask = np.zeros_like(old_frame)
# while(1):
#     ret,frame = cap.read()
#     frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
#     # 计算光流
#     p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
#     # 选择良好点
#     good_new = p1[st==1]
#     good_old = p0[st==1]
#     # 绘制跟踪
#     for i,(new,old) in enumerate(zip(good_new, good_old)):
#         a,b = new.ravel()
#         c,d = old.ravel()
#         # 坐标必须为整数：OpenCV 4.x 的绘图函数不接受浮点坐标
#         mask = cv.line(mask, (int(a),int(b)),(int(c),int(d)), color[i].tolist(), 2)
#         frame = cv.circle(frame,(int(a),int(b)),5,color[i].tolist(),-1)
#     img = cv.add(frame,mask)
#     cv.imshow('frame',img)
#     k = cv.waitKey(30) & 0xff
#     if k == 27:
#         break
#     # 现在更新之前的帧和点
#     old_gray = frame_gray.copy()
#     p0 = good_new.reshape(-1,1,2)

# 6_2_Meanshift和Camshift
# cap = cv.VideoCapture(0)
# # 视频的第一帧
# ret,frame = cap.read()
# # 设置窗口的初始位置
# x, y, w, h = 300, 200, 100, 50 # simply hardcoded the values
# track_window = (x, y, w, h)
# # 设置初始ROI来追踪
# roi = frame[y:y+h, x:x+w]
# hsv_roi =  cv.cvtColor(roi, cv.COLOR_BGR2HSV)
# mask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
# roi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180])
# cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)
# # 设置终止条件，可以是10次迭代，也可以至少移动1 pt
# term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
# while(1):
#     ret, frame = cap.read()
#     if ret == True:
#         hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
#         dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1)
#         # 应用meanshift来获取新位置
#         ret, track_window = cv.meanShift(dst, track_window, term_crit)
#         ret, track_window = cv.CamShift(dst, track_window, term_crit)

#         # 在图像上绘制
#         x,y,w,h = track_window
#         img2 = cv.rectangle(frame, (x,y), (x+w,y+h), 255,2)
#         cv.imshow('img2',img2)
#         k = cv.waitKey(30) & 0xff
#         if k == 27:
#             break
#     else:
#         break

# 6_1_如何使用背景分离方法
# backSub = cv.createBackgroundSubtractorMOG2()
# backSub = cv.createBackgroundSubtractorKNN()
# capture = cv.VideoCapture(0)
# if not capture.isOpened():  # isOpened 是方法；不加括号时恒为真，判断永远不生效
#     print('Unable to open: ')
#     exit(0)
# while True:
#     ret, frame = capture.read()
#     if frame is None:
#         break

#     fgMask = backSub.apply(frame)


#     cv.rectangle(frame, (10, 2), (100,20), (255,255,255), -1)
#     cv.putText(frame, str(capture.get(cv.CAP_PROP_POS_FRAMES)), (15, 15),
#                cv.FONT_HERSHEY_SIMPLEX, 0.5 , (0,0,0))

#     # cv.imshow('Frame', frame)
#     cv.imshow('FG Mask', fgMask)

#     keyboard = cv.waitKey(30)
#     if keyboard == ord('q') or keyboard == 27:  # waitKey 返回 int，须与 ord('q') 比较
#         break

# 5_8_ORB（定向快速和旋转简要）
# img = cv.imread('IMG_3829.jpg',0)
# # 初始化ORB检测器
# orb = cv.ORB_create()
# # 用ORB寻找关键点
# kp = orb.detect(img,None)
# # 用ORB计算描述符
# kp, des = orb.compute(img, kp)
# # 仅绘制关键点的位置，而不绘制大小和方向
# img2 = cv.drawKeypoints(img, kp, None, color=(0,255,0), flags=0)
# plt.imshow(img2), plt.show()

# 5_7_BRIEF（二进制的鲁棒独立基本特征）
# img = cv.imread('IMG_3829.jpg',0)
# # 初始化FAST检测器
# star = cv.xfeatures2d.StarDetector_create()
# # 初始化BRIEF提取器
# brief = cv.xfeatures2d.BriefDescriptorExtractor_create()
# # 找到STAR的关键点
# kp = star.detect(img,None)
# # 计算BRIEF的描述符
# kp, des = brief.compute(img, kp)
# print( brief.descriptorSize() )
# print( des.shape )
# img2 = cv.drawKeypoints(img, kp, None, color=(255,0,0))

# plt.subplot(121),plt.imshow(img2,cmap = 'gray')
# plt.title('BG'), plt.xticks([]), plt.yticks([])
# plt.show()

# 5_6_用于角点检测的FAST算法
# img = cv.imread('IMG_3829.jpg',0)
# # 用默认值初始化FAST对象
# fast = cv.FastFeatureDetector_create()
# # 寻找并绘制关键点
# kp = fast.detect(img,None)
# img2 = cv.drawKeypoints(img, kp, None, color=(255,0,0))
# # 打印所有默认参数
# print( "Threshold: {}".format(fast.getThreshold()) )
# print( "nonmaxSuppression:{}".format(fast.getNonmaxSuppression()) )
# print( "neighborhood: {}".format(fast.getType()) )
# print( "Total Keypoints with nonmaxSuppression: {}".format(len(kp)) )
# # 关闭非极大抑制
# fast.setNonmaxSuppression(0)
# kp = fast.detect(img,None)
# print( "Total Keypoints without nonmaxSuppression: {}".format(len(kp)) )
# img3 = cv.drawKeypoints(img, kp, None, color=(255,0,0))

# plt.subplot(121),plt.imshow(img2,cmap = 'gray')
# plt.title('BG'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(img3,cmap = 'gray')
# plt.title('FG'), plt.xticks([]), plt.yticks([])
# plt.show()

# 5_4_SIFT & 5_5_SURF
# img = cv.imread('IMG_3829.jpg')
# gray= cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# sift = cv.xfeatures2d.SURF_create() # 由于专利原因 4.0 以上版本未编译进来
# # kp = sift.detect(gray,None)
# kp, des = sift.detectAndCompute(gray,None)
# img=cv.drawKeypoints(gray,kp,img)
# plt.imshow(img),plt.show()

# 5_3_Shi-Tomasi拐角探测器和良好的跟踪功能
# img = cv.imread('IMG_3829.jpg')
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# corners = cv.goodFeaturesToTrack(gray,10,0.01,10)
# corners = np.intp(corners)  # np.int0 已在 NumPy 2.0 中移除，改用 np.intp
# for i in corners:
#     x,y = i.ravel()
#     cv.circle(img,(x,y),3,255,-1)
# plt.imshow(img),plt.show()

#5_2 哈里斯角检测
# img = cv.imread('IMG_3829.jpg')
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# gray = np.float32(gray)
# dst = cv.cornerHarris(gray,2,3,0.04)
# #result用于标记角点，并不重要
# dst = cv.dilate(dst,None)
# #最佳值的阈值，它可能因图像而异。
# img[dst>0.01*dst.max()]=[0,0,255]
# cv.imshow('dst',img)
# if cv.waitKey(0) & 0xff == 27:
#     cv.destroyAllWindows()

#SubPixel精度的转角
# img = cv.imread('IMG_3829.jpg')
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# # 寻找哈里斯角
# gray = np.float32(gray)
# dst = cv.cornerHarris(gray,2,3,0.04)
# dst = cv.dilate(dst,None)
# ret, dst = cv.threshold(dst,0.01*dst.max(),255,0)
# dst = np.uint8(dst)
# # 寻找质心
# ret, labels, stats, centroids = cv.connectedComponentsWithStats(dst)
# # 定义停止和完善拐角的条件
# criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 0.001)
# corners = cv.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria)
# # 绘制
# res = np.hstack((centroids,corners))
# res = np.intp(res)  # np.int0 已在 NumPy 2.0 中移除，改用 np.intp
# img[res[:,1],res[:,0]]=[0,0,255]
# img[res[:,3],res[:,2]] = [0,255,0]
# cv.imshow('dst',img)
# if cv.waitKey(0) & 0xff == 27:
#     cv.destroyAllWindows()

# 4_15_图像分割与分水岭算法
# img = cv.imread('coins.jpg')
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
# # cv.imshow('detected circles',thresh)
# # cv.waitKey(0)
# # cv.destroyAllWindows()
# # 噪声去除
# kernel = np.ones((3,3),np.uint8)
# opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations = 2)
# # 确定背景区域
# sure_bg = cv.dilate(opening,kernel,iterations=3)
# # 寻找前景区域
# dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)
# ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# # 找到未知区域
# sure_fg = np.uint8(sure_fg)
# unknown = cv.subtract(sure_bg,sure_fg)
# # 类别标记
# ret, markers = cv.connectedComponents(sure_fg)
# # 为所有的标记加1，保证背景是0而不是1
# markers = markers+1
# # 现在让所有的未知区域为0
# markers[unknown==255] = 0
# markers = cv.watershed(img,markers) 
# img[markers == -1] = [255,0,0]

# plt.subplot(121),plt.imshow(sure_bg,cmap = 'gray')
# plt.title('BG'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(sure_fg,cmap = 'gray')
# plt.title('FG'), plt.xticks([]), plt.yticks([])
# plt.subplot(131),plt.imshow(img,cmap = 'gray')
# plt.title('IMG'), plt.xticks([]), plt.yticks([])
# plt.show()

# 4_14_霍夫圈变换
# import numpy as np
# import cv2 as cv
# img = cv.imread('IMG_3829.jpg',0)
# img = cv.medianBlur(img,5)
# cimg = cv.cvtColor(img,cv.COLOR_GRAY2BGR)
# circles = cv.HoughCircles(img,cv.HOUGH_GRADIENT,1,20,
#                             param1=50,param2=30,minRadius=10,maxRadius=60)
# circles = np.uint16(np.around(circles))
# for i in circles[0,:]:
#     # 绘制外圆
#     cv.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
#     # 绘制圆心
#     cv.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
# cv.imshow('detected circles',cimg)
# cv.waitKey(0)
# cv.destroyAllWindows()

# 4_13_霍夫线变换
# img = cv.imread(cv.samples.findFile('IMG_3829.jpg'))
# gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# edges = cv.Canny(gray,50,150,apertureSize = 3)
# lines = cv.HoughLines(edges,1,np.pi/180,200)
# lines = cv.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10)
# for line in lines:
#     x1,y1,x2,y2 = line[0]
#     cv.line(img,(x1,y1),(x2,y2),(0,255,0),2)


# for line in lines:
#     rho,theta = line[0]
#     a = np.cos(theta)
#     b = np.sin(theta)
#     x0 = a*rho
#     y0 = b*rho
#     x1 = int(x0 + 1000*(-b))
#     y1 = int(y0 + 1000*(a))
#     x2 = int(x0 - 1000*(-b))
#     y2 = int(y0 - 1000*(a))
#     cv.line(img,(x1,y1),(x2,y2),(0,0,255),2)
#     cv.imshow('aaa', img)
#     if cv.waitKey(20) & 0xFF == 27:
#         break
    
# cv.waitKey(0)
# cv.destroyAllWindows()

#4_12_模板匹配
# import cv2 as cv
# import numpy as np
# from matplotlib import pyplot as plt
# img = cv.imread('IMG_3829.jpg',0)
# img2 = img.copy()
# template = img[100:300, 100:400]
# w, h = template.shape[::-1]
# # 列表中所有的6种比较方法
# methods = ['cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR',
#             'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']
# for meth in methods:
#     img = img2.copy()
#     method = eval(meth)
#     # 应用模板匹配
#     res = cv.matchTemplate(img,template,method)
#     min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
#     # 如果方法是TM_SQDIFF或TM_SQDIFF_NORMED，则取最小值
#     if method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
#         top_left = min_loc
#     else:
#         top_left = max_loc
#     bottom_right = (top_left[0] + w, top_left[1] + h)
#     cv.rectangle(img,top_left, bottom_right, 255, 2)
#     plt.subplot(121),plt.imshow(res,cmap = 'gray')
#     plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
#     plt.subplot(122),plt.imshow(img,cmap = 'gray')
#     plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
#     plt.suptitle(meth)
#     plt.show()

#4_11_傅里叶变换
#为什么拉普拉斯算子是高通滤波器？
# 没有缩放参数的简单均值滤波器
# mean_filter = np.ones((3,3))
# # 创建高斯滤波器
# x = cv.getGaussianKernel(5,10)
# gaussian = x*x.T
# # 不同的边缘检测滤波器
# # x方向上的scharr
# scharr = np.array([[-3, 0, 3],
#                    [-10,0,10],
#                    [-3, 0, 3]])
# # x方向上的sobel
# sobel_x= np.array([[-1, 0, 1],
#                    [-2, 0, 2],
#                    [-1, 0, 1]])
# # y方向上的sobel
# sobel_y= np.array([[-1,-2,-1],
#                    [0, 0, 0],
#                    [1, 2, 1]])
# # 拉普拉斯变换
# laplacian=np.array([[0, 1, 0],
#                     [1,-4, 1],
#                     [0, 1, 0]])
# filters = [mean_filter, gaussian, laplacian, sobel_x, sobel_y, scharr]
# filter_name = ['mean_filter', 'gaussian','laplacian', 'sobel_x', \
#                 'sobel_y', 'scharr_x']
# fft_filters = [np.fft.fft2(x) for x in filters]
# fft_shift = [np.fft.fftshift(y) for y in fft_filters]
# mag_spectrum = [np.log(np.abs(z)+1) for z in fft_shift]
# for i in range(6):
#     plt.subplot(2,3,i+1),plt.imshow(mag_spectrum[i],cmap = 'gray')
#     plt.title(filter_name[i]), plt.xticks([]), plt.yticks([])
# plt.show()

# img = cv.imread('IMG_3829.jpg',0)
# dft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT)
# dft_shift = np.fft.fftshift(dft)
# magnitude_spectrum = 20*np.log(cv.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))

# rows, cols = img.shape
# crow,ccol = int(rows/2) , int(cols/2)
# # 首先创建一个掩码，中心正方形为1，其余全为零
# mask = np.zeros((rows,cols,2),np.uint8)
# mask[crow-30:crow+30, ccol-30:ccol+30] = 1
# # 应用掩码和逆DFT
# fshift = dft_shift*mask
# f_ishift = np.fft.ifftshift(fshift)
# img_back = cv.idft(f_ishift)
# img_back = cv.magnitude(img_back[:,:,0],img_back[:,:,1])
# img_back = np.real(img_back)
# plt.subplot(121),plt.imshow(img, cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(img_back, cmap = 'gray')
# plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
# plt.show()

# img = cv.imread('IMG_3829.jpg',0)
# f = np.fft.fft2(img)
# fshift = np.fft.fftshift(f)
# rows, cols = img.shape
# crow,ccol = int(rows/2), int(cols/2)
# fshift[crow-20:crow+21, ccol-20:ccol+21] = 0
# f_ishift = np.fft.ifftshift(fshift)
# img_back = np.fft.ifft2(f_ishift)
# img_back = np.real(img_back)

# fshift4 = np.fft.fftshift(f)
# fshift4[crow-40:crow+41, ccol-40:ccol+41] = 0
# f_ishift4 = np.fft.ifftshift(fshift4)
# img_back4 = np.fft.ifft2(f_ishift4)
# img_back4 = np.real(img_back4)

# plt.subplot(121),plt.imshow(img_back, cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(img_back4, cmap = 'gray')
# plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
# plt.show()

# 4_10_4_直方图-4：直方图反投影
# roi = cv.imread('5.png')
# hsv = cv.cvtColor(roi,cv.COLOR_BGR2HSV)
# target = cv.imread('5.png')
# target = target[0:340, 0:390]
# hsvt = cv.cvtColor(target,cv.COLOR_BGR2HSV)
# # 计算对象的直方图
# roihist = cv.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
# # 直方图归一化并利用反传算法
# cv.normalize(roihist,roihist,0,255,cv.NORM_MINMAX)
# dst = cv.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
# # 用圆盘进行卷积
# disc = cv.getStructuringElement(cv.MORPH_ELLIPSE,(5,5))
# cv.filter2D(dst,-1,disc,dst)
# # 应用阈值作与操作
# ret,thresh = cv.threshold(dst,50,255,0)
# thresh = cv.merge((thresh,thresh,thresh))
# res = cv.bitwise_and(target,thresh)
# # res = np.vstack((target,thresh,res))
# cv.imshow('sss', res)
# cv.waitKey(0)
# cv.destroyAllWindows()

# 4_10_3_直方图3：二维直方图
# img = cv.imread('5.png')
# hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
# hist = cv.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
# plt.imshow(hist,interpolation = 'nearest')
# plt.show()

#4_10_2_直方图-2：直方图均衡
# img = cv.imread('IMG_3829.jpg',0)
# hist,bins = np.histogram(img.flatten(),256,[0,256])
# cdf = hist.cumsum()
# cdf_normalized = cdf * float(hist.max()) / cdf.max()

# cdf_m = np.ma.masked_equal(cdf,0)
# cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())
# cdf = np.ma.filled(cdf_m,0).astype('uint8')
# img2 = cdf[img] 
# img3 = cdf_normalized[img] 
# img4 = cv.equalizeHist(img)

# clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
# img5 = clahe.apply(img)

# plt.plot(cdf_normalized, color = 'b')
# plt.hist(img.flatten(),256,[0,256], color = 'r')
# plt.xlim([0,256])
# plt.legend(('cdf','histogram'), loc = 'upper left')
# plt.show()
# plt.subplot(221), plt.imshow(img, 'gray')
# plt.subplot(222), plt.imshow(img2,'gray')
# plt.subplot(223), plt.imshow(img3,'gray')
# plt.subplot(224), plt.imshow(img4,'gray')
# plt.subplot(222), plt.imshow(img5,'gray')
# edges = cv.Canny(img,100,200)
# plt.subplot(222), plt.imshow(edges,'gray')

# plt.show()

#4_10_1_直方图-1：查找，绘制，分析
# img = cv.imread('5.png')
# plt.hist(img.ravel(),256,[0,256]); plt.show()

# hist = cv.calcHist([img],[0],None,[256],[0,256])
# hist,bins = np.histogram(img.ravel(),256,[0,256])
# hist = np.bincount(img.ravel(), minlength = 256)

# hist = cv.calcHist([img],[0],None,[256],[0,256])
# plt.plot(hist, color='b')
# hist = cv.calcHist([img],[1],None,[256],[0,256])
# plt.plot(hist, color='g')
# hist = cv.calcHist([img],[2],None,[256],[0,256])
# plt.plot(hist, color='r')
# plt.show()

# img = cv.imread('IMG_3829.jpg',0)
# # create a mask
# mask = np.zeros(img.shape[:2], np.uint8)
# mask[100:300, 100:400] = 255
# masked_img = cv.bitwise_and(img,img,mask = mask)
# # 计算掩码区域和非掩码区域的直方图
# # 检查作为掩码的第三个参数
# hist_full = cv.calcHist([img],[0],None,[256],[0,256])
# hist_mask = cv.calcHist([img],[0],mask,[256],[0,256])
# plt.subplot(221), plt.imshow(img, 'gray')
# plt.subplot(222), plt.imshow(mask,'gray')
# plt.subplot(223), plt.imshow(masked_img, 'gray')
# plt.subplot(224), plt.plot(hist_full), plt.plot(hist_mask)
# plt.xlim([0,256])
# plt.show()

# 4_9 边缘检测
# 凸性缺陷
# img = cv.imread('25.png')
# img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# ret,thresh = cv.threshold(img_gray, 127, 255,0)
# contours,hierarchy = cv.findContours(thresh,2,1)
# cnt = contours[0]
# hull = cv.convexHull(cnt,returnPoints = False)
# defects = cv.convexityDefects(cnt,hull)
# for i in range(defects.shape[0]):
#     s,e,f,d = defects[i,0]
#     start = tuple(cnt[s][0])
#     end = tuple(cnt[e][0])
#     far = tuple(cnt[f][0])
#     cv.line(img,start,end,[0,255,0],2)
#     cv.circle(img,far,5,[0,0,255],-1)
# cv.imshow('img',img)
# cv.waitKey(0)
# cv.destroyAllWindows()

# 4_9_1_OpenCV中的轮廓
# im = cv.imread('25.png')
# imgray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
# ret, thresh = cv.threshold(imgray, 127, 255, 0)
# contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)

# cnt = contours[0]
# epsilon = 0.01*cv.arcLength(cnt,True) 
# approx = cv.approxPolyDP(cnt,epsilon,True)

# cv.drawContours(im, approx, -1, (0,255,0), 3)

# x,y,w,h = cv.boundingRect(cnt)
# cv.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)

# (x,y),radius = cv.minEnclosingCircle(cnt)
# center = (int(x),int(y))
# radius = int(radius)
# cv.circle(im,center,radius,(0,255,0),2)

# cv.imshow('img', im)
# cv.waitKey(0)
# cv.destroyAllWindows()

# plt.subplot(121),plt.imshow(thresh,cmap = 'gray')
# plt.title('Original Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(hierarchy,cmap = 'gray')
# plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
# plt.show()


# 4_7_Canny Edge检测
# img = cv.imread('IMG_3829.jpg',0)
# edges = cv.Canny(img,100,200)
# plt.subplot(121),plt.imshow(img,cmap = 'gray')
# plt.title('Original Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(edges,cmap = 'gray')
# plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
# plt.show()

# 4_6_图像梯度
# img = cv.imread('IMG_3829.jpg',0)
# laplacian = cv.Laplacian(img,cv.CV_64F)
# sobelx = cv.Sobel(img,cv.CV_64F,1,0,ksize=5)
# sobely = cv.Sobel(img,cv.CV_64F,0,1,ksize=5)
# plt.subplot(2,2,1),plt.imshow(img,cmap = 'gray')
# plt.title('Original'), plt.xticks([]), plt.yticks([])
# plt.subplot(2,2,2),plt.imshow(laplacian,cmap = 'gray')
# plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
# plt.subplot(2,2,3),plt.imshow(sobelx,cmap = 'gray')
# plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
# plt.subplot(2,2,4),plt.imshow(sobely,cmap = 'gray')
# plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
# plt.show()
# 在我们的最后一个示例中，输出数据类型为cv.CV_8U或np.uint8。但这有一个小问题。黑色到白色的过渡被视为正斜率（具有正值），而白色到黑色的过渡被视为负斜率（具有负值）。因此，当您将数据转换为np.uint8时，所有负斜率均​​设为零。简而言之，您会错过这一边缘信息。
# 如果要检测两个边缘，更好的选择是将输出数据类型保留为更高的形式，例如cv.CV_16S，cv.CV_64F等，取其绝对值，然后转换回cv.CV_8U。 下面的代码演示了用于水平Sobel滤波器和结果差异的此过程。
# img = cv.imread('IMG_3829.jpg',0)
# # Output dtype = cv.CV_8U
# sobelx8u = cv.Sobel(img,cv.CV_8U,1,0,ksize=5)
# # Output dtype = cv.CV_64F. Then take its absolute and convert to cv.CV_8U
# sobelx64f = cv.Sobel(img,cv.CV_64F,1,0,ksize=5)
# abs_sobel64f = np.absolute(sobelx64f)
# sobel_8u = np.uint8(abs_sobel64f)
# plt.subplot(1,3,1),plt.imshow(img,cmap = 'gray')
# plt.title('Original'), plt.xticks([]), plt.yticks([])
# plt.subplot(1,3,2),plt.imshow(sobelx8u,cmap = 'gray')
# plt.title('Sobel CV_8U'), plt.xticks([]), plt.yticks([])
# plt.subplot(1,3,3),plt.imshow(sobel_8u,cmap = 'gray')
# plt.title('Sobel abs(CV_64F)'), plt.xticks([]), plt.yticks([])
# plt.show()


#4_5_形态转换
# img = cv.imread('IMG_3829.jpg', 0)
# kernel = np.ones((5,5),np.uint8)
# erosion = cv.erode(img, kernel, iterations = 1) #侵蚀
# erosion = cv.dilate(img,kernel,iterations = 1) #扩张
# erosion = cv.morphologyEx(img, cv.MORPH_OPEN, kernel)  #开运算
# erosion = cv.morphologyEx(img, cv.MORPH_CLOSE, kernel)  #闭运算
# erosion = cv.morphologyEx(img, cv.MORPH_GRADIENT, kernel) #图像扩张和侵蚀之间的差别
# erosion = cv.morphologyEx(img, cv.MORPH_TOPHAT, kernel)  #输入图像和图像开运算之差
# blackhat = cv.morphologyEx(img, cv.MORPH_BLACKHAT, kernel) #输入图像和图像闭运算之差
# cv.imshow('ddd', erosion)
# cv.waitKey(0)
# cv.destroyAllWindows()
# plt.subplot(121),plt.imshow(img),plt.title('Original')
# plt.subplot(122),plt.imshow(erosion),plt.title('Blurred')
# plt.show()

# 4_4_图像平滑
# img = cv.imread('IMG_3829.jpg')
# blur = cv.blur(img,(5,5)) #平均模糊
# blur = cv.GaussianBlur(img,(5,5), 0) #高斯模糊
# blur = cv.medianBlur(img,5) #中位模糊
# blur = cv.bilateralFilter(img,9,75,75) #双边滤波

# plt.subplot(121),plt.imshow(img),plt.title('Original')
# # plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(blur),plt.title('Blurred')
# # plt.xticks([]), plt.yticks([])
# plt.show()

# 2D卷积（图像过滤）
# img = cv.imread('IMG_3829.jpg')
# kernel = np.ones((10,10),np.float32)/100
# dst = cv.filter2D(img,-1,kernel)
# plt.subplot(121),plt.imshow(img),plt.title('Original')
# plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(dst),plt.title('Averaging')
# plt.xticks([]), plt.yticks([])
# plt.show()

#4_3
# img = cv.imread('IMG_3829.jpg',0)
# # 全局阈值
# ret1,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
# # Otsu阈值
# ret2,th2 = cv.threshold(img,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# # 高斯滤波后再采用Otsu阈值
# blur = cv.GaussianBlur(img,(5,5),0)
# ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
# # 绘制所有图像及其直方图
# images = [img, 0, th1,
#           img, 0, th2,
#           blur, 0, th3]
# titles = ['Original Noisy Image','Histogram','Global Thresholding (v=127)',
#           'Original Noisy Image','Histogram',"Otsu's Thresholding",
#           'Gaussian filtered Image','Histogram',"Otsu's Thresholding"]
# for i in range(3):
#     plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
#     plt.title(titles[i*3]), plt.xticks([]), plt.yticks([])
#     plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
#     plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([])
#     plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
#     plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([])
# plt.show()

# img = cv.imread('IMG_3829.jpg',0)
# thresh1 = cv.adaptiveThreshold(img,127,cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 21, 10)
# thresh2 = cv.adaptiveThreshold(img,127,cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 21, 10)
# titles = ['Original Image','ADAPTIVE_THRESH_MEAN_C','ADAPTIVE_THRESH_GAUSSIAN_C']
# images = [img, thresh1, thresh2]
# for i in range(3):
#     plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
#     plt.title(titles[i])
#     plt.xticks([]),plt.yticks([])
# plt.show()

# img = cv.imread('IMG_3829.jpg',0)
# ret,thresh1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
# ret,thresh2 = cv.threshold(img,127,255,cv.THRESH_BINARY_INV)
# ret,thresh3 = cv.threshold(img,127,255,cv.THRESH_TRUNC)
# ret,thresh4 = cv.threshold(img,127,255,cv.THRESH_TOZERO)
# ret,thresh5 = cv.threshold(img,127,255,cv.THRESH_TOZERO_INV)
# titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
# images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
# for i in range(6):
#     plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
#     plt.title(titles[i])
#     plt.xticks([]),plt.yticks([])
# plt.show()

#4_2
#仿射变换
# img = cv.imread('IMG_3829.jpg')
# rows, cols = img.shape[:2]
# pts1 = np.float32([[50,50],[200,50],[50,200]])
# pts2 = np.float32([[10,100],[200,50],[100,250]])
# M = cv.getAffineTransform(pts1,pts2)
# dst = cv.warpAffine(img,M,(cols,rows))
# plt.subplot(121),plt.imshow(img),plt.title('Input')
# plt.subplot(122),plt.imshow(dst),plt.title('Output')
# plt.show()
# cv.waitKey(0)
# cv.destroyAllWindows()

#旋转
# img = cv.imread('IMG_3829.jpg')
# rows, cols = img.shape[:2]
# # cols-1 和 rows-1 是坐标限制
# M = cv.getRotationMatrix2D(((cols-1)/2, (rows-1)/2), 90, 0.5)
# print(M)
# dst = cv.warpAffine(img, M, (cols, rows))
# cv.imshow('img',dst)
# cv.waitKey(0)
# cv.destroyAllWindows()

# 移动
# img = cv.imread('IMG_3829.jpg')
# rows, cols = img.shape[:2]
# M = np.float32([[1,0,100],[0,1,50]])
# print(M)
# dst = cv.warpAffine(img,M,(cols,rows))
# cv.imshow('img',dst)
# cv.waitKey(0)
# cv.destroyAllWindows()

# 缩放
# img = cv.imread('IMG_3829.jpg')
# rows, cols = img.shape[:2]
# print(img.shape, rows, cols)
# res = cv.resize(img, (600, 400), interpolation = cv.INTER_CUBIC)
# cv.imshow('res', res)
# cv.waitKey(0)
# cv.destroyAllWindows()

#4_1
# cap = cv.VideoCapture(0)
# while (1):
#     # 读取帧
#     _, frame = cap.read()
#     # 转换颜色空间 BGR 到 HSV
#     hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
#     # 定义HSV中蓝色的范围
#     lower_blue = np.array([106, 43, 46])
#     upper_blue = np.array([130, 255, 255])
#     # 设置HSV的阈值使得只取蓝色
#     mask_B = cv.inRange(hsv, lower_blue, upper_blue)

#     lower_green = np.array([35, 43, 46])
#     upper_green = np.array([77, 255, 255])
#     mask_G = cv.inRange(hsv, lower_green, upper_green)

#     lower_red1 = np.array([0, 43, 46])
#     upper_red1 = np.array([20, 255, 255])
#     mask_R1 = cv.inRange(hsv, lower_red1, upper_red1)

#     lower_red2 = np.array([150, 43, 46])
#     upper_red2 = np.array([180, 255, 255])
#     mask_R2 = cv.inRange(hsv, lower_red2, upper_red2)

#     mask=mask_B+mask_G+mask_R1+mask_R2
#     # 将掩膜和图像逐像素相加
#     res = cv.bitwise_and(frame, frame, mask=mask)

#     cv.imshow('frame', frame)
#     cv.imshow('mask', mask)
#     cv.imshow('res', res)
#     k = cv.waitKey(5) & 0xFF
#     if k == 27:
#         break
# cv.destroyAllWindows()

# cap = cv.VideoCapture(0)
# while(1):
#     # 读取帧
#     _, frame = cap.read()
#     # 转换颜色空间 BGR 到 HSV
#     hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
#     # 定义HSV中蓝色的范围
#     lower_blue = np.array([120,50,50])
#     upper_blue = np.array([130,155,255])
#     # 设置HSV的阈值使得只取蓝色
#     mask = cv.inRange(hsv, lower_blue, upper_blue)
#     # 将掩膜和图像逐像素相加
#     res = cv.bitwise_and(frame,frame, mask= mask)
#     cv.imshow('frame',frame)
#     cv.imshow('mask',mask)
#     cv.imshow('res',res)
#     k = cv.waitKey(5) & 0xFF
#     if k == 27:
#         break
# cv.destroyAllWindows()


# 颜色转换代码
# flags = [i for i in dir(cv) if i.startswith('COLOR_')]
# print( flags )

#3_3
# cv.setUseOptimized(False)
# img1 = cv.imread('IMG_3829.jpg')
# e1 = cv.getTickCount()
# for i in range(5,49,2):
#     img1 = cv.medianBlur(img1,i)
# e2 = cv.getTickCount()
# t = (e2 - e1)/cv.getTickFrequency()
# print( t )

# print(cv.useOptimized())

#3_2
# 加载两张图片
# img2 = cv.imread('IMG_3829.jpg')
# img1 = cv.imread('5.png')
# # 我想把logo放在左上角，所以我创建了ROI
# rows,cols,channels = img2.shape
# roi = img1[0:rows, 0:cols ]
# # 现在创建logo的掩码，并同时创建其相反掩码
# img2gray = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)

# ret, mask = cv.threshold(img2gray, 100, 255, cv.THRESH_TOZERO)
# mask_inv = cv.bitwise_not(mask)
# # 现在将ROI中logo的区域涂黑
# img1_bg = cv.bitwise_and(roi,roi,mask = mask_inv)
# # 仅从logo图像中提取logo区域
# img2_fg = cv.bitwise_and(img2,img2,mask = mask)
# # 将logo放入ROI并修改主图像
# dst = cv.add(img1_bg,img2_fg)
# img1[0:rows, 0:cols ] = dst
# cv.imshow('res',img1)
# cv.waitKey(0)
# cv.destroyAllWindows()

# img1 = cv.imread('IMG_3829.jpg')
# img22 = cv.imread('5.png')
# rows,cols,channels = img1.shape
# img2 = img22[0:rows, 0:cols]
# op = 0
# dir = True
# while(True):
#     dst = cv.addWeighted(img1, op, img2, 1 - op, 0)
#     cv.imshow('dst',dst)
#     if dir:
#         op = op + 0.01
#     else:
#         op = op - 0.01
#     if op > 1:
#         dir = False
#     elif op < 0:
#         dir = True

#     if cv.waitKey(20) & 0xFF == 27:
#         break
# cv.waitKey(0)
# cv.destroyAllWindows()

#3_1
# img = cv.imread('IMG_3829.jpg', -1)
# print(img.shape, img.size, img.dtype)

# ball = img[280:340, 330:390]
# img[273:333, 100:160] = ball 
# cv.imshow('image', img)
# cv.waitKey(0)
# cv.destroyAllWindows()

# b,g,r = cv.split(img)
# print(b, img [:, :, 0])

# img [:, :, 2] = 0
# cv.imshow('image', img)
# cv.waitKey(0)
# cv.destroyAllWindows()

# BLUE = [255,0,0]
# replicate = cv.copyMakeBorder(img,10,10,10,10,cv.BORDER_REPLICATE)
# reflect = cv.copyMakeBorder(img,10,10,10,10,cv.BORDER_REFLECT)
# reflect101 = cv.copyMakeBorder(img,10,10,10,10,cv.BORDER_REFLECT_101)
# wrap = cv.copyMakeBorder(img,10,10,10,10,cv.BORDER_WRAP)
# constant= cv.copyMakeBorder(img,10,10,10,10,cv.BORDER_CONSTANT,value=BLUE)
# plt.subplot(231),plt.imshow(img,'gray'),plt.title('ORIGINAL')
# plt.subplot(232),plt.imshow(replicate,'gray'),plt.title('REPLICATE')
# plt.subplot(233),plt.imshow(reflect,'gray'),plt.title('REFLECT')
# plt.subplot(234),plt.imshow(reflect101,'gray'),plt.title('REFLECT_101')
# plt.subplot(235),plt.imshow(wrap,'gray'),plt.title('WRAP')
# plt.subplot(236),plt.imshow(constant,'gray'),plt.title('CONSTANT')
# plt.show()
# cv.waitKey(0)
# cv.destroyAllWindows()


#2_5
# def nothing(x):
#     pass
# # 创建一个黑色的图像，一个窗口
# img = np.zeros((300,512,3), np.uint8)
# cv.namedWindow('image')
# # 创建颜色变化的轨迹栏
# cv.createTrackbar('R','image',0,255,nothing)
# cv.createTrackbar('G','image',0,255,nothing)
# cv.createTrackbar('B','image',0,255,nothing)
# # 为 ON/OFF 功能创建开关
# switch = '0 : OFF \n1 : ON'
# cv.createTrackbar(switch, 'image',0,1,nothing)
# while(1):
#     cv.imshow('image',img)
#     k = cv.waitKey(1) & 0xFF
#     if k == 27:
#         break
#     # 得到四条轨迹的当前位置
#     r = cv.getTrackbarPos('R','image')
#     g = cv.getTrackbarPos('G','image')
#     b = cv.getTrackbarPos('B','image')
#     s = cv.getTrackbarPos(switch,'image')
#     if s == 0:
#         img[:] = 0
#     else:
#         img[:] = [b,g,r]
# cv.destroyAllWindows()

#2_4
# events = [i for i in dir(cv) if 'EVENT' in i]
# print( events )

# drawing = False # 如果按下鼠标，则为真
# mode = True # 如果为真，绘制矩形。按 m 键可以切换到曲线
# ix,iy = -1,-1

# def draw_circle(event,x,y,flags,param):
#     global ix,iy,drawing,mode
#     if event == cv.EVENT_LBUTTONDOWN:
#         drawing = True
#         ix,iy = x,y
#     elif event == cv.EVENT_MOUSEMOVE:
#         if drawing == True:
#             if mode == True:
#                 cv.rectangle(img,(ix,iy),(x,y),(0,255,0),-1)
#             else:
#                 cv.circle(img,(x,y),5,(0,0,255),-1)
#     elif event == cv.EVENT_LBUTTONUP:
#         drawing = False
#         if mode == True:
#             cv.rectangle(img,(ix,iy),(x,y),(0,255,0),-1)
#         else:
#             cv.circle(img,(x,y),5,(0,0,255),-1)

# # 创建一个黑色的图像，一个窗口，并绑定到窗口的功能
# img = np.zeros((512,512,3), np.uint8)
# cv.namedWindow('image')
# cv.setMouseCallback('image',draw_circle)
# while(1):
#     cv.imshow('image',img)
#     if cv.waitKey(20) & 0xFF == 27:
#         break
# cv.destroyAllWindows()

#2_3
# img = np.zeros((512,512,3), np.uint8)
# # 绘制一条厚度为5的蓝色对角线
# cv.line(img,(0,0),(511,511),(255,0,0),5)
# font = cv.FONT_HERSHEY_SIMPLEX
# cv.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv.LINE_AA)

# cv.ellipse(img,(256,256),(100,50),0,0,180,255,-1)

# cv.circle(img,(447,63), 63, (0,0,255), -1)

# cv.circle(img,(447,63), 30, (0,0,0), -1)

# Pts = np.array([[447,63],[512-126, 126],[512, 126]], np.int32)
# Pts=Pts.reshape((-1,1,2))
# # cv.polylines(img,[Pts],True,(0,0,255))
# cv.fillPoly(img, [Pts], (0, 0, 0));

# cv.imshow('img', img)

# cv.waitKey(0)
# cv.destroyAllWindows()


#2_2
# cap = cv.VideoCapture(0)
# # cap = cv.VideoCapture('output.avi')

# if not cap.isOpened():
#     print("Cannot open camera")
#     exit()

# fourcc = cv.VideoWriter_fourcc(*'XVID')
# # out = cv.VideoWriter('output.avi', fourcc, 20.0, (640,  480))


# print(cap.get(cv.CAP_PROP_FRAME_WIDTH))
# while True:
#     # 逐帧捕获
#     ret, frame = cap.read()
#     # 如果正确读取帧，ret为True
#     if not ret:
#         print("Can't receive frame (stream end?). Exiting ...")
#         break
#     frame = cv.flip(frame, 1)
#     # 将翻转后的帧写入输出视频
#     # out.write(frame)

#     # 对每一帧的处理操作写在这里
#     gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
#     cv.imshow('frame', gray)
#     if cv.waitKey(1) == ord('q'):
#         break
# cap.release()
# # out.release()

# cv.destroyAllWindows()

# 2_1
# img = cv.imread('IMG_3829.jpg', -1)

# b,g,r = cv.split(img)
# img2 = cv.merge([r,g,b])


# cv.imshow('image', img2)

# plt.imshow(img2, cmap = 'gray', interpolation = 'bicubic')
# plt.xticks([]), plt.yticks([])  # 隐藏 x 轴和 y 轴上的刻度值
# plt.show()

# k = cv.waitKey(0)

# if k == 27:         # 等待ESC退出
#     cv.destroyAllWindows()
# elif k == ord('s'): # 等待关键字，保存和退出
#     cv.imwrite('messigray.png',img)
#     cv.destroyAllWindows()

