import logging

import cv2
import imutils
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
# Sample images used by the demo calls at the bottom of the file.
# NOTE(review): paths are relative; cv2.imread returns None if a file is missing.
img1 = cv2.imread("images/zp1.jpg")
img2 = cv2.imread("images/two.jpg")
def gamma(img):
    """Display gamma-corrected versions of img for several gamma values."""
    normalized = img / 255
    for g in (0.1, 0.5, 1.2, 2.2):
        # out = 255 * (in/255)^gamma, truncated back to uint8.
        corrected = (255 * normalized ** g).astype('uint8')
        cv_show(corrected)
def contrast(img):
    """Piecewise-linear contrast stretching demo.

    Maps input intensities through three linear segments defined by the
    control points (r1, s1) and (r2, s2), then shows the original image
    and the stretched result.
    """
    def pixelVal(pix, r1, s1, r2, s2):
        # Map a single intensity through the piecewise-linear transfer curve.
        if 0 <= pix <= r1:
            return (s1 / r1) * pix
        elif r1 < pix <= r2:
            return ((s2 - s1) / (r2 - r1)) * (pix - r1) + s1
        else:
            return ((255 - s2) / (255 - r2)) * (pix - r2) + s2

    # Control points of the transfer curve.
    r1, s1, r2, s2 = 70, 0, 140, 255

    # Vectorize so the scalar mapping applies element-wise to the array.
    pixelVal_vec = np.vectorize(pixelVal)

    # BUG FIX: the original ignored the `img` argument and re-read a
    # hard-coded file ("images/one.jpg"); operate on the passed image.
    # BUG FIX: np.vectorize returns float64 — convert back to uint8 so
    # cv2.imshow renders it correctly (the mapped values lie in [0, 255]).
    contrast_stretched = pixelVal_vec(img.copy(), r1, s1, r2, s2).astype(np.uint8)
    cv_show(img)
    cv_show(contrast_stretched)
def equHis(img):
    """Histogram-equalize img (as grayscale) and show input vs output."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    equalized = cv2.equalizeHist(gray)
    # Original and equalized images side by side for comparison.
    comparison = np.hstack((gray, equalized))
    cv2.imshow('image', comparison)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def p_new():
    """Create a blank RGBA Pillow image and log its basic properties."""
    image = Image.new(mode='RGBA', size=(100, 100), color=(255, 0, 255))
    # BUG FIX: `logging` was never imported at module level, and the
    # original passed the value as an extra positional argument with no
    # %s placeholder in the message (a runtime formatting error).
    logging.info('宽度(px): %s', image.width)
    logging.info('高度(px): %s', image.height)
    logging.info('尺寸（px）: %s', image.size)
    logging.info('颜色模式: %s', image.mode)
    logging.info('格式（扩展名）: %s', image.format)
    logging.info('只读（1为只读）: %s', image.readonly)
    logging.info('字典信息: %s', image.info)

def show_xy():
    """Draw a few primitives and text on a blank canvas, then show it with pyplot."""
    # 1. Blank black canvas.
    canvas = np.zeros((512, 512, 3), np.uint8)
    # 2. Draw shapes and text.
    cv2.line(canvas, (0, 0), (511, 511), (255, 0, 0), 5)
    cv2.rectangle(canvas, (384, 0), (510, 128), (0, 255, 0), 3)
    cv2.circle(canvas, (447, 63), 63, (0, 0, 255), -1)
    cv2.putText(canvas, 'OpenCV', (10, 500), cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2, cv2.LINE_AA)
    # 3. Show via matplotlib (reverse the channel axis: BGR -> RGB).
    plt.imshow(canvas[:, :, ::-1])
    plt.title('匹配结果')
    plt.xticks([])
    plt.yticks([])
    plt.show()
def test1():
    """Show the inverted grayscale of img1 and a 1.2x-brightened copy."""
    gray_img = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    reverse_img = 255 - gray_img

    # BUG FIX: the original assigned gray*1.2 pixel-by-pixel into a uint8
    # array, which overflows for any pixel brighter than 212 (and runs an
    # O(h*w) Python loop). Scale vectorized and clip to the uint8 range.
    random_img = np.clip(gray_img.astype(np.float32) * 1.2, 0, 255).astype(np.uint8)

    cv2.imshow('reverse_img', imutils.resize(reverse_img, 800))
    cv2.imshow('random_img', imutils.resize(random_img, 800))
    if cv2.waitKey(0) == 27:  # Esc closes the windows
        cv2.destroyAllWindows()
def draw():
    """Draw assorted shapes on a white canvas and display it."""
    canvas = np.full((512, 512, 3), 255, np.uint8)
    cv2.line(canvas, (100, 100), (400, 400), (255, 0, 0), 5)
    cv2.rectangle(canvas, (200, 20), (400, 120), (0, 255, 0), 3)
    cv2.circle(canvas, (100, 400), 50, (0, 0, 255), 2)
    cv2.circle(canvas, (250, 400), 50, (0, 0, 255), 0)
    # Filled half-ellipse (thickness -1 fills the arc).
    cv2.ellipse(canvas, (256, 256), (100, 50), 0, 0, 180, (0, 255, 255), -1)
    corners = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
    cv2.polylines(canvas, [corners], True, (0, 0, 0), 2)
    cv2.imshow('img', canvas)
    if cv2.waitKey(0) == 27:  # Esc
        cv2.destroyAllWindows()
def add(img1, img2):
    """Compare cv2.add (saturating) with the numpy + operator (wrapping)."""
    # cv2.resize takes (width, height), hence the swapped shape indices.
    img2_1 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))
    # BUG FIX: `logging` was never imported at module level.
    logging.info('%s', img1.shape)
    logging.info('%s', img2_1.shape)
    # cv2.add saturates at 255; numpy + wraps around modulo 256.
    img3 = cv2.add(img1, img2_1)
    img4 = img1 + img2_1
    # Show both results side by side (BGR -> RGB for matplotlib).
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 8), dpi=100)
    axes[0].imshow(img3[:, :, ::-1])
    axes[0].set_title("cv2 add")
    axes[1].imshow(img4[:, :, ::-1])
    axes[1].set_title("numpy add")
    plt.show()
def fx(img):
    """Show img next to a copy scaled by factors fx=0.5, fy=0.3."""
    scaled = cv2.resize(img, (0, 0), fx=0.5, fy=0.3)
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 8), dpi=100)
    for ax, picture, label in zip(axes, (img, scaled), ("origin", "fx")):
        ax.imshow(picture[:, :, ::-1])  # BGR -> RGB
        ax.set_title(label)
    plt.show()
def add_wt(img1, img2):
    """Blend img1 (70%) with img2 (30%) and display the result."""
    resized = cv2.resize(img2, (img1.shape[1], img1.shape[0]))
    blended = cv2.addWeighted(img1, 0.7, resized, 0.3, 0)
    plt.figure(figsize=(8, 8))
    plt.imshow(blended[:, :, ::-1])  # BGR -> RGB
    plt.show()
def border(img):
    """Demonstrate the cv2.copyMakeBorder border types on img."""
    top, bottom, left, right = 50, 50, 50, 50
    variants = [
        ('ORIGINAL', img),
        ('REPLICATE', cv2.copyMakeBorder(img, top, bottom, left, right, borderType=cv2.BORDER_REPLICATE)),
        ('REFLECT', cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_REFLECT)),
        ('REFLECT_101', cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_REFLECT_101)),
        ('WRAP', cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_WRAP)),
        ('CONSTANT', cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0)),
    ]
    # Lay the six variants out on a 2x3 subplot grid (231..236).
    for position, (label, picture) in enumerate(variants, start=231):
        plt.subplot(position)
        plt.imshow(picture, 'gray')
        plt.title(label)
    plt.show()
def hsv(img):
    """Convert img from BGR to HSV and display it."""
    converted = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    cv2.imshow("hsv", converted)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def meng(img):
    """Show the five basic cv2.threshold modes applied at threshold 127."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    modes = [
        cv2.THRESH_BINARY, cv2.THRESH_BINARY_INV, cv2.THRESH_TRUNC,
        cv2.THRESH_TOZERO, cv2.THRESH_TOZERO_INV,
    ]
    # threshold() returns (retval, image); keep only the image.
    images = [img] + [cv2.threshold(gray, 127, 255, mode)[1] for mode in modes]
    titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']
    for index, (title, picture) in enumerate(zip(titles, images), start=1):
        plt.subplot(2, 3, index)
        plt.imshow(picture, 'gray')
        plt.title(title)
        plt.xticks([])
        plt.yticks([])
    plt.show()
def blur(img):
    """Compare mean, box, Gaussian and median filtering on img."""
    mean_blur = cv2.blur(img, (3, 3))                          # mean filter
    box_norm = cv2.boxFilter(img, -1, (3, 3), normalize=True)  # box filter
    box_raw = cv2.boxFilter(img, -1, (3, 3), normalize=False)  # box, unnormalized
    gaussian = cv2.GaussianBlur(img, (5, 5), 1)                # Gaussian filter
    median = cv2.medianBlur(img, 5)                            # median filter
    cv2.imshow("res", np.hstack((mean_blur, gaussian, median)))
    cv2.waitKey(0)
    cv2.imshow("res", np.hstack((box_norm, box_raw)))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def erode(pie):
    """Show the effect of 1, 2 and 3 erosion iterations with a 30x30 kernel."""
    kernel = np.ones((30, 30), np.uint8)
    steps = [cv2.erode(pie, kernel, iterations=n) for n in (1, 2, 3)]
    cv_show(np.hstack(steps), 'res')
def dilate(pie):
    """Show the effect of 1, 2 and 3 dilation iterations with a 30x30 kernel."""
    kernel = np.ones((30, 30), np.uint8)
    steps = [cv2.dilate(pie, kernel, iterations=n) for n in (1, 2, 3)]
    cv_show(np.hstack(steps), 'res')
def morp(img):
    """Show morphological gradient, top-hat and black-hat of img in turn."""
    kernel = np.ones((30, 30), np.uint8)
    operations = [
        ('gradient', cv2.MORPH_GRADIENT),   # dilation minus erosion
        ('tophat', cv2.MORPH_TOPHAT),       # input minus opening
        ('blackhat ', cv2.MORPH_BLACKHAT),  # closing minus input
    ]
    for title, op in operations:
        cv_show(cv2.morphologyEx(img, op, kernel), title)
def cv_show(img, title=""):
    """Display img in a window until any key is pressed, then close all windows."""
    cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def sobel(img):
    """Show the raw CV_64F Sobel-x gradient, then |Sobel-x| and |Sobel-y|.

    The first window displays the float64 gradient directly (negative
    edges render dark); the second shows both gradients after
    convertScaleAbs.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
    cv2.imshow('sobel', sobelx)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # FIX: the original recomputed the identical x-gradient here; reuse it.
    abs_x = cv2.convertScaleAbs(sobelx)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    abs_y = cv2.convertScaleAbs(sobely)
    cv_show(np.hstack((abs_x, abs_y)), "sob")
def s3(img):
    """Compare Sobel, Scharr and Laplacian edge maps side by side."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    def combine(dx, dy):
        # Merge the absolute x/y gradients with equal weight.
        ax = cv2.convertScaleAbs(dx)
        ay = cv2.convertScaleAbs(dy)
        return cv2.addWeighted(ax, 0.5, ay, 0.5, 0)

    sobelxy = combine(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3),
                      cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3))
    scharrxy = combine(cv2.Scharr(gray, cv2.CV_64F, 1, 0),
                       cv2.Scharr(gray, cv2.CV_64F, 0, 1))
    laplacian = cv2.convertScaleAbs(cv2.Laplacian(gray, cv2.CV_64F))
    cv_show(np.hstack((sobelxy, scharrxy, laplacian)), 'res')
def canny(img):
    """Show Canny edges with two different threshold pairs side by side."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = [cv2.Canny(gray, low, high) for low, high in ((80, 150), (50, 100))]
    cv_show(np.hstack(edges), 'res')
def ta(img):
    """Show one Laplacian-pyramid level: img minus pyrUp(pyrDown(img))."""
    down = cv2.pyrDown(img)
    # BUG FIX: `logging` was never imported at module level.
    logging.info('%s', down.shape)
    down_up = cv2.pyrUp(down)
    # BUG FIX: for odd-sized images pyrUp(pyrDown(img)) has different
    # dimensions than img, which breaks the subtraction; resize back first.
    down_up = cv2.resize(down_up, (img.shape[1], img.shape[0]))
    # NOTE: uint8 subtraction wraps around, reproducing the original demo
    # behavior (cv2.subtract would saturate at 0 instead).
    l_1 = img - down_up
    cv_show(l_1, 'l_1')
def cont(img):
    """Find all contours of the thresholded grayscale image and draw them."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Draw on a copy so the caller's image is left untouched.
    # Contour index -1 means "draw every contour".
    canvas = img.copy()
    outlined = cv2.drawContours(canvas, contours, -1, (0, 0, 255), 1)
    cv_show(outlined, 'res')
def appox(img):
    """Approximate the first contour of img with a polygon and draw it."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnt = contours[0]
    # FIX: removed two pure calls (cv2.contourArea / cv2.arcLength) whose
    # return values were discarded — they had no effect.
    # epsilon is the approximation tolerance: 15% of the closed perimeter
    # (the main tuning knob; True means the contour is treated as closed).
    epsilon = 0.15 * cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, epsilon, True)
    # Draw on a copy so the original image is not modified.
    draw_img = img.copy()
    res = cv2.drawContours(draw_img, [approx], -1, (0, 0, 255), 2)
    cv_show(res, 'res')
def rect(img):
    """Draw the bounding rectangle of the first detected contour on img."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    first = contours[0]
    x, y, w, h = cv2.boundingRect(first)
    boxed = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv_show(boxed, 'img')
def hv(img):
    """Plot the grayscale intensity histogram of img."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # cv2.calcHist returns a (256, 1) array; computed here for reference.
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    plt.hist(gray.ravel(), 256)
    plt.show()
def hv_color(img):
    """Plot per-channel (B, G, R) intensity histograms of img."""
    for channel, colour in enumerate(('b', 'g', 'r')):
        histogram = cv2.calcHist([img], [channel], None, [256], [0, 256])
        plt.plot(histogram, color=colour)
        plt.xlim([0, 256])  # x-axis range
    plt.show()
def mask(img):
    """Show a rectangular mask and the masked region of img."""
    # Single-channel mask matching img's height/width, white inside the box.
    region = np.zeros(img.shape[:2], np.uint8)
    # BUG FIX: `logging` was never imported at module level.
    logging.info('%s', region.shape)
    region[100:300, 200:400] = 255
    cv_show(region, 'gray')
    # bitwise_and keeps img pixels only where the mask is non-zero.
    masked = cv2.bitwise_and(img, img, mask=region)
    cv_show(masked, "masked")
def bijao(img):
    """Compare the full-image histogram with a masked-region histogram."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    region = np.zeros(gray.shape[:2], np.uint8)
    region[100:300, 200:400] = 255
    masked = cv2.bitwise_and(gray, gray, mask=region)
    hist_full = cv2.calcHist([gray], [0], None, [256], [0, 256])
    hist_mask = cv2.calcHist([gray], [0], region, [256], [0, 256])
    for position, picture in zip((221, 222, 223), (gray, region, masked)):
        plt.subplot(position)
        plt.imshow(picture, 'gray')
    plt.subplot(224)
    plt.plot(hist_full)
    plt.plot(hist_mask)
    plt.xlim([0, 256])
    plt.show()
def jun(img):
    """Show grayscale img next to its histogram-equalized version."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Equalization spreads the intensity histogram, boosting contrast.
    equ = cv2.equalizeHist(gray)
    cv_show(np.hstack((gray, equ)), 'jun')
    # FIX: removed a stray trailing plt.show() that had no figure to show.
def jun_grid(img):
    """Compare plain equalization with CLAHE (tiled adaptive equalization)."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    equ = cv2.equalizeHist(gray)
    # CLAHE equalizes 8x8 tiles with a clip limit, preserving local contrast.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    res_clahe = clahe.apply(gray)
    # FIX: the original built an intermediate hstack that was immediately
    # overwritten; build the final comparison strip once.
    res = np.hstack((gray, equ, res_clahe))
    cv2.imshow('res', res)
    cv2.waitKey(0)
    # FIX: close the window afterwards, consistent with the other demos.
    cv2.destroyAllWindows()
def template(img):
    """Find all matches of mario_coin.jpg in img and draw a box around each."""
    # BUG FIX: the original referenced an undefined name `img_rgb`
    # (NameError); use the `img` parameter instead.
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    tmpl = cv2.imread('mario_coin.jpg', 0)
    h, w = tmpl.shape[:2]
    res = cv2.matchTemplate(img_gray, tmpl, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    # np.where yields (rows, cols); reverse so we iterate (x, y) points.
    loc = np.where(res >= threshold)
    for pt in zip(*loc[::-1]):
        bottom_right = (pt[0] + w, pt[1] + h)
        cv2.rectangle(img, pt, bottom_right, (255, 255, 0), 2)
    cv2.imshow("img_rgb", img)
    cv2.waitKey(0)
def fly(img):
    """Show img next to the magnitude spectrum of its DFT."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # OpenCV's DFT wants float32 input and returns a 2-channel (re, im) array.
    dft = cv2.dft(np.float32(gray), flags=cv2.DFT_COMPLEX_OUTPUT)
    # Shift the zero-frequency component to the center of the spectrum.
    centered = np.fft.fftshift(dft)
    # Log-scale the magnitude so it is visible as a grayscale image.
    spectrum = 20 * np.log(cv2.magnitude(centered[:, :, 0], centered[:, :, 1]))
    for position, picture, label in ((121, gray, 'Input Image'),
                                     (122, spectrum, 'Magnitude Spectrum')):
        plt.subplot(position)
        plt.imshow(picture, cmap='gray')
        plt.title(label)
        plt.xticks([])
        plt.yticks([])
    plt.show()
def fly2(img):
    """Low-pass filter img in the frequency domain and show the result."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dft = cv2.dft(np.float32(gray), flags=cv2.DFT_COMPLEX_OUTPUT)
    centered = np.fft.fftshift(dft)

    rows, cols = gray.shape
    crow, ccol = rows // 2, cols // 2  # spectrum center

    # Low-pass mask: keep a 60x60 window around the center, zero elsewhere.
    keep = np.zeros((rows, cols, 2), np.uint8)
    keep[crow - 30:crow + 30, ccol - 30:ccol + 30] = 1

    # Inverse DFT of the masked spectrum, then take the magnitude.
    restored = cv2.idft(np.fft.ifftshift(centered * keep))
    restored = cv2.magnitude(restored[:, :, 0], restored[:, :, 1])

    for position, picture, label in ((121, gray, 'Input Image'),
                                     (122, restored, 'Result')):
        plt.subplot(position)
        plt.imshow(picture, cmap='gray')
        plt.title(label)
        plt.xticks([])
        plt.yticks([])
    plt.show()
def fly3(img):
    """High-pass filter img in the frequency domain and show the result."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dft = cv2.dft(np.float32(gray), flags=cv2.DFT_COMPLEX_OUTPUT)
    centered = np.fft.fftshift(dft)

    rows, cols = gray.shape
    crow, ccol = rows // 2, cols // 2  # spectrum center

    # High-pass mask: zero a 60x60 window around the center, keep the rest.
    keep = np.ones((rows, cols, 2), np.uint8)
    keep[crow - 30:crow + 30, ccol - 30:ccol + 30] = 0

    # Inverse DFT of the masked spectrum, then take the magnitude.
    restored = cv2.idft(np.fft.ifftshift(centered * keep))
    restored = cv2.magnitude(restored[:, :, 0], restored[:, :, 1])

    for position, picture, label in ((121, gray, 'Input Image'),
                                     (122, restored, 'Result')):
        plt.subplot(position)
        plt.imshow(picture, cmap='gray')
        plt.title(label)
        plt.xticks([])
        plt.yticks([])
    plt.show()
def flyTong(img, di=True):
    """Frequency-domain filtering demo (low-pass or high-pass).

    Args:
        img: BGR image; converted to grayscale internally.
        di: True applies a low-pass filter (keep the spectrum center),
            False applies a high-pass filter (zero the spectrum center).
            Generalized from the original hard-coded local flag; the
            default preserves the previous behavior.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dft = cv2.dft(np.float32(gray), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft_center = np.fft.fftshift(dft)

    # Image center (shared by both mask shapes; previously computed twice).
    crow, ccol = gray.shape[0] // 2, gray.shape[1] // 2
    if di:
        # Low-pass mask: ones in the 60x60 center window, zeros elsewhere.
        mask = np.zeros((gray.shape[0], gray.shape[1], 2), np.uint8)
        mask[crow - 30:crow + 30, ccol - 30:ccol + 30] = 1
    else:
        # High-pass mask: zeros in the center window, ones elsewhere.
        mask = np.ones((gray.shape[0], gray.shape[1], 2), np.uint8)
        mask[crow - 30:crow + 30, ccol - 30:ccol + 30] = 0

    # Keep only the selected frequencies, then invert the transform.
    mask_img = dft_center * mask
    img_idf = cv2.idft(np.fft.ifftshift(mask_img))
    img_idf = cv2.magnitude(img_idf[:, :, 0], img_idf[:, :, 1])

    plt.subplot(121)
    plt.axis('off')
    plt.imshow(gray, cmap='gray')
    plt.subplot(122)
    plt.axis('off')
    plt.imshow(img_idf, cmap='gray')
    plt.show()
def background():
    """Inpaint the near-black pixels of images/zp1.jpg and show the result."""
    img = cv2.imread(filename=r"images/zp1.jpg")
    # Pixels whose BGR channel sum is <= 30 are treated as damage (mask 255);
    # everything brighter is content (mask 0), matching the original logic.
    # FIX: the original classified every pixel in a Python double loop
    # (O(h*w) interpreter work) and round-tripped through a 3-channel image;
    # this vectorized comparison produces the equivalent grayscale mask.
    damage = img.sum(axis=2) <= 30
    mask = np.where(damage, 255, 0).astype(np.uint8)
    # Navier-Stokes inpainting with a 3px radius fills the masked pixels.
    dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)
    cv2.imshow("inpainted", dst)
    cv2.waitKey(0)
def filtColor():
    """Show live webcam frames with blue regions isolated via an HSV mask.

    Press 'q' to stop. BUG FIX: the original looped forever with
    cv2.waitKey(0) blocking on every frame, so the camera release and
    window cleanup below were unreachable.
    """
    cap = cv2.VideoCapture(0)
    # Hoisted loop invariants: the HSV range treated as "blue".
    lower_blue = np.array([60, 35, 140])
    upper_blue = np.array([180, 255, 255])
    while True:
        ok, frame = cap.read()
        if not ok:
            # FIX: stop cleanly if the camera yields no frame.
            break
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # White where the pixel falls inside the blue range.
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        # The mask's black region is 0, so bitwise_and with the frame
        # removes all non-blue regions.
        result = cv2.bitwise_and(frame, frame, mask=mask)
        cv2.imshow('frame', frame)
        cv2.imshow('mask', mask)
        cv2.imshow('result', result)
        # FIX: poll briefly instead of blocking forever; 'q' exits the loop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    cap.release()
# gamma(img1)
# contrast(img1)
# equHis(img1)
# p_new()
# repair()
# show_xy()
# test1()
# draw()
# flyTong(img1)
# fly(img1)
# fly3(img1)
# jun_grid(img1)
# jun(img1)
# mask(img1)
# hv_color(img1)
# hv(img1)
# rect(img1)
# if cv2.waitKey(100) & 0xFF == ord('q'):
# 	break
# add(img1,img2)
# fx(img1)
# wt(img1,img2)
#border(img2)
# hsv(img2)
# meng(img1)
# blur(img1)
# erode(img2)
# dilate(img2)
# morp(img2)
# sobel(img1)
# s3(img1)
# canny(img2)
# ta(img2)
# Active demo entry point: draw the contours of img1 (see cont() above).
cont(img1)
# appox(img1)
# background()