#第二周进阶作业

#1. 以Lena为原始图像，通过OpenCV实现平均滤波，高斯滤波及中值滤波，比较滤波结果。

import numpy as np
import cv2
import matplotlib.pyplot as plt
########     三个不同的滤波器    #########
# NOTE: raw string so the Windows backslashes are not treated as escape
# sequences ('\d', '\l' are invalid escapes and warn on modern Python).
img = cv2.imread(r'E:\deep learning\cv\data\lena.jpg')
# OpenCV loads BGR; convert to RGB so matplotlib displays colors correctly.
source = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# Mean (box) filter: each pixel becomes the average of its 5x5 neighborhood.
img_mean = cv2.blur(source, (5, 5))

# Gaussian filter: 5x5 kernel; sigma=0 lets OpenCV derive it from the kernel size.
img_Guassian = cv2.GaussianBlur(source, (5, 5), 0)

# Median filter: 5x5 neighborhood median — best against salt-and-pepper noise.
img_median = cv2.medianBlur(source, 5)

# Show the original and the three filtered results in a 2x2 grid.
titles = ['source', 'mean', 'Gaussian', 'median']
imgs = [source, img_mean, img_Guassian, img_median]

for i in range(4):
    plt.subplot(2, 2, i + 1)  # subplot indices are 1-based (MATLAB-style)
    plt.imshow(imgs[i])
    plt.title(titles[i])
plt.show()
##########################################################################################

#2. 以Lena为原始图像，通过OpenCV使用Sobel及Canny算子检测，比较边缘检测结果。
#coding=utf-8
import cv2
import numpy as np

# Read as grayscale (flag 0); raw string keeps the Windows backslashes
# from being parsed as (invalid) escape sequences.
img = cv2.imread(r"E:\deep learning\cv\data\lena.jpg", 0)

# Light Gaussian smoothing suppresses noise before Canny edge detection.
img2 = cv2.GaussianBlur(img, (3, 3), 0)
# Hysteresis thresholds: 50 (low) / 150 (high).
canny = cv2.Canny(img2, 50, 150)

cv2.imshow('Canny', canny)

# Sobel derivatives in 16-bit signed output so negative gradients are
# not truncated before taking the absolute value.
x = cv2.Sobel(img, cv2.CV_16S, 1, 0)  # horizontal gradient (d/dx)
y = cv2.Sobel(img, cv2.CV_16S, 0, 1)  # vertical gradient (d/dy)

absX = cv2.convertScaleAbs(x)   # back to uint8
absY = cv2.convertScaleAbs(y)

# Combine both gradient directions with equal weight.
dst = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)

cv2.imshow("absX", absX)
cv2.imshow("absY", absY)

cv2.imshow("SobelX+Y", dst)
cv2.imshow('Canny', canny)

cv2.waitKey(0)
cv2.destroyAllWindows()

#################################################################

#3. 在OpenCV安装目录下找到课程对应演示图片(安装目录\sources\samples\data)，首先计算灰度直方图，进一步使用大津算法进行分割，并比较分析分割结果。

import cv2
import matplotlib.pyplot as plt
import numpy as np

def plot_demo(image):
    """Plot a histogram of every pixel value in *image* over [0, 256)."""
    # Flatten to 1-D so all pixels (all channels) feed one 256-bin histogram.
    flat_pixels = image.ravel()
    plt.hist(flat_pixels, 256, [0, 256])
    plt.show()


def image_hist(image):
    """Plot per-channel (B, G, R) intensity histograms of a color image.

    Each curve shows, for one channel, how many pixels take each
    intensity value in [0, 256).
    """
    colors = ('blue', 'green', 'red')  # OpenCV channel order is B, G, R
    for channel, color_name in enumerate(colors):
        # calcHist(images, channels, mask, histSize, ranges) -> hist
        # The first argument must be a *list* of images; the original code
        # passed the bare array. hist is a 256x1 array where hist[v] is the
        # number of pixels with intensity v in this channel.
        hist = cv2.calcHist([image], [channel], None, [256], [0, 256])
        print(hist.shape)
        plt.plot(hist, color=color_name)
        plt.xlim([0, 256])
    plt.show()

if __name__ == '__main__':
    # Raw string keeps the Windows backslashes from being treated as escapes.
    src = cv2.imread(r"E:\deep learning\cv\data\pic2.png")  # load the test image
    cv2.namedWindow("input image")  # create a named window
    cv2.imshow("input image", src)  # show the image in that window
    plot_demo(src)
    image_hist(src)
   # cv2.waitKey(0)  # wait for a key press (0 = block indefinitely)
   # cv2.destroyAllWindows()  # close all OpenCV windows

# Read the image; raw string avoids invalid escape sequences in the path.
img = cv2.imread(r'E:\deep learning\cv\data\pic2.png')
# Convert to grayscale.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Otsu's method picks the binarization threshold automatically
# (the 0 passed as threshold is ignored when THRESH_OTSU is set).
retval, dst = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)
# Dilation/erosion act on the white regions; dilation grows them
# (the last argument is the iteration count).
dst = cv2.dilate(dst, None, iterations=1)
# Erosion shrinks the white regions.
dst = cv2.erode(dst, None, iterations=4)

cv2.imshow('binary', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()

#############################################################################

#4. 使用米粒图像，分割得到各米粒，首先计算各区域(米粒)的面积、长度等信息，进一步计算面积、长度的均值及方差，分析落在3sigma范围内米粒的数量。
# -*- coding:utf-8 -*-
import cv2 as cv
import copy

filename = "rice.png"
image = cv.imread(filename)

gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)  # convert to grayscale

#_, bw = cv.threshold(gray, 0, 0xff, cv.THRESH_OTSU)  # global Otsu threshold

# Adaptive (local) thresholding handles the uneven illumination of the
# rice image better than one global threshold.
bw = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C,
                          cv.THRESH_BINARY, 101, 1)

element = cv.getStructuringElement(cv.MORPH_CROSS, (3, 3))  # denoising kernel

bw = cv.morphologyEx(bw, cv.MORPH_OPEN, element)  # opening removes small specks

seg = copy.deepcopy(bw)
# findContours returns (image, contours, hierarchy) in OpenCV 3.x but only
# (contours, hierarchy) in OpenCV 4.x. Taking the last two elements works
# on both versions (and avoids shadowing the builtin `bin`).
cnts, hier = cv.findContours(seg, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)[-2:]
count = 0

for i in range(len(cnts), 0, -1):
    c = cnts[i - 1]
    area = cv.contourArea(c)
    if area < 10:  # skip tiny specks: noise, not rice grains
        continue
    count = count + 1
    print("blob", i, ":", area)

    # Draw the bounding box and the running grain number on the source image.
    x, y, w, h = cv.boundingRect(c)
    cv.rectangle(image, (x, y), (x + w, y + h), (0, 0, 0xff), 1)
    cv.putText(image, str(count), (x, y), cv.FONT_HERSHEY_PLAIN, 0.5, (0, 0xff, 0))

print("米粒数量： ", count)
cv.imshow("source", image)
cv.imshow("binary", bw)

cv.waitKey()
cv.destroyAllWindows()

###########################################################################################

#5.使用棋盘格及自选风景图像，分别使用SIFT、FAST及ORB算子检测角点，并比较分析检测结果。(可选)使用Harris角点检测算子检测棋盘格，并与上述结果比较。

#####################  SIFT  SURF  FAST ################################
import cv2

imgpath = './home.jpg'
img = cv2.imread(imgpath)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# SIFT moved into the main cv2 namespace once its patent expired
# (OpenCV >= 4.4); fall back to the contrib module on older builds.
sift_factory = getattr(cv2, 'SIFT_create', None) or cv2.xfeatures2d.SIFT_create
sift1 = sift_factory()
keypoints1, descriptor = sift1.detectAndCompute(gray, None)

# SURF is patent-encumbered and only available via the contrib module.
sift2 = cv2.xfeatures2d.SURF_create(float(4000))
keypoints2, descriptor = sift2.detectAndCompute(gray, None)

fast = cv2.FastFeatureDetector_create(threshold=40,
                                      nonmaxSuppression=True,
                                      type=cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)
kp = fast.detect(img, None)

# Draw each detector's keypoints on its OWN copy of the image.  The
# original code passed `img` itself as outImage, so drawKeypoints mutated
# the shared image and every window showed the accumulated keypoints of
# all previous detectors instead of one detector each.
img1 = cv2.drawKeypoints(image=img,
                         outImage=img.copy(),
                         keypoints=keypoints1,
                         flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT,
                         color=(51, 163, 236))

img2 = cv2.drawKeypoints(image=img,
                         outImage=img.copy(),
                         keypoints=keypoints2,
                         flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT,
                         color=(51, 163, 236))

img3 = cv2.drawKeypoints(img, kp, img.copy(), color=(255, 255, 255))

cv2.imshow('sift', img1)
cv2.imshow('surf', img2)
cv2.imshow('fast', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()


#################### OBR #####################################
import cv2
from matplotlib import pyplot as plt

def match_ORB():
    """Match ORB features between two grayscale images.

    Detects ORB keypoints/descriptors in both images, brute-force matches
    them with Hamming distance (cross-checked), sorts matches by distance,
    and returns the drawn match visualization.
    """
    query = cv2.imread('./gggg/001.png', 0)
    train = cv2.imread('./gggg/002.png', 0)

    # One detector computes keypoints and binary descriptors for both images.
    detector = cv2.ORB_create()
    kp_query, desc_query = detector.detectAndCompute(query, None)
    kp_train, desc_train = detector.detectAndCompute(train, None)

    # Hamming norm suits ORB's binary descriptors; crossCheck keeps only
    # mutual best matches. Best (smallest-distance) matches come first.
    matcher = cv2.BFMatcher(normType=cv2.NORM_HAMMING, crossCheck=True)
    found = sorted(matcher.match(desc_query, desc_train),
                   key=lambda m: m.distance)

    return cv2.drawMatches(img1=query, keypoints1=kp_query,
                           img2=train, keypoints2=kp_train,
                           matches1to2=found,
                           outImg=train, flags=2)

if __name__ == '__main__':
    # Render the ORB match visualization with matplotlib.
    matched = match_ORB()
    plt.imshow(matched)
    plt.show()


#FAST提取了大量的特征点，在计算时间上，比SIFT SURF快两个数量级，ORB在FAST基础上得来的，特征点的质量比较高！

############################  Harris  ###############################
import cv2
import numpy as np

filename = 'chessboard.png'

img = cv2.imread(filename)
# Harris requires a single-channel float32 input.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
# blockSize=2, Sobel aperture=3, Harris free parameter k=0.04.
dst = cv2.cornerHarris(gray, 2, 3, 0.04)
# Dilate the response map so the marked corners are easier to see;
# this does not change which corners are detected.
dst = cv2.dilate(dst, None)
# Mark in red every pixel whose response exceeds 1% of the peak response
# (the threshold may need tuning per image).
img[dst > 0.01 * dst.max()] = [0, 0, 255]

cv2.imshow('dst', img)
# Close on ESC (key code 27).
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()

#在纹理信息丰富的区域,Harris 算子可以提取出大量有用的特征点,而在纹理信息少的区域,提取的特征点则较少
#Harris 算子的局限性有：①它对尺度很敏感，不具有尺度不变性。②提取的角点是像素级的。