# -*- coding: utf-8 -*-
import cv2
import matplotlib as mpl
import numpy as np
import copy
'''
1. 以Lena为原始图像，通过OpenCV实现平均滤波，高斯滤波及中值滤波，比较滤波结果。
2. 以Lena为原始图像，通过OpenCV使用Sobel及Canny算子检测，比较边缘检测结果。
3. 在OpenCV安装目录下找到课程对应演示图片(安装目录\sources\samples\data)，首先计算灰度直方图，进一步使用大津算法进行分割，并比较分析分割结果。
4. 使用米粒图像，分割得到各米粒，首先计算各区域(米粒)的面积、长度等信息，进一步计算面积、长度的均值及方差，分析落在3sigma范围内米粒的数量。
扩展作业：
5. 使用棋盘格及自选风景图像，分别使用SIFT、FAST及ORB算子检测角点，并比较分析检测结果。
(可选)使用Harris角点检测算子检测棋盘格，并与上述结果比较。
'''


'''
filename = './lena.jpg'
img = cv2.imread(filename)

# 均值滤波
img_mean = cv2.blur(img, (5,5))

# 高斯滤波
img_Guassian = cv2.GaussianBlur(img,(5,5),0)

# 中值滤波
img_median = cv2.medianBlur(img, 5)

# 展示不同的图片
titles = ['srcImg', 'mean', 'Gaussian', 'median']
imgs = [img, img_mean, img_Guassian, img_median]

for i in range(len(titles)):
    cv2.imshow(titles[i], imgs[i])
    cv2.imwrite('lena-'+titles[i]+'.jpg', imgs[i])
    cv2.waitKey()
    cv2.destroyAllWindows()
'''

'''
# 以灰度图像读出来
img = cv2.imread(filename,0)

# Sobel
x = cv2.Sobel(img, cv2.CV_64F, 1,0, ksize=3)
y = cv2.Sobel(img, cv2.CV_64F, 0,1, ksize=3)
absX = cv2.convertScaleAbs(x)   # 转回uint8
absY = cv2.convertScaleAbs(y)
img_sobel = cv2.addWeighted(absX,0.5,absY,0.5,0)
cv2.imshow("absX", absX)
cv2.imshow("absY", absY)
# 结果
cv2.imshow("Result", img_sobel)
cv2.imwrite('lena-sobel.jpg', img_sobel)
cv2.waitKey()
cv2.destroyAllWindows()

# Canny
img_canny = cv2.Canny(img, 80, 150)

# 结果
# cv2.imshow("Result", img_canny)
# cv2.imwrite('lena-canny.jpg', img_canny)
# cv2.waitKey()
# cv2.destroyAllWindows()
'''

'''
filename = './example-pic6.png'
img = cv2.imread(filename, 0)
hist = cv2.calcHist([img], [0], None, [256], [0, 255])
# 画出直方图
plt.figure()
plt.title("Grayscale Histogram")
plt.xlabel("Bins")
plt.ylabel("number of Pixels")
plt.plot(hist)
plt.xlim([0, 255])
# plt.show()
plt.clf()

ret2, th2 = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)
print(ret2)
plt.figure()
plt.subplot(221), plt.imshow(img, 'gray')
plt.subplot(222), plt.hist(img.ravel(), 256)  # .ravel方法将矩阵转化为一维
plt.subplot(223), plt.imshow(th2, 'gray')
plt.show()

# img_otsu = cv2.threshold(img, 0, 255, type=cv2.THRESH_OTSU)
# cv2.imshow("Result", img_otsu)
# cv2.imwrite('lena-otsu.jpg', img_otsu)
# cv2.waitKey()
# cv2.destroyAllWindows()
'''

'''
# 解决中文显示问题
mpl.rcParams['font.sans-serif'] = ['SimHei']  # 指定默认字体 SimHei为黑体
mpl.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号
def zh_ch(string):
    return string.encode("gbk").decode(errors="ignore")
# 计算平均值、中位值、方差
def stats(li):
    return np.mean(li), np.median(li), np.var(li)

filename = 'rice.png'
image = cv2.imread(filename)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, bw = cv2.threshold(gray, 0, 0xff, cv2.THRESH_OTSU)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
bw = cv2.morphologyEx(bw, cv2.MORPH_OPEN, element)

seg = copy.deepcopy(bw)
bin, cnts, hier = cv2.findContours(
    seg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
count = 0
li_area = []
li_perimeter = []
for i in range(len(cnts), 0, -1):
    c = cnts[i - 1]
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c , False)
    if area < 10:
        continue
    count += 1
    # print("blob_area", i, ' : ', area)
    # print("blob_perimeter", i, ' : ', perimeter)
    li_area.append(area)
    li_perimeter.append(perimeter)
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 0xff), 1)
    cv2.putText(image, str(count), (x, y),
                cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 0xff, 0))
print("米粒数量： ", count)
# cv2.imshow("origin", image)
# cv2.imshow("threshold", bw)
# cv2.waitKey()
# cv2.destroyAllWindows()
# print(li_area)
print("面积——平均数： %.2f, 中位数：%.2f, 方差：%.2f"% stats(li_area))
mean, _, var = stats(li_area)
std = np.sqrt(var)
li_area_3sigma = [i for i in li_area if i>(mean+3*std) or i<(mean-3*std) ]
print(li_area_3sigma)
print("面积在3sigma之外的数量：", len(li_area_3sigma))
# print(li_perimeter)
print("周长——平均数： %.2f, 中位数：%.2f, 方差：%.2f"% stats(li_perimeter))
mean, _, var = stats(li_perimeter)
std = np.sqrt(var)
li_perimeter_3sigma = [i for i in li_area if i>(mean+3*std) or i<(mean-3*std) ]
print(li_perimeter_3sigma)
print("周长在3sigma之外的数量：", len(li_area_3sigma))
'''

# fast


def fast_detect(image, scale, image_name):
    """Run the FAST corner detector on a rescaled copy of *image*.

    Displays the resized original and the annotated result, and writes
    the result to "fast-<image_name>.jpg".

    :param image: input image (BGR or grayscale ndarray)
    :param scale: resize factor in percent of the original size
    :param image_name: tag used in the output file name
    """
    # Rescale to `scale` percent of the original dimensions.
    new_w = int(image.shape[1] * scale / 100)
    new_h = int(image.shape[0] * scale / 100)
    image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_AREA)
    # FAST with a fixed threshold, non-max suppression disabled,
    # using the 9-of-16 contiguous-arc test.
    detector = cv2.FastFeatureDetector_create(
        threshold=40,
        nonmaxSuppression=False,
        type=cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)
    keypoints = detector.detect(image, None)
    annotated = cv2.drawKeypoints(image, keypoints, None, color=(255, 0, 0))
    cv2.imshow('origin', image)
    cv2.imshow('fast_result', annotated)
    cv2.imwrite("fast-" + image_name + ".jpg", annotated)
    cv2.waitKey()
    cv2.destroyAllWindows()


# filename = 'chessboard.png'
# image = cv2.imread(filename, 0)
# fast_detect(image, 20, "chessboard")
#
# filename = 'view.png'
# image = cv2.imread(filename, 0)
# fast_detect(image, 50, "view")

# sift


def sift_detect(image, scale, image_name):
    """Detect SIFT keypoints on a rescaled copy of *image*.

    Displays the resized original and the annotated result, and writes
    the result to "sift-<image_name>.jpg".

    :param image: input image (BGR or grayscale ndarray)
    :param scale: resize factor in percent of the original size
    :param image_name: tag used in the output file name
    """
    # Rescale to `scale` percent of the original dimensions.
    width = int(image.shape[1] * scale / 100)
    height = int(image.shape[0] * scale / 100)
    image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
    # SIFT moved out of xfeatures2d into the main module in OpenCV >= 4.4
    # (patent expiry); fall back for older contrib builds.
    try:
        sift = cv2.SIFT_create()
    except AttributeError:
        sift = cv2.xfeatures2d.SIFT_create()
    keypoints, _descriptor = sift.detectAndCompute(image, None)
    # outImage=None draws on a fresh copy. The original passed
    # outImage=image, which mutated the source and made the "origin"
    # window show the annotated image too.
    image_result = cv2.drawKeypoints(image=image,
                                     outImage=None,
                                     keypoints=keypoints,
                                     flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT,
                                     color=(255, 0, 0))
    cv2.imshow('origin', image)
    cv2.imshow('sift_result', image_result)
    cv2.imwrite("sift-" + image_name + ".jpg", image_result)
    cv2.waitKey()
    cv2.destroyAllWindows()


# filename = 'chessboard.png'
# image = cv2.imread(filename, 0)
# sift_detect(image, 20, "chessboard")
#
# filename = 'view.png'
# image = cv2.imread(filename, 0)
# sift_detect(image, 50, "view")


# orb


def orb_detect(image, scale, image_name):
    """Detect ORB keypoints on a rescaled copy of *image*.

    Displays the resized original and the annotated result, and writes
    the result to "orb-<image_name>.jpg".

    :param image: input image (BGR or grayscale ndarray)
    :param scale: resize factor in percent of the original size
    :param image_name: tag used in the output file name
    """
    # Rescale to `scale` percent of the original dimensions.
    width = int(image.shape[1] * scale / 100)
    height = int(image.shape[0] * scale / 100)
    image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
    orb = cv2.ORB_create()
    keypoints, _descriptor = orb.detectAndCompute(image, None)
    # outImage=None draws on a fresh copy. The original passed
    # outImage=image, which mutated the source and made the "origin"
    # window show the annotated image too.
    image_result = cv2.drawKeypoints(image=image,
                                     outImage=None,
                                     keypoints=keypoints,
                                     flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT,
                                     color=(255, 0, 0))
    cv2.imshow('origin', image)
    cv2.imshow('orb_result', image_result)
    cv2.imwrite("orb-" + image_name + ".jpg", image_result)
    cv2.waitKey()
    cv2.destroyAllWindows()

# filename = 'chessboard.png'
# image = cv2.imread(filename, 0)
# orb_detect(image, 20, "chessboard")
#
# filename = 'view.png'
# image = cv2.imread(filename, 0)
# orb_detect(image, 50, "view")


# harris


def harris_detect(image, scale, image_name):
    """Detect Harris corners on a rescaled copy of *image*.

    Displays the resized original and a copy with strong corners marked
    in blue, and writes that visualization to "harris-<image_name>.jpg".

    :param image: input BGR image (ndarray); must be color, it is
        converted with COLOR_BGR2GRAY
    :param scale: resize factor in percent of the original size
    :param image_name: tag used in the output file name
    """
    # Rescale to `scale` percent of the original dimensions.
    width = int(image.shape[1] * scale / 100)
    height = int(image.shape[0] * scale / 100)
    image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # cornerHarris requires float32 input; blockSize=2, ksize=3, k=0.04.
    response = cv2.cornerHarris(np.float32(gray), 2, 3, 0.04)
    # Mark pixels whose response exceeds 1% of the maximum in blue (BGR).
    vis = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    vis[response > 0.01 * response.max()] = [255, 0, 0]
    cv2.imshow('origin', image)
    # Bug fix: the original displayed and saved `image_result` — the raw
    # float32 Harris response map, which imshow/imwrite render as garbage —
    # while the annotated copy was only shown mislabeled as "origin".
    cv2.imshow('harris_result', vis)
    cv2.imwrite("harris-" + image_name + ".jpg", vis)
    cv2.waitKey()
    cv2.destroyAllWindows()


# Run Harris corner detection on each sample image: (file, scale %, tag).
_HARRIS_JOBS = (
    ('chessboard.png', 20, 'chessboard'),
    ('view.png', 50, 'view'),
)
for filename, percent, label in _HARRIS_JOBS:
    image = cv2.imread(filename)
    harris_detect(image, percent, label)
