from matplotlib import pylab as plt
from skimage.io import imread
from skimage.color import rgb2gray
from skimage.feature import corner_harris,corner_subpix,corner_peaks
from skimage.transform import warp,SimilarityTransform,AffineTransform,resize
import cv2
import numpy as np
from skimage import data
from skimage.util import img_as_float
from skimage.exposure import rescale_intensity
from skimage.measure import ransac
from numpy import sqrt
from skimage.feature import blob_dog,blob_log,blob_doh
from skimage.feature import hog
from skimage import exposure
from skimage import transform as transform
from skimage.feature import (match_descriptors,corner_peaks,corner_harris,plot_matches, BRIEF)
from skimage.feature import (match_descriptors,ORB,plot_matches)
from skimage.feature import haar_like_feature_coord
from skimage.feature import draw_haar_like_feature



# 7.2.1 The scikit-image package
# Detect corners with the Harris corner detector from skimage.feature.
# BUG FIX: the original called cv2.corner_harris(), which does not exist
# (OpenCV's detector is cv2.cornerHarris and takes a different signature);
# use skimage's corner_harris, already imported at the top of the file.
image = imread('football.png')
image_gray = rgb2gray(image)
coordinates = corner_harris(image_gray, k=0.001)
# Paint pixels with a strong corner response red (4 components, so the
# PNG is assumed to be RGBA here).
image[coordinates > 0.01 * coordinates.max()] = [255, 0, 0, 255]
plt.figure(figsize=(20, 10))
plt.imshow(image)
plt.axis('off')
plt.show()


# Sub-pixel accuracy
# To locate corners as precisely as possible, skimage.feature.corner_subpix()
# refines detected corners to sub-pixel positions.
# First compute Harris corner candidates with corner_peaks(), then refine
# them with corner_subpix(), which uses a statistical test to accept or
# reject each previously detected corner; window_size defines the size of
# the neighbourhood (window) searched around each corner.
image = imread('pyramids2.jpg')
image_gray = rgb2gray(image)
coordinates = corner_harris(image_gray, k=0.001)
coordinates[coordinates > 0.03 * coordinates.max()] = 255
corner_coordinates = corner_peaks(coordinates)
coordinates_subpix = corner_subpix(image_gray, corner_coordinates, window_size=11)
plt.figure(figsize=(20, 20))
plt.subplot(211)
plt.imshow(coordinates, cmap='inferno')
plt.plot(coordinates_subpix[:, 1], coordinates_subpix[:, 0], 'r.',
         markersize=5, label='subpixel')
plt.legend(prop={'size': 20})
plt.axis('off')
plt.subplot(212)
plt.imshow(image, interpolation='nearest')
plt.plot(corner_coordinates[:, 1], corner_coordinates[:, 0], 'bo', markersize=5)
plt.plot(coordinates_subpix[:, 1], coordinates_subpix[:, 0], 'r+', markersize=10)
plt.axis('off')
plt.tight_layout()  # BUG FIX: was plt.tighe_layout(), an AttributeError
plt.show()



#7.2.2哈里斯角点特征在图像匹配中的应用
#一旦检测到图像中的感兴趣点，最好知道如何跨越相同对象的不同图像来匹配这些点
#匹配两个这样的图像的一般方法。
'''
1.计算感兴趣的点（例如，使用哈里斯角点检测器的角点
2.考虑每个关键点周围的区域（窗口）
3.从该区域为每幅图像、每个关键点计算一个局部特征描述符并对其规范化
4.匹配在两幅图像中计算的局部描述符
'''

#基于RANSAC算法和哈里斯角点特征的鲁棒图匹配
#使用仿射图像变换后的版本与原始图像进行匹配，可以将它们视为是不用角度拍摄的
'''
1.首先计算两幅图的感兴趣点或哈里斯角点
2.考虑点周围的小空间。然后使用误差平方加权和计算点之间的对应点。这种度量不是很健壮，而且只要稍稍改变视角时可用
3.一旦找到对应点，就会得到一组源坐标和对应的目标坐标，它们用于估计两幅图像之间的几何变换
4.用于坐标简单地估计参数是不够的，许多对应关系可能是错误的
5.采用随机抽样一致性（RANSAC）算法对参数进行鲁棒估计，先将点分类为内点和外点，然后在忽略外点的情况下将模型拟合到内点上，以寻找与仿射变换一致的匹配。
'''
# Build a synthetic RGB test image: the temple photo in the red channel,
# smooth row/column gradients in the green and blue channels. Then warp it
# with a *known* affine transform, so the estimated transform below can be
# compared against this ground truth.
temple = rgb2gray(img_as_float(imread('temple.jpg')))
n_rows, n_cols = temple.shape
gradient_row, gradient_col = np.mgrid[0:n_rows, 0:n_cols] / float(n_rows)
image_original = np.dstack((temple, gradient_row, gradient_col))
image_original = rescale_intensity(image_original)
image_original_gray = rgb2gray(image_original)
affine_trans = AffineTransform(scale=(0.8, 0.9), rotation=0.1, translation=(120, -20))
image_warped = warp(image_original, affine_trans.inverse,
                    output_shape=image_original.shape)
image_warped_gray = rgb2gray(image_warped)


def _harris_peaks(gray):
    # Threshold the Harris response, then pick well-separated peak coordinates.
    response = corner_harris(gray)
    response[response > 0.01 * response.max()] = 1
    return corner_peaks(response, threshold_rel=0.0001, min_distance=5)


# Corner coordinates for both images, plus their sub-pixel refinements.
coordinates_original = _harris_peaks(image_original_gray)
coordinates_warped = _harris_peaks(image_warped_gray)
coordinates_original_subpix = corner_subpix(image_original_gray,
                                            coordinates_original, window_size=9)
coordinates_warped_subpix = corner_subpix(image_warped_gray,
                                          coordinates_warped, window_size=9)
def gaussian_weights(window_ext, sigma=1):
    """Return a square Gaussian weighting window.

    The window spans ``[-window_ext, window_ext]`` along both axes, i.e.
    it has shape ``(2*window_ext+1, 2*window_ext+1)``, and holds the
    isotropic 2-D Gaussian with standard deviation ``sigma``, scaled by
    the density normalisation constant ``1 / (2*pi*sigma**2)``.
    """
    offsets = np.arange(-window_ext, window_ext + 1)
    xx, yy = np.meshgrid(offsets, offsets)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return kernel / (2 * np.pi * sigma * sigma)


def match_corner(coordinates, window_ext=3):
    """Find the warped-image corner whose neighbourhood best matches.

    Compares the (2*window_ext+1)^2 window around ``coordinates`` in the
    module-level ``image_original`` with the window around every corner in
    ``coordinates_warped``, using a Gaussian-weighted sum of squared
    differences, and returns the corresponding sub-pixel coordinate from
    ``coordinates_warped_subpix`` — or ``[None]`` when no candidate window
    had a matching shape.
    """
    row, col = np.round(coordinates).astype(np.intp)
    window_original = image_original[row - window_ext:row + window_ext + 1,
                                     col - window_ext:col + window_ext + 1, :]
    weights = gaussian_weights(window_ext, 3)
    weights = np.dstack((weights, weights, weights))  # one weight plane per channel
    ssds, candidate_idxs = [], []
    for idx, (coord_row, coord_col) in enumerate(coordinates_warped):
        window_warped = image_warped[coord_row - window_ext:coord_row + window_ext + 1,
                                     coord_col - window_ext:coord_col + window_ext + 1, :]
        # Windows clipped at the image border come out a different shape; skip them.
        if window_original.shape == window_warped.shape:
            ssds.append(np.sum(weights * (window_original - window_warped) ** 2))
            # BUG FIX: the original indexed coordinates_warped_subpix with the
            # argmin over the *filtered* SSD list, so whenever a border window
            # was skipped the returned corner no longer corresponded to the
            # best-matching candidate. Track the original index explicitly.
            candidate_idxs.append(idx)
    if not ssds:
        return [None]
    return coordinates_warped_subpix[candidate_idxs[np.argmin(ssds)]]


# Collect corresponding point pairs between the two images.
# BUG FIX: the original converted `source`/`destination` to numpy arrays
# *inside* the loop body (wrong indentation), so the second successful
# iteration would call .append() on an ndarray and raise AttributeError.
# The conversion belongs after the loop has finished.
source, destination = [], []
for corner in coordinates_original_subpix:
    matched = match_corner(corner)
    # Skip the [None] sentinel and NaN rows that corner_subpix may produce.
    if any(matched) and len(matched) > 0 and not all(np.isnan(matched)):
        source.append(corner)
        destination.append(matched)
source = np.array(source)
destination = np.array(destination)


# Naive least-squares estimate of the affine transform from all pairs.
model = AffineTransform()
model.estimate(source, destination)


# Robust estimate: RANSAC classifies pairs into inliers/outliers and fits
# the model on the inliers only.
model_robust, inliers = ransac((source, destination), AffineTransform,
                               min_samples=3, residual_threshold=2,
                               max_trials=100)
outliers = ~inliers


# Ground truth vs. naive estimate vs. robust estimate.
print(affine_trans.scale, affine_trans.translation, affine_trans.rotation)
print(model.scale, model.translation, model.rotation)
print(model_robust.scale, model_robust.translation, model_robust.rotation)


# Visualise inlier (correct) and outlier (faulty) correspondences.
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(20, 15))
plt.gray()
inlier_idxs = np.nonzero(inliers)[0]
plot_matches(axes[0], image_original_gray, image_warped_gray, source, destination,
             np.column_stack((inlier_idxs, inlier_idxs)), matches_color='b')
axes[0].axis('off')
axes[0].set_title('Correct correspondences', size=20)
outlier_idxs = np.nonzero(outliers)[0]
# BUG FIX: matches_color was 'row', which is not a valid matplotlib colour;
# use 'r' (red) for the faulty correspondences.
plot_matches(axes[1], image_original_gray, image_warped_gray, source, destination,
             np.column_stack((outlier_idxs, outlier_idxs)), matches_color='r')
axes[1].axis('off')
axes[1].set_title('Faulty correspondences', size=20)
fig.tight_layout()
plt.show()



# 7.3.3 Blob detection: compare the LoG, DoG and DoH detectors.
im = imread('butterfly.png')
im_gray = rgb2gray(im)
log_blobs = blob_log(im_gray, max_sigma=30, num_sigma=10, threshold=0.1)
log_blobs[:, 2] = sqrt(2) * log_blobs[:, 2]  # radius ~ sqrt(2) * sigma
dog_blobs = blob_dog(im_gray, max_sigma=30, threshold=0.1)
dog_blobs[:, 2] = sqrt(2) * dog_blobs[:, 2]
doh_blobs = blob_doh(im_gray, max_sigma=30, threshold=0.005)
list_blobs = [log_blobs, dog_blobs, doh_blobs]
# BUG FIX: the colour list was bound to the name `color` but read as
# `colors` in the zip() below, raising NameError.
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian', 'Determinant of Hessian']
sequence = zip(list_blobs, colors, titles)
fig, axes = plt.subplots(2, 2, figsize=(20, 20), sharex=True, sharey=True)
axes = axes.ravel()
axes[0].imshow(im, interpolation='nearest')
axes[0].set_title('original image', size=30)
axes[0].set_axis_off()
for idx, (blobs, color, title) in enumerate(sequence):
    axes[idx + 1].imshow(im, interpolation='nearest')
    # BUG FIX: missing space in the displayed title ('Blobs withLaplacian...').
    axes[idx + 1].set_title('Blobs with ' + title, size=30)
    for blob in blobs:
        y, x, radius = blob
        circle = plt.Circle((x, y), radius, color=color, linewidth=2, fill=False)
        axes[idx + 1].add_patch(circle)
        axes[idx + 1].set_axis_off()
plt.tight_layout()
plt.show()



# 7.4.2 Histogram of Oriented Gradients (HOG) descriptor visualisation.
image = rgb2gray(imread('cameraman.jpg'))
feature_vector, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                                cells_per_block=(1, 1), visualize=True)
print(image.shape, len(feature_vector))
fig, (axes1, axes2) = plt.subplots(1, 2, figsize=(15, 10), sharex=True, sharey=True)
axes1.axis('off')
axes1.imshow(image, cmap=plt.cm.gray)
axes1.set_title('Input image')


# Rescale the HOG response for better contrast before displaying it.
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
axes2.axis('off')
axes2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
plt.show()



# 7.5.2 SIFT keypoints with OpenCV.
# BUG FIX: cv2.version_ does not exist; the version string is cv2.__version__.
print(cv2.__version__)
img = cv2.imread('monalisa.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# NOTE(review): xfeatures2d requires the opencv-contrib build; on
# OpenCV >= 4.4 plain cv2.SIFT_create() is available — confirm the
# installed version.
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(gray, None)
# Draw keypoints with size and orientation (rich flag).
img = cv2.drawKeypoints(img, kp, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow("Image", img)
cv2.imwrite('me5_keypoints.jpg', img)
kp, des = sift.detectAndCompute(gray, None)



# 7.5.3 BRIEF binary descriptors matched across transformed images.
img1 = rgb2gray(imread('lena.jpg'))
affine_trans = transform.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = transform.warp(img1, affine_trans)
img3 = transform.rotate(img1, 25)
# BUG FIX: the original line `coords1,coords2,coords3=corner_harris=(img1),...`
# was a chained assignment that rebound the name corner_harris to a tuple and
# never actually ran the detector on img1. Call the detector per image instead.
coords1 = corner_harris(img1)
coords2 = corner_harris(img2)
coords3 = corner_harris(img3)
coords1[coords1 > 0.01 * coords1.max()] = 1
coords2[coords2 > 0.01 * coords2.max()] = 1
coords3[coords3 > 0.01 * coords3.max()] = 1
keypoints1 = corner_peaks(coords1, min_distance=5)
keypoints2 = corner_peaks(coords2, min_distance=5)
keypoints3 = corner_peaks(coords3, min_distance=5)
extractor = BRIEF()
extractor.extract(img1, keypoints1)
# BUG FIX: `keypointsl` (letter l) was a typo for keypoints1 (NameError).
keypoints1, descriptors1 = keypoints1[extractor.mask], extractor.descriptors
extractor.extract(img2, keypoints2)
keypoints2, descriptors2 = keypoints2[extractor.mask], extractor.descriptors
extractor.extract(img3, keypoints3)
keypoints3, descriptors3 = keypoints3[extractor.mask], extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(20, 20))
plt.gray()
plot_matches(axes[0], img1, img2, keypoints1, keypoints2, matches12)
axes[0].axis('off')
axes[0].set_title("Original Image vs.Transformed Image")
plot_matches(axes[1], img1, img3, keypoints1, keypoints3, matches13)
axes[1].axis('off')
axes[1].set_title("Original Image vs.Transformed Image")
plt.show()


# ORB keypoints/descriptors for one image and three transformed versions,
# matched back to the original with cross-checked Hamming matching.
img1 = rgb2gray(imread('me5.jpg'))
img2 = transform.rotate(img1, 180)
affine_trans = transform.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                                         translation=(0, -200))
img3 = transform.warp(img1, affine_trans)
img4 = transform.resize(rgb2gray(imread('me6.jpg')), img1.shape, anti_aliasing=True)
descriptor_extractor = ORB(n_keypoints=200)


def _orb_features(image):
    # Run detection + extraction and hand back the resulting arrays.
    descriptor_extractor.detect_and_extract(image)
    return descriptor_extractor.keypoints, descriptor_extractor.descriptors


keypoints1, descriptors1 = _orb_features(img1)
keypoints2, descriptors2 = _orb_features(img2)
keypoints3, descriptors3 = _orb_features(img3)
keypoints4, descriptors4 = _orb_features(img4)
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
matches14 = match_descriptors(descriptors1, descriptors4, cross_check=True)
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(20, 25))
plt.gray()
plot_matches(axes[0], img1, img2, keypoints1, keypoints2, matches12)
axes[0].axis('off')
axes[0].set_title("Original Image vs.Transformed Image", size=20)
plot_matches(axes[1], img1, img3, keypoints1, keypoints3, matches13)
axes[1].axis('off')
axes[1].set_title("Original Image vs.Transformed Image", size=20)
plot_matches(axes[2], img1, img4, keypoints1, keypoints4, matches14)
axes[2].axis('off')
axes[2].set_title("Image1 vs. Image2", size=20)
plt.show()


# ORB matching with OpenCV's brute-force Hamming matcher (crossCheck on).
img1 = cv2.imread('books.png', 0)
# BUG FIX: the path was '/book.png' (absolute); the SIFT example below reads
# the same file as 'book.png', so use the consistent relative path.
img2 = cv2.imread('book.png', 0)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)  # best (smallest) first
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:20], None, flags=2)
plt.figure(figsize=(20, 10))  # BUG FIX: keyword was misspelled `sigsize` (TypeError)
plt.imshow(img3)
plt.show()



# SIFT matching with Lowe's ratio test: keep a k-NN match only when the
# best candidate is clearly better than the runner-up.
print(cv2.__version__)
img1 = cv2.imread('books.png', 0)
img2 = cv2.imread('book.png', 0)
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good_matches = [[best] for best, second in matches
                if best.distance < 0.75 * second.distance]
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good_matches, None, flags=2)
plt.imshow(img3)
plt.show()



# 7.6.1 Visualise one example of each Haar-like feature type.
images = [np.zeros((2, 2)), np.zeros((2, 2)), np.zeros((3, 3)),
          np.zeros((3, 3)), np.zeros((2, 2))]
feature_types = ['type-2-x', 'type-2-y', 'type-3-x', 'type-3-y', 'type-4']
fig, axes = plt.subplots(3, 2, figsize=(5, 7))
for ax, img, feat_t in zip(np.ravel(axes), images, feature_types):
    feat_coords, _ = haar_like_feature_coord(img.shape[0], img.shape[1], feat_t)
    haar_feature = draw_haar_like_feature(
        img, 0, 0, img.shape[0], img.shape[1], feat_coords,
        max_n_features=1, random_state=0,
        color_positive_block=(1.0, 0.0, 0.0),
        color_negative_block=(0.0, 0.0, 1.0), alpha=0.8)
    ax.imshow(haar_feature)
    ax.set_title(feat_t)
    ax.set_axis_off()
plt.axis('off')
plt.tight_layout()
plt.show()



# 7.6.2 Face and eye detection with Haar cascade classifiers.
# NOTE(review): './#' looks garbled; it should point at the directory that
# holds OpenCV's haarcascade XML files — confirm the real path.
opencv_haar_path = './#'
# BUG FIX: cv2.Cascadeclassifier is a typo; the class is cv2.CascadeClassifier.
face_cascade = cv2.CascadeClassifier(opencv_haar_path + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(opencv_haar_path + 'haarcascade_eye.xml')
img = cv2.imread('lena.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.2, 5)
print(len(faces))
for (x, y, w, h) in faces:
    # Blue rectangle around each face, then search for eyes inside it.
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    roi_gray = gray[y:y + h, x:x + w]
    roi_color = img[y:y + h, x:x + w]
    eyes = eye_cascade.detectMultiScale(roi_gray)
    print(eyes)
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
# BUG FIX: the original called cv2.imwrite('', img) — an empty filename —
# inside the innermost loop; write the annotated image once, after all the
# rectangles have been drawn.
cv2.imwrite('lena_faces_detected.jpg', img)