#import cv2
#import numpy as np
#import sys
#
#img = cv2.imread('/dev/shm/mark/mark1.bmp')
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#gray = np.float32(gray)
#dst = cv2.cornerHarris(gray, 11, 17, 0.4 )
#
#qualityLevel = 0.01;  
#minDistance = 10;  
#blockSize = 3;  
#useHarrisDetector = false;  
#k = 0.04; 
#dst = cv2.goodFeaturesToTrack(gray)
#img[dst>0.01 * dst.max()] = [0, 0, 255] 
#while (True):
#    cv2.namedWindow('corners', cv2.WINDOW_NORMAL)
#    cv2.imshow('corners', img)
#    if cv2.waitKey(100) & 0xff == ord("q"):
#        break
#cv2.destroyAllWindows()

#!/usr/bin/env python
# -*- coding: utf-8 -*-

#"""
#Author:
#Purpose: track high-temperature regions (corner features) in thermal frames.
#"""
#
import numpy as np
import cv2
import time

#cap=cv2.VideoCapture("my.mp4")

# Feature-detection tuning parameters.
# NOTE(review): the cv2.goodFeaturesToTrack call inside getkpoints()
# hardcodes its own values and uses neither these scalars nor `paras`;
# they are kept for reference/experimentation.
feasize = 1
max_corners = 200   # renamed from `max`, which shadowed the builtin max()
qua = 0.05          # qualityLevel
mindis = 7          # minDistance between accepted corners
blocksize = 10      # blockSize for the corner detector
usehaar = True      # useHarrisDetector flag
k = 0.04            # Harris detector free parameter

# The same parameters bundled for **kwargs expansion (currently unused).
paras = dict(maxCorners=200,
             qualityLevel=0.05,
             minDistance=7,
             blockSize=10,
             useHarrisDetector=True,
             k=0.04)

keypoints = list()  # shadowed by locals in getkpoints()/process(); unused here
mask = None
marker = None


def getkpoints(imag, input1):
    """Detect corner keypoints in the right-hand region of *input1*.

    Parameters
    ----------
    imag : numpy.ndarray
        2-D grayscale image; used only as a validity check (must unpack
        to a 2-tuple shape), mirroring the original mask-sizing logic.
    input1 : numpy.ndarray
        2-D grayscale image searched for corners. Columns 0-199 are
        cropped away first, so returned x coordinates are offset by
        -200 relative to the full frame (the caller adds 200 back).

    Returns
    -------
    list of (x, y) float pairs (possibly empty), or None when *imag*
    is not a 2-D array (e.g. None from a failed imread).
    """
    rows, cols = input1.shape
    # Only search to the right of column 200; process() re-adds the offset.
    roi = input1[0:rows, 200:cols]

    try:
        # Unpack fails with AttributeError (no .shape, e.g. None) or
        # ValueError (not 2-D, e.g. a BGR image) -- the two cases the
        # original bare `except:` was meant to catch.
        w, h = imag.shape
    except (AttributeError, ValueError):
        return None

    # Detector parameters are hardcoded here; the module-level `paras`
    # dict is intentionally not used (the masked call was abandoned).
    kp = cv2.goodFeaturesToTrack(roi, 12, 0.1, 150, blockSize=31, k=0.1)

    keypoints = []
    if kp is not None and len(kp) > 0:
        for x, y in np.float32(kp).reshape(-1, 2):
            keypoints.append((x, y))
    return keypoints


def process(image):
    """Draw red circles on detected corner points of a BGR *image*.

    The image is annotated in place and also returned for convenience.

    Parameters
    ----------
    image : numpy.ndarray
        BGR frame as returned by cv2.imread.

    Returns
    -------
    numpy.ndarray
        The same array with circles drawn at each detected corner.
    """
    grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    equalized = cv2.equalizeHist(grey)
    # Detection runs on the raw grayscale; the equalized image is only
    # used for the shape check inside getkpoints().
    keypoints = getkpoints(equalized, grey)

    if keypoints is not None and len(keypoints) > 0:
        for x, y in keypoints:
            # +200 undoes the column crop applied in getkpoints().
            # Cast BOTH coordinates to int: the original passed a numpy
            # float32 `y`, which modern OpenCV rejects as a center.
            cv2.circle(image, (int(x + 200), int(y)), 5, (0, 0, 255), 3)
    return image


# Batch-process frames mark1.bmp .. mark7.bmp and time the run.
# time.clock() was removed in Python 3.8; perf_counter() is the
# documented replacement for elapsed-time measurement.
start = time.perf_counter()
for idx in range(1, 8):
    name = 'H:/mark/mark' + str(idx) + '.bmp'
    frame = cv2.imread(name)
    if frame is None:
        # Missing/unreadable file -- skip instead of crashing in process().
        print('Failed to load image file:', name)
        continue
    annotated = process(frame)
    outName = 'H:/mark/' + str(idx) + '.bmp'
    cv2.imwrite(outName, annotated)

end = time.perf_counter()
print("run time:", end - start)

##cv2.namedWindow('corners', cv2.WINDOW_NORMAL)
##cv2.imshow('corners',p2)
##
##cv2.waitKey(30000)
##cv2.destroyAllWindows()
#
##while (cap.isOpened()):
##    ret,frame=cap.read()
##    frame=process(frame)
##    cv2.imshow('frame',frame)
##    if cv2.waitKey(1)&0xFF==ord('q'):
##        break    
##    
##cv2.waitKey(0)
##cv2.destroyAllWindows()



#!/usr/bin/env python

#'''
#Texture flow direction estimation.
#Sample shows how cv2.cornerEigenValsAndVecs function can be used
#to estimate image texture flow direction.
#Usage:
#    texture_flow.py [<image>]
#'''
#
## Python 2/3 compatibility
#from __future__ import print_function
#
#import numpy as np
#import cv2
#
#if __name__ == '__main__':
#    import sys
#    fn = '/dev/shm/mark/mark1.bmp'
#
#    img = cv2.imread(fn)
#    if img is None:
#        print('Failed to load image file:', fn)
#        sys.exit(1)
#
#    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#    h, w = img.shape[:2]
#
#    eigen = cv2.cornerEigenValsAndVecs(gray, 15, 3)
#    eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
#    flow = eigen[:,:,2]
#
#    vis = img.copy()
#    vis[:] = (192 + np.uint32(vis)) / 2
#    d = 12
#    points =  np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2)
#    for x, y in np.int32(points):
#        vx, vy = np.int32(flow[y, x]*d)
#        cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.LINE_AA)
#    #cv2.namedWindow('input', cv2.WINDOW_NORMAL)
#    #cv2.imshow('input', img)
#    cv2.namedWindow('flow', cv2.WINDOW_NORMAL)
#    cv2.imshow('flow', vis)
#cv2.waitKey(20000)
#cv2.destroyAllWindows()

#!/usr/bin/env python

##'''
##Coherence-enhancing filtering example
##=====================================
##inspired by
##  Joachim Weickert "Coherence-Enhancing Shock Filters"
##  http://www.mia.uni-saarland.de/Publications/weickert-dagm03.pdf
##'''
##
### Python 2/3 compatibility
##from __future__ import print_function
##import sys
##PY3 = sys.version_info[0] == 3
##
##if PY3:
##    xrange = range
##
##import numpy as np
##import cv2
##
##def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
##    h, w = img.shape[:2]
##
##    for i in xrange(iter_n):
##        print(i)
##
##        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
##        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
##        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
##        x, y = eigen[:,:,1,0], eigen[:,:,1,1]
##
##        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
##        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
##        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
##        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
##        m = gvv < 0
##
##        ero = cv2.erode(img, None)
##        dil = cv2.dilate(img, None)
##        img1 = ero
##        img1[m] = dil[m]
##        img = np.uint8(img*(1.0 - blend) + img1*blend)
##    print('done')
##    return img
##
##
##if __name__ == '__main__':
##    import sys
##    try:
##        fn = sys.argv[1]
##    except:
##        fn = '/dev/shm/baboon.jpg'
##
##    src = cv2.imread(fn)
##
##    def nothing(*argv):
##        pass
##
##    def update():
##        sigma = cv2.getTrackbarPos('sigma', 'control')*2+1
##        str_sigma = cv2.getTrackbarPos('str_sigma', 'control')*2+1
##        blend = cv2.getTrackbarPos('blend', 'control') / 10.0
##        print('sigma: %d  str_sigma: %d  blend_coef: %f' % (sigma, str_sigma, blend))
##        dst = coherence_filter(src, sigma=sigma, str_sigma = str_sigma, blend = blend)
##        cv2.namedWindow('dst', cv2.WINDOW_NORMAL)
##        cv2.imshow('dst', dst)
##
##    cv2.namedWindow('control', cv2.WINDOW_NORMAL)
##    cv2.createTrackbar('sigma', 'control', 9, 15, nothing)
##    cv2.createTrackbar('blend', 'control', 7, 10, nothing)
##    cv2.createTrackbar('str_sigma', 'control', 9, 15, nothing)
##
##
##    print('Press SPACE to update the image\n')
##
##    cv2.namedWindow('src', cv2.WINDOW_NORMAL)
##    cv2.imshow('src', src)
##    update()
##    while True:
##        ch = cv2.waitKey()
##        if ch == ord(' '):
##            update()
##        if ch == 27:
##            break
##cv2.destroyAllWindows()
