# coding=utf-8
##
## Author: jmdvirus@aliyun.com
##
## Create: 2018-08-03 (Friday) 16:17:15
##

import cv2
import numpy as np
import os
from scipy import ndimage

class FaceDetection:
    """Face detector backed by an OpenCV Haar cascade classifier."""

    def __init__(self,
                 confpath="D:/proj/apps/opencv-3.4.0/data",
                 haarfile="haarcascades/haarcascade_frontalface_alt2.xml"):
        """Load the Haar cascade.

        confpath -- root of OpenCV's `data` directory (parameterized so
                    callers are no longer tied to one machine's layout;
                    the default preserves the original behavior).
        haarfile -- cascade XML file, relative to confpath.

        Raises IOError when the cascade cannot be loaded. Previously a
        missing file produced an empty classifier that silently detected
        nothing.
        """
        self.confpath = confpath
        self.haarfile = haarfile
        cascade_path = self.confpath + "/" + self.haarfile
        self.detecthandle = cv2.CascadeClassifier(cascade_path)
        if self.detecthandle.empty():
            raise IOError("cannot load Haar cascade: %s" % cascade_path)

    def detect(self, img):
        """Draw a red box around each face found in BGR image *img*;
        returns *img* (modified in place)."""
        for (x, y, w, h) in self.detect_rects(img):
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        return img

    def detect_rects(self, img):
        """Return the face rectangles (x, y, w, h) found in BGR image *img*."""
        # cv2.imread / VideoCapture produce BGR frames, so convert with
        # COLOR_BGR2GRAY (the original used COLOR_RGB2GRAY, which swaps
        # the red/blue channel weights).
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Keyword arguments: passing flags/minSize positionally is fragile
        # across OpenCV versions (flags is ignored in 3.x anyway).
        return self.detecthandle.detectMultiScale(
            gray, scaleFactor=1.1, minNeighbors=4, minSize=(30, 30))

class VConvolutionFilter(object):
    """Base class for filters that convolve an image with a fixed kernel.

    Subclasses supply the kernel; `apply` delegates to cv2.filter2D.
    """

    def __init__(self, kernel):
        # The kernel array is stored as given; subclasses build their own.
        self._kernel = kernel

    def apply(self, src, dst):
        """Convolve *src* with the stored kernel, writing into *dst*.

        ddepth=-1 keeps the destination at the same bit depth as the
        source.
        """
        cv2.filter2D(src, -1, self._kernel, dst)

class SharpenFilter(VConvolutionFilter):
    """Sharpening filter: 3x3 kernel, 9 at the center and -1 elsewhere.

    The weights sum to 1, so overall brightness is preserved.
    """

    def __init__(self):
        kernel = np.full((3, 3), -1)
        kernel[1, 1] = 9
        super(SharpenFilter, self).__init__(kernel)

class FindEdgesFilter(VConvolutionFilter):
    """Edge-detection filter: 3x3 kernel, 8 at the center and -1 elsewhere.

    The weights sum to 0, so flat regions go black and edges stand out.
    """

    def __init__(self):
        kernel = np.full((3, 3), -1)
        kernel[1, 1] = 8
        super(FindEdgesFilter, self).__init__(kernel)

class BlurFilter(VConvolutionFilter):
    """Box-blur filter: 5x5 kernel of uniform weights 0.04 (= 1/25),
    i.e. each output pixel is the mean of its 5x5 neighborhood."""

    def __init__(self):
        kernel = np.full((5, 5), 0.04)
        super(BlurFilter, self).__init__(kernel)

class EmbossFilter(VConvolutionFilter):
    """Emboss filter: asymmetric 3x3 kernel that darkens one diagonal and
    brightens the other, giving a relief effect."""

    def __init__(self):
        kernel = np.array([-2, -1, 0,
                           -1, 1, 1,
                           0, 1, 2]).reshape(3, 3)
        super(EmbossFilter, self).__init__(kernel)

class AVBase:
    """Grab bag of OpenCV demo routines: high-pass filters, contours,
    Hough line/circle detection, SIFT keypoints, and affine warps.

    Most methods open HighGUI windows and block in cv2.waitKey(), so they
    are meant for interactive experimentation, not library use. Written
    against the OpenCV 3.x API (the contour helper also tolerates 4.x).
    """

    # High-pass (edge detection) kernels used by hpf_image.
    kernel_3x3 = np.array([[-1, -1, -1],
                           [-1, 8, -1],
                           [-1, -1, -1]])
    kernel_5x5 = np.array([[-1, -1, -1, -1, -1],
                           [-1, 1, 2, 1, -1],
                           [-1, 2, 4, 2, -1],
                           [-1, 1, 2, 1, -1],
                           [-1, -1, -1, -1, -1]])

    @staticmethod
    def _find_contours(binary, mode):
        """Version-proof wrapper around cv2.findContours.

        OpenCV 3.x returns (image, contours, hierarchy) while 4.x returns
        (contours, hierarchy); the contour list is second-from-last in
        both, so indexing with [-2] works everywhere.
        """
        return cv2.findContours(binary, mode, cv2.CHAIN_APPROX_SIMPLE)[-2]

    def numpy_array_image(self):
        """Display the same 120000 random bytes both as a 300x400
        grayscale image and as a 100x400 3-channel (BGR) image."""
        raw = bytearray(os.urandom(120000))
        flat = np.array(raw)
        gray_image = flat.reshape(300, 400)      # 300*400 == 120000
        bgr_image = flat.reshape(100, 400, 3)    # 100*400*3 == 120000
        cv2.imshow('gray', gray_image)
        cv2.imshow('bgr', bgr_image)
        cv2.waitKey()

    def change_image_value(self, file):
        """Zero channel 2 (red, in OpenCV's BGR order) of image *file*
        and save the result as ChangeValue.png."""
        img = cv2.imread(file)
        img[:, :, 2] = 0
        cv2.imwrite('ChangeValue.png', img)

    def hpf_image(self, file):
        """Compare two convolution high-pass filters against a Gaussian
        high-pass (original minus blurred) on a grayscale image."""
        img = cv2.imread(file, 0)  # 0 -> load as grayscale
        k3 = ndimage.convolve(img, self.kernel_3x3)
        k5 = ndimage.convolve(img, self.kernel_5x5)

        blurred = cv2.GaussianBlur(img, (11, 11), 0)
        # NOTE(review): uint8 subtraction wraps around rather than
        # saturating; kept as-is to preserve the demo's original output.
        g_hpf = img - blurred

        cv2.imshow('origin', img)
        cv2.imshow('3x3', k3)
        cv2.imshow('5x5', k5)
        cv2.imshow('g_hpf', g_hpf)
        cv2.waitKey(0)

    def create_image(self, file):
        """Print the shape of *file* and display a synthetic 300x800
        black image with a dim blue patch in its lower-right region."""
        img = cv2.imread(file)
        print("shape: ", img.shape)
        imgc = np.zeros((300, 800, 3), np.uint8)
        imgc[100:200, 600:800, 0] = 55  # channel 0 == blue in BGR
        cv2.imshow('x', imgc)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def stoke_edges(self, src, dst, blurksize = 7, edgeksize = 5):
        """Stroke (darken) the edges of BGR image *src* into *dst*.

        Median-blurs (when blurksize >= 3), takes a Laplacian edge map,
        then scales every channel by the inverted, normalized edge
        strength so edges come out black.
        (The method name keeps the original "stoke" spelling so existing
        callers keep working.)
        """
        if blurksize >= 3:
            blurred = cv2.medianBlur(src, blurksize)
            gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
        else:
            gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

        # In-place Laplacian: gray becomes the 8-bit edge map.
        cv2.Laplacian(gray, cv2.CV_8U, gray, ksize = edgeksize)
        normalized_inverse_alpha = (1.0 / 255) * (255 - gray)
        channels = cv2.split(src)
        for channel in channels:
            channel[:] = channel * normalized_inverse_alpha
        cv2.merge(channels, dst)

    def conv_filter(self, file):
        """Show *file* next to its Canny edge map.

        (The VConvolutionFilter subclasses defined above can be swapped
        in here instead of Canny for kernel-filter experiments.)
        """
        img = cv2.imread(file)
        edges = cv2.Canny(img, 50, 300)
        cv2.namedWindow('origin', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('origin', img)
        cv2.imshow('gen', edges)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def contour_image(self, img):
        """Threshold grayscale image *img* at 127 and return a BGR copy
        with all contours drawn in green."""
        ret, thresh = cv2.threshold(img, 127, 255, 0)
        contours = self._find_contours(thresh, cv2.RETR_TREE)
        color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        return cv2.drawContours(color, contours, -1, (0, 255, 0), 2)

    def contour(self, file):
        """Contour demo on a synthetic image (white square on black).

        *file* is accepted but unused; the parameter is kept for
        signature compatibility with existing callers.
        """
        img = np.zeros((200, 200), dtype = np.uint8)
        img[50:150, 50:150] = 255
        cv2.imshow('contours', self.contour_image(img))
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def contour_new(self, file):
        """Detect external contours in *file* (half-sized via pyrDown)
        and draw, for each: the bounding rect (green), the min-area
        rotated rect (red) and the min enclosing circle (green), plus the
        raw contours in blue."""
        img = cv2.pyrDown(cv2.imread(file, cv2.IMREAD_UNCHANGED))

        ret, thresh = cv2.threshold(
            cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
            127, 255, cv2.THRESH_BINARY)
        contours = self._find_contours(thresh, cv2.RETR_EXTERNAL)

        for c in contours:
            # Axis-aligned bounding box.
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # Minimum-area (rotated) rectangle. np.intp replaces the
            # deprecated np.int0 alias (removed in NumPy 2.0); values
            # are identical.
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = box.astype(np.intp)
            cv2.drawContours(img, [box], 0, (0, 0, 255), 3)

            # Minimum enclosing circle.
            (cx, cy), radius = cv2.minEnclosingCircle(c)
            img = cv2.circle(img, (int(cx), int(cy)), int(radius),
                             (0, 255, 0), 2)

        cv2.drawContours(img, contours, -1, (255, 0, 0), 1)
        cv2.imshow('x', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def line_detect_image(self, img):
        """Draw every probabilistic Hough line segment found in BGR image
        *img* in green; returns *img* (modified in place)."""
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 50, 120)
        # BUG FIX: minLineLength/maxLineGap must be keyword arguments.
        # Positionally, the fifth parameter of HoughLinesP is the
        # optional `lines` output array, so the old call passed 20 as
        # `lines` and 5 as minLineLength, leaving maxLineGap defaulted.
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100,
                                minLineLength = 20, maxLineGap = 5)
        if lines is not None:  # None when no segment reaches threshold
            # lines has shape (N, 1, 4); the old code iterated lines[0]
            # and therefore drew only the first segment.
            for line in lines:
                x1, y1, x2, y2 = line[0]
                cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        return img

    def line_detect(self, file):
        """Hough line demo on *file*: shows the Canny edge map and the
        image with all detected segments drawn (see line_detect_image)."""
        img = cv2.imread(file)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 50, 120)
        self.line_detect_image(img)

        cv2.imshow('x', edges)
        cv2.imshow('y', img)
        cv2.waitKey()
        cv2.destroyAllWindows()

    def circle_detect(self, file):
        """Detect circles in *file* with the Hough gradient method and
        draw each circle outline (green) and center (red)."""
        origimg = cv2.imread(file)
        gray_img = cv2.cvtColor(origimg, cv2.COLOR_BGR2GRAY)
        # Median blur suppresses noise that produces false circles.
        img = cv2.medianBlur(gray_img, 5)

        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 120,
                param1 = 100, param2 = 30, minRadius = 0, maxRadius = 0)

        # BUG FIX: HoughCircles returns None when nothing is found; the
        # old code crashed in np.around(None) in that case.
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for cx, cy, r in circles[0, :]:
                cv2.circle(origimg, (cx, cy), r, (0, 255, 0), 2)  # outline
                cv2.circle(origimg, (cx, cy), 2, (0, 0, 255), 3)  # center

        cv2.imshow('x', origimg)
        cv2.waitKey()
        cv2.destroyAllWindows()

    def do_sift(self, img):
        """Detect SIFT keypoints in BGR image *img* and draw them (with
        size and orientation) onto it; returns the annotated image.

        Requires the opencv-contrib build (cv2.xfeatures2d) on 3.x.
        """
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        sift = cv2.xfeatures2d.SIFT_create()
        keypoints, descriptor = sift.detectAndCompute(gray, None)
        # BUG FIX: the flag constant is ..._DRAW_RICH_KEYPOINTS (plural);
        # the singular spelling raises AttributeError at runtime.
        img = cv2.drawKeypoints(
            image = img, outImage = img, keypoints = keypoints,
            flags = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
            color = (51, 163, 236))
        return img

    def demo_for_affine(self, imagefile):
        """Affine transform demo: scaling, scale+shear+translate, and
        rotation applied to a grayscale version of *imagefile*."""
        image = cv2.imread(imagefile)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        h, w = image.shape[:2]

        # Uniform shrink to half size.
        A1 = np.array([[0.5, 0, 0], [0, 0.5, 0]], np.float32)
        d1 = cv2.warpAffine(image, A1, (w, h), borderValue = 125)

        # Strong shrink with a slight shear, then translate.
        A2 = np.array([[0.1, 0.01, w/10], [0.04, 0.1, h/12]], np.float32)
        d2 = cv2.warpAffine(image, A2, (w, h), borderValue = 125)

        # Rotate d2 by 30 degrees around (w/4, h/4), scale 1.
        A3 = cv2.getRotationMatrix2D((w/4.0, h/4.0), 30, 1)
        d3 = cv2.warpAffine(d2, A3, (w, h), borderValue = 125)

        cv2.imshow("d1", d1)
        cv2.imshow("d2", d2)
        cv2.imshow("d3", d3)

        cv2.waitKey(0)
        cv2.destroyAllWindows()

