'''
opencv api
'''

import cv2
import numpy
import time

# Print the OpenCV version number
def print_cv_version():
    """Print the installed OpenCV version string."""
    version = cv2.__version__
    print(version)

# Split an image into its B/G/R channels
def split_bgr_image() -> None:
    """Load "lena.png" and display each colour channel separately.

    Demonstrates channel separation both via NumPy slicing and via
    cv2.split(); blocks until a key is pressed, then closes all windows.
    """
    bgr = cv2.imread("lena.png")
    if bgr is None:  # guard against a missing file (consistent with the other demos)
        return
    cv2.imshow("bgr", bgr)

    # Channel separation via array slicing (these are views, not copies)
    b = bgr[:, :, 0]
    cv2.imshow("b", b)

    g = bgr[:, :, 1]
    cv2.imshow("g", g)

    r = bgr[:, :, 2]
    cv2.imshow("r", r)

    # Channel separation via the built-in helper (returns copies)
    b1, g1, r1 = cv2.split(bgr)
    cv2.imshow("b1", b1)
    cv2.imshow("g1", g1)
    cv2.imshow("r1", r1)

    cv2.waitKey()
    cv2.destroyAllWindows()
    
# Split an image into channels, then merge them back together
def split_bgr_image_and_merge() -> None:
    """Split "lena.png" into B/G/R planes and re-merge them.

    Displays the individual planes and the re-merged image (which should
    be identical to the original); blocks until a key is pressed.
    """
    bgr = cv2.imread("lena.png")
    if bgr is None:  # guard against a missing file (consistent with the other demos)
        return
    cv2.imshow("bgr", bgr)

    # Separate channels with the built-in helper
    b1, g1, r1 = cv2.split(bgr)
    cv2.imshow("b1", b1)
    cv2.imshow("g1", g1)
    cv2.imshow("r1", r1)

    # Recombine the planes in B, G, R order
    merged = cv2.merge([b1, g1, r1])
    cv2.imshow("merged", merged)

    cv2.waitKey()
    cv2.destroyAllWindows()
    
# Image scaling
def img_resize_test1():
    """Shrink images/lena.png to a 40x40 thumbnail and display both."""
    source = cv2.imread('images/lena.png')
    if source is None:
        return

    cv2.imshow('origin', source)
    cv2.waitKey()

    thumbnail = cv2.resize(source, (40, 40))
    cv2.imshow('dest', thumbnail)
    cv2.waitKey()

'''
图像旋转
'''
def img_rotate_test1():
    """Rotate images/lena.png by 45 degrees around its centre and display it."""
    src = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if src is None:
        return

    src_h, src_w = src.shape

    # getRotationMatrix2D takes the centre as (x, y) = (col, row).  The
    # original passed (h/2, w/2), which is only correct for square images.
    mat = cv2.getRotationMatrix2D((src_w / 2, src_h / 2), 45.0, 1.0)
    print(f"mat={mat}")

    # Apply the affine transform.  Common affine transforms: translation,
    # scaling, rotation, shearing.  dsize is (width, height) — again (x, y)
    # order, not the NumPy (rows, cols) order.
    dest = cv2.warpAffine(src=src, M=mat, dsize=(src_w, src_h))
    cv2.imshow('dest', dest)
    cv2.waitKey()

'''
图像翻转
'''
def img_flip_test1():
    """Show the source image flipped vertically and horizontally."""
    src = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if src is None:
        return

    flipped_vertical = cv2.flip(src=src, flipCode=0)    # 0 = flip around the x-axis (up/down)
    flipped_horizontal = cv2.flip(src=src, flipCode=1)  # 1 = flip around the y-axis (left/right)

    cv2.imshow('src', src)
    cv2.imshow('dest0', flipped_vertical)
    cv2.imshow('dest1', flipped_horizontal)

    cv2.waitKey()

'''
图像阈值处理
阈值处理分类：简单阈值处理、自适应阈值处理、otsu二值化
'''

'''
简单阈值处理
'''
def img_threshold_test1():
    """Demonstrate the five simple (global) thresholding modes.

    Per-pixel rules, with thresh=128 and maxval=255:
      THRESH_BINARY:      x = 255 if x > 128 else 0
      THRESH_BINARY_INV:  x = 0   if x > 128 else 255
      THRESH_TRUNC:       x = 128 if x > 128 else x
      THRESH_TOZERO:      x = x   if x > 128 else 0
      THRESH_TOZERO_INV:  x = 0   if x > 128 else x
    """
    src = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if src is None:
        return

    # Window name paired with its thresholding mode
    modes = (
        ('dest1', cv2.THRESH_BINARY),
        ('dest2', cv2.THRESH_BINARY_INV),
        ('dest3', cv2.THRESH_TRUNC),
        ('dest4', cv2.THRESH_TOZERO),
        ('dest5', cv2.THRESH_TOZERO_INV),
    )

    cv2.imshow('src', src)
    for window_name, mode in modes:
        _, result = cv2.threshold(src=src, thresh=128, maxval=255, type=mode)
        cv2.imshow(window_name, result)

    cv2.waitKey()

'''
自适应阈值处理
'''
def img_threshold_test2():
    """Compare mean-based and Gaussian-based adaptive thresholding."""
    src = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if src is None:
        return

    # Per-pixel threshold is the plain mean of the blockSize neighbourhood plus C:
    #   x = 255 if x > mean(block) + C else 0
    dest1 = cv2.adaptiveThreshold(src=src, maxValue=255,
                                  adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,
                                  thresholdType=cv2.THRESH_BINARY, blockSize=5, C=0)

    # Same idea, but the neighbourhood mean is Gaussian-weighted:
    #   x = 255 if x > gaussian_weighted_mean(block) + C else 0
    dest2 = cv2.adaptiveThreshold(src=src, maxValue=255,
                                  adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  thresholdType=cv2.THRESH_BINARY, blockSize=5, C=0)

    cv2.imshow('src', src)
    cv2.imshow('dest1', dest1)
    cv2.imshow('dest2', dest2)
    cv2.waitKey()

'''
otsu阈值处理
'''
def img_threshold_test3():
    """Binarise the image with an Otsu-selected threshold."""
    src = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if src is None:
        return

    # With THRESH_OTSU the thresh=128 argument is ignored; the optimal
    # threshold is computed from the image histogram and returned instead.
    otsu_threshold, binarised = cv2.threshold(src=src, thresh=128, maxval=255,
                                              type=cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    cv2.imshow('src', src)
    cv2.imshow('dest1', binarised)
    cv2.waitKey()


'''
图像滤波(一般应用于灰度图像)
常用滤波方法：均值滤波、高斯滤波、中值滤波、双边滤波
'''

'''
均值滤波
'''
def img_blur_test1():
    """Apply a 5x5 box (mean) filter and display the result."""
    image = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if image is None:
        return

    smoothed = cv2.blur(src=image, ksize=(5, 5))

    cv2.imshow('src', image)
    cv2.imshow('dest1', smoothed)
    cv2.waitKey()

'''
高斯滤波
'''
def img_gaussian_blur_test1():
    """Apply a 5x5 Gaussian filter and display the result."""
    image = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if image is None:
        return

    # sigmaX/sigmaY of 0 let OpenCV derive sigma from the kernel size
    smoothed = cv2.GaussianBlur(src=image, ksize=(5, 5), sigmaX=0, sigmaY=0)

    cv2.imshow('src', image)
    cv2.imshow('dest1', smoothed)
    cv2.waitKey()

'''
中值滤波
'''
def img_median_blur_test1():
    """Apply a 5x5 median filter and display the result."""
    image = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if image is None:
        return

    smoothed = cv2.medianBlur(src=image, ksize=5)

    cv2.imshow('src', image)
    cv2.imshow('dest1', smoothed)
    cv2.waitKey()


'''
双边滤波
原理：？？？
'''
def img_bilateral_filter_test1():
    """Apply an edge-preserving bilateral filter and display the result.

    The filter weights combine spatial closeness (sigmaSpace) and
    intensity similarity (sigmaColor), smoothing flat regions while
    keeping strong edges.
    """
    image = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if image is None:
        return

    smoothed = cv2.bilateralFilter(src=image, d=9, sigmaColor=75, sigmaSpace=75)

    cv2.imshow('src', image)
    cv2.imshow('dest1', smoothed)
    cv2.waitKey()

'''
图像形态学操作(一般应用于二值图像)
常用的图像形态学操作：腐蚀、膨胀、开运算、闭运算、形态学梯度、顶帽运算、黑帽运算
'''

'''
腐蚀
原理：结构元和被操作区域完全一致则设置锚点为白，否则为黑
'''
def img_erode_test1():
    """Binarise the image, then erode it with a 3x3 rectangular kernel.

    Erosion keeps the anchor white only where the structuring element
    fits entirely on white pixels.
    """
    gray = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if gray is None:
        return

    _, binary = cv2.threshold(src=gray, thresh=128, maxval=255, type=cv2.THRESH_BINARY)

    structuring_element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(3, 3))
    eroded = cv2.erode(src=binary, kernel=structuring_element)

    cv2.imshow('src', gray)
    cv2.imshow('binary', binary)
    cv2.imshow('dest', eroded)
    cv2.waitKey()

'''
膨胀
原理：结构元和被操作区域有交集则设置锚点为白，否则为黑
'''
def img_dilate_test1():
    """Binarise the image, then dilate it with a 3x3 rectangular kernel.

    Dilation sets the anchor white whenever the structuring element
    overlaps any white pixel.
    """
    gray = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if gray is None:
        return

    _, binary = cv2.threshold(src=gray, thresh=128, maxval=255, type=cv2.THRESH_BINARY)

    structuring_element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(3, 3))
    dilated = cv2.dilate(src=binary, kernel=structuring_element)

    cv2.imshow('src', gray)
    cv2.imshow('binary', binary)
    cv2.imshow('dest', dilated)
    cv2.waitKey()

'''
开运算
原理：先腐蚀后膨胀
作用：去除小物体，平滑物体边界
'''
def img_morphology_open_test1():
    """Morphological opening (erode then dilate) on the binarised image.

    Opening removes small white specks and smooths object boundaries.
    """
    gray = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if gray is None:
        return

    _, binary = cv2.threshold(src=gray, thresh=128, maxval=255, type=cv2.THRESH_BINARY)

    structuring_element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(3, 3))
    opened = cv2.morphologyEx(src=binary, op=cv2.MORPH_OPEN, kernel=structuring_element)

    cv2.imshow('src', gray)
    cv2.imshow('binary', binary)
    cv2.imshow('dest', opened)
    cv2.waitKey()

'''
闭运算
原理：先膨胀再腐蚀
作用：填充小孔洞
'''
def img_morphology_close_test1():
    """Morphological closing (dilate then erode) on the binarised image.

    Closing fills small dark holes inside white regions.
    """
    gray = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if gray is None:
        return

    _, binary = cv2.threshold(src=gray, thresh=128, maxval=255, type=cv2.THRESH_BINARY)

    structuring_element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(3, 3))
    closed = cv2.morphologyEx(src=binary, op=cv2.MORPH_CLOSE, kernel=structuring_element)

    cv2.imshow('src', gray)
    cv2.imshow('binary', binary)
    cv2.imshow('dest', closed)
    cv2.waitKey()

'''
形态学梯度
原理：膨胀图像减去腐蚀图像
作用：提取物体边缘
'''
def img_morphology_grad_test1():
    """Morphological gradient (dilation minus erosion) on the binarised image.

    The gradient leaves only the object outlines.
    """
    gray = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if gray is None:
        return

    _, binary = cv2.threshold(src=gray, thresh=128, maxval=255, type=cv2.THRESH_BINARY)

    structuring_element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(3, 3))
    gradient = cv2.morphologyEx(src=binary, op=cv2.MORPH_GRADIENT, kernel=structuring_element)

    cv2.imshow('src', gray)
    cv2.imshow('binary', binary)
    cv2.imshow('dest', gradient)
    cv2.waitKey()

'''
顶帽操作
原理：原图减去开运算结果
作用：提取比背景亮的微小物体
'''
def img_morphology_tophat():
    """Top-hat transform (source minus its opening) on the grayscale image.

    Highlights small details that are brighter than their surroundings.
    """
    gray = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if gray is None:
        return

    structuring_element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(3, 3))
    tophat = cv2.morphologyEx(src=gray, op=cv2.MORPH_TOPHAT, kernel=structuring_element)

    cv2.imshow('src', gray)
    cv2.imshow('dest', tophat)
    cv2.waitKey()

'''
黑帽操作
原理：闭运算结果减去原图
作用：提取比背景暗的微小物体
'''
def img_morphology_blackhat_test1():
    """Black-hat transform (closing minus source) on the grayscale image.

    Highlights small details that are darker than their surroundings.
    """
    gray = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if gray is None:
        return

    structuring_element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(3, 3))
    blackhat = cv2.morphologyEx(src=gray, op=cv2.MORPH_BLACKHAT, kernel=structuring_element)

    cv2.imshow('src', gray)
    cv2.imshow('dest', blackhat)
    cv2.waitKey()

'''
图像边缘检测
常用函数：canny;sobel;scharr;laplacian
'''

'''
canny边缘提取
多阶段算法，检测效果好，对噪声抑制效果好
通用边缘检测，适合大多数场景
步骤：
    1.高斯滤波
    2.使用sobel算子计算图像的梯度幅值和方向
    3.非极大值抑制(NMS)：沿着梯度方向，保留局部梯度最大的像素点，抑制其他像素点
    4.双阈值检测：使用两个阈值(低阈值和高阈值)来确定真正的边缘。高于高阈值的像素点认为是强边缘；低于低阈值的像素点被抑制；介于两个阈值之间的像素点若和强边缘相连则保留
    5.边缘连接
'''
def canny_test1():
    """Run Canny edge detection and display the edge map.

    threshold1/threshold2 are the low/high hysteresis thresholds; the
    0..255 pair used here gives the widest possible hysteresis band.
    """
    gray = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if gray is None:
        return

    edges = cv2.Canny(image=gray, threshold1=0, threshold2=255)

    cv2.imshow('src', gray)
    cv2.imshow('dest', edges)
    cv2.waitKey()
    
'''
sobel算子
基于一阶导数
可检测竖直边缘和水平边缘
水平方向的算子：
    [-1,0,1]
    [-2,0,2]
    [-1,0,1]
竖直方向的算子：
    [-1,-2,-1]
    [0,0,0]
    [1,2,1]
'''
def sobel_test1():
    """Demonstrate the Sobel first-derivative edge detector.

    dx/dy select the derivative order along each axis.  ddepth is the
    depth of the *output* image (the original comment called it the
    input depth).  Computing into CV_64F and converting back with
    convertScaleAbs preserves the negative gradient responses that a
    CV_8U output would clip to zero, which would hide edges of one
    polarity.
    """
    src = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if src is None:
        return

    dest_x = cv2.convertScaleAbs(cv2.Sobel(src=src, ddepth=cv2.CV_64F, dx=1, dy=0))
    dest_y = cv2.convertScaleAbs(cv2.Sobel(src=src, ddepth=cv2.CV_64F, dx=0, dy=1))
    dest_xy = cv2.convertScaleAbs(cv2.Sobel(src=src, ddepth=cv2.CV_64F, dx=1, dy=1))

    cv2.imshow('src', src)
    cv2.imshow('dest_x', dest_x)
    cv2.imshow('dest_y', dest_y)
    cv2.imshow('dest_xy', dest_xy)
    cv2.waitKey()
    
'''
scharr
sobel的改进版本，对边缘的响应更强
检测细微的边缘
'''
def scharr_test1():
    """Demonstrate the Scharr operator (a more accurate 3x3 Sobel variant).

    ddepth is the depth of the *output* image; computing into CV_64F and
    converting back with convertScaleAbs keeps the negative gradient
    responses that a CV_8U output would clip to zero.
    """
    src = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if src is None:
        return

    dest_x = cv2.convertScaleAbs(cv2.Scharr(src=src, ddepth=cv2.CV_64F, dx=1, dy=0))
    dest_y = cv2.convertScaleAbs(cv2.Scharr(src=src, ddepth=cv2.CV_64F, dx=0, dy=1))

    cv2.imshow('src', src)
    cv2.imshow('dest_x', dest_x)
    cv2.imshow('dest_y', dest_y)
    cv2.waitKey()
    
'''
laplacian
基于二阶导数的边缘检测，对噪声敏感
作用：检测边缘和角点
'''
def laplacian_test1():
    """Demonstrate the Laplacian (second-derivative) edge detector.

    The Laplacian produces signed responses; computing into CV_64F and
    converting back with convertScaleAbs keeps the negative lobes that a
    CV_8U output would clip to zero.
    """
    src = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if src is None:
        return

    dest = cv2.convertScaleAbs(cv2.Laplacian(src=src, ddepth=cv2.CV_64F))

    cv2.imshow('src', src)
    cv2.imshow('dest', dest)
    cv2.waitKey()
    
'''
轮廓检测
findContours 查找图像中的轮廓
drawContours 在图像上绘制轮廓
contourArea 计算轮廓的面积
arcLength 计算轮廓的周长或弧长
boundingRect 计算轮廓的边界矩形
minAreaRect 计算轮廓的最小外接矩形
minEnclosingCircle 计算轮廓的最小外接圆
approxPolyDP 对轮廓进行多边形近似
'''
def img_contours_test1():
    """Find the external contours of the binarised image and draw them.

    findContours modes: RETR_EXTERNAL (outermost only), RETR_LIST (all,
    no hierarchy), RETR_TREE (all, with hierarchy).  Methods:
    CHAIN_APPROX_NONE stores every contour point, CHAIN_APPROX_SIMPLE
    compresses straight segments down to their endpoints.
    """
    src = cv2.imread('images/lena.png', cv2.IMREAD_GRAYSCALE)
    if src is None:
        return

    threshold, binary = cv2.threshold(src=src, thresh=128, maxval=255, type=cv2.THRESH_BINARY)

    contours, hierarchy = cv2.findContours(image=binary, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
    print(f"contours={contours}")

    # Draw on a copy: drawContours paints into the image it is given, so
    # drawing directly on `src` would corrupt the "src" window below.
    drawn = cv2.drawContours(image=src.copy(), contours=contours, contourIdx=-1, color=(0, 0, 0))

    cv2.imshow("src", src)
    cv2.imshow("binary", binary)
    cv2.imshow("drawn", drawn)
    cv2.waitKey()
    
'''
图像直方图
计算直方图 calcHist
直方图均衡化 equalizeHist
直方图比较 compareHist
'''
def img_hist_test(url="rtsp://admin:new3sip123@192.168.88.228:554/cam/realmonitor?channel=1&subtype=0&unicast=true&proto=Onvif"):
    """Grab frames from a video stream and time cv2.calcHist on each grey frame.

    Args:
        url: stream source; defaults to the original hard-coded RTSP URL.
             NOTE(security): the default embeds credentials in source —
             move them to configuration/environment in real deployments.
    """
    # VideoCapture(url) already opens the stream; the original called
    # open(url) a second time, re-opening it needlessly.
    video_capturer = cv2.VideoCapture(url)
    if not video_capturer.isOpened():
        print(f"open {url} error")
        return

    try:
        while True:
            ret, src = video_capturer.read()
            if not ret:
                break

            gray = cv2.cvtColor(src=src, code=cv2.COLOR_BGR2GRAY)

            start = time.time_ns()
            # calcHist ranges are half-open: [0, 256) covers intensity 255 too
            # (the original [0, 255] silently dropped the 255 bin's upper edge).
            cv2.calcHist(images=[gray], channels=[0], mask=None, histSize=[256], ranges=[0, 256])
            end = time.time_ns()
            print(f"calcHist use {end - start} ns")

            cv2.imshow("gray", gray)
            cv2.waitKey(40)
    finally:
        video_capturer.release()

'''
视频目标跟踪
opencv目标跟踪算法：MeanShift、CamShift
'''

'''
MeanShift算法原理：
    通过迭代计算目标区域的质心，并将窗口中心移动到质心，从而实现目标的跟踪。

基本步骤：
    1.手动或自动选取图像里的某个目标区域，作为初始窗口
    2.在当前窗口中，计算目标的质心
    3.将窗口中心移动到质心
    4.迭代。重复2、3步骤，直至 窗口中心稳定落在质心 或 达到最大迭代次数

优缺点：
    优点1.简单易实现，效率高
    优点2.对目标的形状和大小变化不敏感
    缺点1.对目标的快速移动和遮挡，处理差
    缺点2.窗口大小固定，无法适应目标的大小变化
'''
def MeanShift_test1():
    """Track a fixed initial window in the webcam feed with MeanShift.

    Steps: grab one frame, build an HSV hue histogram of the initial ROI,
    then for every new frame back-project the histogram and let
    cv2.meanShift re-position the window over the most similar region.
    """
    # VideoCapture(0) already opens the device; the original re-opened it.
    video_capturer = cv2.VideoCapture(0)
    if not video_capturer.isOpened():
        print(f"open {0} error")
        return

    ret, src = video_capturer.read()
    if not ret:
        video_capturer.release()
        return

    # Initial tracking window (x, y, w, h)
    x = 100
    y = 100
    w = 100
    h = 100
    track_window = (x, y, w, h)

    # Crop the ROI covered by the window
    roi = src[y:y+h, x:x+w]
    cv2.imshow("roi", roi)

    # Work in HSV so the histogram can be computed over hue
    hsv_roi = cv2.cvtColor(src=roi, code=cv2.COLOR_BGR2HSV)

    # Mask out dark/unsaturated pixels, then build the hue histogram
    mask = cv2.inRange(hsv_roi, numpy.array((0., 60., 32.)), numpy.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    # Stop after 10 iterations or when the window moves less than 1 pixel
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    try:
        while True:
            ret, src = video_capturer.read()
            if not ret:
                break

            cv2.imshow("src", src)

            hsv = cv2.cvtColor(src=src, code=cv2.COLOR_BGR2HSV)

            # Back-projection: per-pixel likelihood of matching the ROI histogram
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

            # Shift the window toward the densest nearby region
            ret, track_window = cv2.meanShift(dst, track_window, term_crit)

            # Visualise the tracked window
            x, y, w, h = track_window
            mean_shift = cv2.rectangle(src, (x, y), (x+w, y+h), 255, 2)

            cv2.imshow("mean_shift", mean_shift)
            cv2.waitKey(40)
    finally:
        video_capturer.release()

'''
CamShift
MeanShfit的改进版本，通过自适应调整窗口大小更好地跟踪目标。
在MeanShfit的基础上加了窗口大小和方向的调整，使得能适应目标的尺寸变化和旋转变化。
基本步骤：
    1.初始化一个窗口，选定目标区域
    2.在当前窗口中计算目标质心
    3.移动窗口，将窗口中心移动到目标质心
    4.根据目标的尺寸角度调整窗口
    5.迭代。直到 窗口中心稳定落在目标质心 或 达到迭代次数
'''
def CamShift_test1():
    """Track a fixed initial window in the webcam feed with CamShift.

    CamShift extends MeanShift by also adapting the window's size and
    orientation each iteration, so it follows scale/rotation changes.
    """
    # VideoCapture(0) already opens the device; the original re-opened it.
    video_capturer = cv2.VideoCapture(0)
    if not video_capturer.isOpened():
        print(f"open {0} error")
        return

    ret, src = video_capturer.read()
    if not ret:
        video_capturer.release()
        return

    # Initial tracking window (x, y, w, h)
    x = 100
    y = 100
    w = 100
    h = 100
    track_window = (x, y, w, h)

    # Crop the ROI covered by the window
    roi = src[y:y+h, x:x+w]
    cv2.imshow("roi", roi)

    # Hue histogram of the ROI, masked to reasonably bright/saturated pixels
    hsv_roi = cv2.cvtColor(src=roi, code=cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, numpy.array((0., 60., 32.)), numpy.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    # Stop after 10 iterations or when the window moves less than 1 pixel
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    try:
        while True:
            ret, src = video_capturer.read()
            if not ret:
                break

            cv2.imshow("src", src)

            hsv = cv2.cvtColor(src=src, code=cv2.COLOR_BGR2HSV)

            # Back-projection: per-pixel likelihood of matching the ROI histogram
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

            # CamShift returns a rotated rectangle plus the updated window
            ret, track_window = cv2.CamShift(dst, track_window, term_crit)

            # Draw the rotated rectangle.  numpy.int0 was removed in
            # NumPy 2.0; numpy.intp is its long-standing canonical alias.
            pts = cv2.boxPoints(ret)
            pts = pts.astype(numpy.intp)
            cam_shfit = cv2.polylines(src, [pts], True, 255, 2)

            cv2.imshow('cam_shfit', cam_shfit)
            cv2.waitKey(40)
    finally:
        video_capturer.release()

'''
视频背景减除，即提取前景对象
主要用于检测视频中的运动对象
基本流程：
    1.背景建模。分析视频序列中的多帧图像，建立一个背景模型
    2.前景检测。将当前帧与背景模型进行比较，找出与背景差异较大的区域，视为前景
    3.背景更新。随着时间推移，不断更新背景模型。
opencv常用方法：MOG、MOG2
'''

'''
基于高斯混合模型的背景减除法。
核心思想：使用多个高斯分布来建模背景中的像素值。
每个像素的值看做一个随机变量，其分布为多个高斯分布组合。
MOG能够处理背景中的复杂变化，如光照变化，阴影等。

算法步骤：
    1.初始化。为每个像素初始化多个高斯分布
    2.模型更新。对于每一个像素，更新高斯分布参数(均值、方差、权重)
    3.前景检测。将当前图像的像素值与背景模型比较，如果像素值不在高斯模型里，则视为前景。
'''
def MOG_test1():
    """Placeholder for the MOG background-subtraction demo.

    Only the capture setup is sketched here; the subtractor itself is
    not created (the original left a note asking which OpenCV build
    provides it — it ships with opencv-contrib as cv2.bgsegm).
    """
    capture = cv2.VideoCapture(0)

    if not capture.open(0):
        print(f"open {0} error")
        return

    # TODO: implement once a build exposing the MOG subtractor is available
    pass

'''
MOG2是MOG的改进版本。区别在于能够自动选择高斯分布的数量，能够更好的适应背景的变化。
MOG2通过动态调整高斯分布的数量和参数，能更准确的建立模型，更准确的检测前景

算法步骤：
    1.初始化。为每个像素初始化多个高斯分布
    2.模型更新。对每一帧图像，更新每个像素的高斯分布参数，并根据需要增加或减少高斯分布的数量
    3.前景检测。将当前图像的像素值与背景模型比较，如果像素值不在高斯模型里，则视为前景。
'''
def MOG2_test1():
    """Run the MOG2 background subtractor on the webcam feed.

    MOG2 improves on MOG by adapting the number of Gaussians per pixel,
    so it handles gradual background change better.
    """
    # VideoCapture(0) already opens the device; the original re-opened it.
    video_capturer = cv2.VideoCapture(0)
    if not video_capturer.isOpened():
        print(f"open {0} error")
        return

    mog2 = cv2.createBackgroundSubtractorMOG2()

    try:
        while True:
            ret, src = video_capturer.read()
            if not ret:
                break

            # Foreground mask: moving pixels white, background black
            dest = mog2.apply(image=src)

            cv2.imshow("src", src)
            cv2.imshow("dest", dest)
            cv2.waitKey(40)
    finally:
        video_capturer.release()

'''
人脸检测
opencv提供了基于Haar特征的人脸检测方法，简单易用且效果显著

步骤：
1.加载Haar特征分类模型
2.读取图像
3.转为灰度图像
4.检测人脸
5.绘制结果
6.显示
'''
def face_detect_test1():
    """Detect faces in the webcam feed with a Haar cascade and box them."""
    # Load the pre-trained frontal-face Haar cascade shipped with OpenCV
    classifier = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    # VideoCapture(0) already opens the device; the original re-opened it.
    video_capturer = cv2.VideoCapture(0)
    if not video_capturer.isOpened():
        print("open error")
        return

    try:
        while True:
            ret, bgr = video_capturer.read()
            if not ret:
                print("read error")
                break

            # Haar cascades operate on grayscale images
            gray = cv2.cvtColor(src=bgr, code=cv2.COLOR_BGR2GRAY)

            faces = classifier.detectMultiScale(image=gray)

            # Each detection is an (x, y, w, h) rectangle
            for face_rect in faces:
                cv2.rectangle(img=bgr, rec=face_rect, color=(255, 255, 255))

            cv2.imshow("bgr", bgr)
            cv2.waitKey(40)
    finally:
        video_capturer.release()


'''
模板匹配
常用于在一幅大图中寻找某个小图（模板图像）的位置,它广泛应用于目标检测、图像识别、自动化测试、游戏辅助等领域
原理：滑动模板图像在被检测图像上移动，计算每个位置的相似度，获取相似度最高的位置

cv2.matchTemplate的返回值:匹配结果矩阵，大小为 (W-w+1) x (H-h+1)
    结果矩阵中的每个像素点 result[y, x] 代表了这样一个信息：
    "当模板的左上角位于原图的 (x, y) 位置时，两者的匹配程度是多少"
'''
def matchTemplate_test1():
    """Locate a template inside a larger image via sliding-window matching.

    cv2.matchTemplate returns a (W-w+1) x (H-h+1) score matrix where
    result[y, x] is the similarity when the template's top-left corner is
    placed at (x, y) in the search image.  For TM_CCORR the BEST match is
    at max_loc; both extrema are drawn here for illustration.
    """
    template = cv2.imread(filename='images/car.png', flags=cv2.IMREAD_GRAYSCALE)
    if template is None:  # guard: .shape below would raise on a missing file
        return
    template_w, template_h = template.shape[1], template.shape[0]

    search = cv2.imread(filename='images/search-car.png', flags=cv2.IMREAD_GRAYSCALE)
    if search is None:
        return

    cv2.imshow('template', template)
    cv2.imshow('search', search)
    cv2.waitKey()

    # Method cheat sheet:
    #   TM_SQDIFF / TM_SQDIFF_NORMED  — lower score is better
    #   TM_CCORR  / TM_CCORR_NORMED   — higher score is better
    #   TM_CCOEFF / TM_CCOEFF_NORMED  — higher score is better
    result = cv2.matchTemplate(image=search, templ=template, method=cv2.TM_CCORR)
    print(f"result.shape={result.shape},result.dtype={result.dtype}")

    # Locate the extrema of the score map
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(src=result)
    print(f"min_val={min_val},max_val={max_val},min_loc={min_loc},max_loc={max_loc}")

    min_x, min_y = min_loc
    max_x, max_y = max_loc

    # Draw rectangles at both the worst (min) and best (max) positions
    cv2.rectangle(img=search, pt1=(min_x, min_y), pt2=(min_x + template_w, min_y + template_h), color=(0), thickness=2)
    cv2.rectangle(img=search, pt1=(max_x, max_y), pt2=(max_x + template_w, max_y + template_h), color=(0), thickness=2)

    cv2.imshow('template', template)
    cv2.imshow('search', search)
    cv2.waitKey()

    return

'''
形状匹配
比较两个轮廓（contours）形状相似度的函数。
它基于 Hu 矩（Hu Moments） 来衡量两个形状之间的差异.
常用于图像识别、形状匹配、模板匹配等任务
cv2.matchShapes
'''
def matchShapes_test1():
    """Find the contour in a search image most similar to a template shape.

    Uses cv2.matchShapes (Hu-moment based; lower distance = more similar)
    and highlights each new best candidate with its bounding rectangle.
    Only candidates with distance below 1.0 are considered.
    """
    template = cv2.imread(filename='images/template.png', flags=cv2.IMREAD_GRAYSCALE)
    search = cv2.imread(filename='images/troche.png', flags=cv2.IMREAD_GRAYSCALE)
    if template is None or search is None:  # guard: threshold below would raise
        return

    # Binarise both images
    _, template_bin = cv2.threshold(template, 127, 255, cv2.THRESH_BINARY)
    _, search_bin = cv2.threshold(search, 127, 255, cv2.THRESH_BINARY)

    cv2.imshow('template', template)
    cv2.imshow('template_bin', template_bin)
    cv2.imshow('search', search)
    cv2.imshow('search_bin', search_bin)

    # Outer contours of the template; keep the largest by area
    contours_template, _ = cv2.findContours(template_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    template_contour = max(contours_template, key=cv2.contourArea)

    # Draw the template contour on a fresh canvas (uint8 so imshow treats
    # 255 as white directly; the original float64 canvas relied on imshow's
    # saturating float display)
    template_canvas = numpy.zeros(template.shape, dtype=numpy.uint8)
    cv2.drawContours(template_canvas, [template_contour], -1, color=(255), thickness=2)
    cv2.imshow('template_canvas', template_canvas)
    cv2.waitKey()

    # All outer contours of the search image
    contours_search, _ = cv2.findContours(search_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    canvas_search = numpy.zeros(search.shape, dtype=numpy.uint8)
    cv2.drawContours(canvas_search, contours_search, -1, color=(255), thickness=2)
    cv2.imshow('canvas_search', canvas_search)
    cv2.waitKey()

    # Compare every search contour against the template; track the best
    distance_min = 1.0
    best_contour_index = -1
    for contour_index, cnt in enumerate(contours_search):
        # matchShapes methods: CONTOURS_MATCH_I1 / I2 / I3; returns a distance
        distance = cv2.matchShapes(template_contour, cnt, cv2.CONTOURS_MATCH_I1, 0.0)

        if distance < distance_min:
            print(f"distance={distance},contour_index={contour_index}")
            distance_min = distance
            best_contour_index = contour_index

            # Show the bounding rectangle of the new best candidate
            boundRect = cv2.boundingRect(cnt)
            print(f"boundRect={boundRect}")

            canvas = numpy.zeros(search.shape, dtype=numpy.uint8)
            cv2.rectangle(canvas, rec=boundRect, color=(255), thickness=2)
            cv2.imshow('canvas', canvas)
            cv2.waitKey()

    print("---loop over---")

    if best_contour_index == -1:
        print("not found best_contour_index")
        return

    print(f"distance_min={distance_min},best_contour_index={best_contour_index}")

# 获取高斯卷积核
# cv2.getGaussianKernel

# Image-pyramid DOWN-sampling: width and height are halved.
# (The original comment said "up-sampling"; cv2.pyrDown Gaussian-blurs
# the image and then drops every other row and column.)
def use_pyrDown() -> None:
    """Halve lena.png with cv2.pyrDown and show before/after shapes."""
    origin_image = cv2.imread("images/lena.png")
    if origin_image is None:  # guard against a missing file
        return None
    cv2.imshow("origin_image", origin_image)
    print(origin_image.shape)  # e.g. (512, 512, 3)

    after_image = cv2.pyrDown(origin_image)
    cv2.imshow("after_image", after_image)
    print(after_image.shape)  # e.g. (256, 256, 3)

    cv2.waitKey()
    cv2.destroyAllWindows()

    return None

# Detect keypoints with the SIFT feature detector
def use_SIFT() -> None:
    """Run SIFT keypoint detection on lena.png and draw the keypoints.

    Note the keypoints are drawn onto a black canvas of the same size,
    not onto the photograph itself.
    """
    image = cv2.imread("images/lena.png")
    if image is None:  # guard against a missing file
        return None

    sift = cv2.SIFT.create()

    # Returns the keypoints and one descriptor vector per keypoint
    key_points, descriptor = sift.detectAndCompute(image, None)
    print(key_points)
    print(descriptor)

    canvas = numpy.zeros(shape=(image.shape[0], image.shape[1], image.shape[2]), dtype=numpy.uint8)

    # Draw the keypoints on the blank canvas
    image_drawed = cv2.drawKeypoints(canvas, key_points, None)
    cv2.imshow("image_drawed", image_drawed)

    cv2.waitKey()
    cv2.destroyAllWindows()

    return None

# Read frames from the default camera and display them
def use_videocapture() -> None:
    """Open camera 0 and display its frames forever (~40 ms per frame)."""
    capture = cv2.VideoCapture()
    # `if not ...` / `while True` replace the non-idiomatic
    # `== False` / `== True` / `while 1` of the original.
    if not capture.open(0):
        return None

    while True:
        ret, img = capture.read()
        if ret:
            cv2.imshow("img", img)

        cv2.waitKey(40)

# cv2.remap samples src at the coordinates given by the maps and interpolates:
# dest[y, x] = src[ymap[y, x], xmap[y, x]]
def reamap_test1():
    """Remap a white image through all-zero maps (every pixel reads src[0, 0])."""
    white = numpy.full((300, 400), 255, dtype=numpy.uint8)
    cv2.imshow("src", white)
    cv2.waitKey()

    # Zero maps send every destination pixel to the source origin
    map_x = numpy.zeros((300, 400), dtype=numpy.float32)
    map_y = numpy.zeros((300, 400), dtype=numpy.float32)

    remapped = cv2.remap(src=white, map1=map_x, map2=map_y, interpolation=0)
    cv2.imshow("dest", remapped)
    cv2.waitKey()
    
# Discrete cosine transform (1-D)
def dct_test1():
    """Print the DCT coefficients of the sequence 0..7."""
    samples = numpy.arange(8).astype(numpy.float32)

    coefficients = cv2.dct(samples)
    print(coefficients)
    
def dct_test2():
    """Print the 2-D DCT coefficients of an 8x8 ramp matrix."""
    samples = numpy.arange(64).reshape(8, 8).astype(numpy.float32)

    coefficients = cv2.dct(samples)
    print(coefficients)

if __name__ == "__main__":
    print_cv_version()
    # img_resize_test1()
    # img_rotate_test1()
    # img_flip_test1()
    # img_threshold_test1()
    # img_threshold_test2()
    # img_threshold_test3()
    # img_blur_test1()
    # img_gaussian_blur_test1()
    # img_median_blur_test1()
    # img_bilateral_filter_test1()
    # img_erode_test1()
    # img_dilate_test1()
    # img_morphology_open_test1()
    # img_morphology_close_test1()
    # img_morphology_grad_test1()
    # img_morphology_tophat()
    # img_morphology_blackhat_test1()
    # canny_test1()
    # sobel_test1()
    # scharr_test1()
    # laplacian_test1()
    # img_contours_test1()
    # img_hist_test()
    # MeanShift_test1()
    # CamShift_test1()
    # MOG2_test1()
    # face_detect_test1()
    # matchTemplate_test1()
    matchShapes_test1()
    # use_pyrDown()
    # use_SIFT()
    # use_videocapture()
    # reamap_test1()
    # dct_test1()
    # dct_test2()
    
    exit(0)