import cv2
import rospy
import numpy as np
import threading
from sensor_msgs.msg import Image
from sensor_msgs.msg import Imu
from cv_bridge import CvBridge
from sensor_msgs.msg import CompressedImage
import module.IMU_Optimize
import module.lowlight_enhancement

# corners = cv2.goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, harrisK]]]]])
# Purpose: find the corner points needed for optical-flow estimation.
# image: 8-bit or 32-bit floating-point input image, single channel
# maxCorners: maximum number of corners; if more are detected, only the strongest maxCorners are returned
# qualityLevel: corner quality factor, a number between 0 and 1
# minDistance: a candidate corner is discarded if a stronger corner exists within minDistance of it
# mask: region of interest; use this to restrict detection to part of the image instead of the whole frame
# blockSize: window size used when computing the covariance matrix
# useHarrisDetector: whether to use Harris corner detection; if unset, Shi-Tomasi corners are computed
# harrisK: the k value required by Harris corner detection
# Typically only image, maxCorners, qualityLevel and minDistance need to be given.


# p1, st, err = cv2.calcOpticalFlowPyrLK(prevImage, nextImage, prevPts, None, **lk_params)
# Purpose: obtain the corner positions after optical-flow tracking.
#
#     prevImage: previous frame
#
#     nextImage: current frame
#
#     prevPts: vector of feature points to track
#
#     winSize: search-window size
#
#     maxLevel: maximum number of pyramid levels
#
# Returns:
#
#     nextPts: vector of tracked feature-point positions
#
#     status: 1 if the corresponding feature point was found, 0 otherwise
#
#     err: per-point tracking error

# Parameters for cv2.goodFeaturesToTrack: up to 100 Shi-Tomasi corners,
# quality floor 0.01, and at least 30 px separation between corners.
feature_params = {
    "maxCorners": 100,
    "qualityLevel": 0.01,
    "minDistance": 30,
}

# Parameters for cv2.calcOpticalFlowPyrLK: 21x21 search window,
# up to 3 pyramid levels.
lk_params = {
    "winSize": (21, 21),
    "maxLevel": 3,
}


# Serializes the decode/publish pipeline in PubToImage so concurrent
# callback invocations do not interleave.
img_buf=threading.Lock()
# Shared converter between OpenCV images and ROS Image messages.
bridge = CvBridge()

def PubNoDelay(image):
    """Decode a CompressedImage, run low-light enhancement, and republish.

    Args:
        image: sensor_msgs/CompressedImage message from the camera topic.

    Publishes the enhanced frame on the module-global ``imgpub`` as a
    bgr8 Image, copying the incoming header so timestamps stay aligned.
    """
    # np.frombuffer replaces np.fromstring, which is deprecated and
    # removed from recent NumPy releases.
    np_arr = np.frombuffer(image.data, np.uint8)
    cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
    # Local import, aliased so it does not shadow sensor_msgs.msg.Image.
    from PIL import Image as PILImage
    # The enhancement model works on RGB PIL images; OpenCV decodes BGR.
    frame = PILImage.fromarray(cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB))
    result = module.lowlight_enhancement.lowlight(frame)
    img = cv2.cvtColor(np.asarray(result), cv2.COLOR_RGB2BGR)
    image_message = bridge.cv2_to_imgmsg(img, encoding="bgr8")
    image_message.header = image.header
    imgpub.publish(image_message)

def PubToImage(image):
    """Decode a CompressedImage and republish it as a raw bgr8 Image.

    Args:
        image: sensor_msgs/CompressedImage message from the camera topic.

    Publishes on the module-global ``imgpub``, preserving the header.
    """
    # 'with' guarantees the lock is released even if decode/publish
    # raises; the original bare acquire()/release() pair would leave the
    # lock held forever after an exception.
    with img_buf:
        # np.frombuffer replaces the deprecated np.fromstring.
        np_arr = np.frombuffer(image.data, np.uint8)
        cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        image_message = bridge.cv2_to_imgmsg(cv_image, encoding="bgr8")
        image_message.header = image.header
        imgpub.publish(image_message)



def testOpencvFunction():
    """Smoke-test low-light enhancement and LK optical flow on two stills.

    Reads two fixture images from disk, enhances the dark one, detects
    Shi-Tomasi corners in the first frame, and tracks them into the
    second frame. Display calls are left commented out; waitKey blocks
    until a key press when a window is shown.
    """
    img1 = cv2.imread("/usr/resource/dark1.png")
    img2 = cv2.imread("/usr/resource/first.png")
    # Aliased local import so PIL's Image does not shadow sensor_msgs.msg.Image.
    from PIL import Image as PILImage
    frame = PILImage.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
    img_enhanced = module.lowlight_enhancement.lowlight(frame)
    result1 = cv2.cvtColor(np.asarray(img_enhanced), cv2.COLOR_RGB2BGR)
    # calcOpticalFlowPyrLK expects 8-bit single-channel input, so BOTH
    # frames are converted to grayscale (the original passed the color
    # frames while only using grayscale for corner detection).
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # Find corners in the first frame ...
    p0 = cv2.goodFeaturesToTrack(gray1, mask=None, **feature_params)
    # ... and track them into the second frame.
    p1, st, err = cv2.calcOpticalFlowPyrLK(gray1, gray2, p0, None, **lk_params)
    # cv2.imshow("origin", img1)
    # cv2.imshow("result", result1)
    cv2.waitKey(0)


if __name__ == "__main__":
    rospy.init_node('M2DGR_Test', anonymous=True)
    # Create the publisher BEFORE subscribing: the subscriber callback
    # (PubToImage) references the module-global `imgpub`, so a message
    # arriving before the publisher existed would raise NameError.
    imgpub = rospy.Publisher("/imageProcessed", Image, queue_size=20)
    imgsub = rospy.Subscriber("/camera/color/image_raw/compressed", CompressedImage, PubToImage, queue_size=20)
    # Block and hand control to the ROS callback loop.
    rospy.spin()


