# -*- coding: utf-8 -*-

import sys
import threading
import os
import termios
import numpy as np 
import cv2
# from kafka import KafkaProducer
import json
import socket
import time

from ctypes import *

sys.path.append("../MvImport")
from MvCameraControl_class import *
g_bExit = False



def send_to_kafka(topic, jpeg_buffer):
	"""Publish a JPEG byte buffer to the given Kafka topic.

	NOTE(review): the `kafka` import at the top of the file is commented
	out, so KafkaProducer is undefined here — re-enable that import
	before calling this function.

	Bug fixes: the except clause used KafkaError, which is never
	imported (the handler itself would raise NameError); and the
	producer was never closed, leaking its network resources.
	"""
	producer = KafkaProducer(
		bootstrap_servers=['10.48.0.4:9092'],
		value_serializer=lambda v: v
	)
	try:
		producer.send(topic, value=jpeg_buffer)
		producer.flush()
		print(f"Message sent to Kafka topic '{topic}'")
	except Exception as e:  # KafkaError is not imported; catch broadly and report
		print(f"Failed to send message to Kafka: {e}")
	finally:
		producer.close()  # always release the producer
# Producer: encode an image and send the JPEG bytes to Kafka.
def send_image_kafka(topic, image_array):
	"""JPEG-encode *image_array* and publish the bytes to *topic*.

	Bug fixes: the imencode return flag was previously unchecked
	(failure would silently send garbage), and the producer was not
	closed if send/flush raised.
	"""
	producer = KafkaProducer(bootstrap_servers=['10.48.0.4:9092'])
	try:
		# Encode the image as JPEG bytes
		ret, buffer = cv2.imencode('.jpg', image_array)
		if not ret:
			raise ValueError("Failed to encode image to JPEG format")
		img_bytes = buffer.tobytes()

		# Send the bytes to Kafka and make sure they are flushed out
		producer.send(topic, value=img_bytes)
		producer.flush()
		print("Image sent to Kafka.")
	finally:
		producer.close()  # release the producer even on failure

def send_image_for_tcp(server_ip, server_port, img_arr):
	"""JPEG-encode *img_arr* and stream the bytes to (server_ip, server_port).

	Bug fixes: the socket previously leaked if connect/encode/send raised
	(no try/finally); now a context manager guarantees closure.  The
	imencode return flag is also checked instead of being discarded.
	"""
	# Encode before opening the connection so a bad frame never
	# leaves a half-open socket behind.
	ok, img_encoded = cv2.imencode('.jpg', img_arr)
	if not ok:
		raise ValueError("Failed to encode image to JPEG format")
	img_data = img_encoded.tobytes()

	# The context manager closes the socket on any exit path.
	with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
		client_socket.connect((server_ip, server_port))
		client_socket.sendall(img_data)
	print("Image sent successfully.")

def nHostTimeStamp2BeijingTime(timestamp):
	"""Convert a host timestamp in milliseconds to naive Beijing time (UTC+8).

	Args:
		timestamp: milliseconds since the Unix epoch (SDK nHostTimeStamp).
	Returns:
		A naive datetime.datetime representing the instant in UTC+8,
		same as the previous implementation.
	"""
	import datetime
	# Timestamp is in milliseconds; convert to seconds first.
	timestamp_seconds = timestamp / 1000

	# datetime.utcfromtimestamp() is deprecated since Python 3.12;
	# use the timezone-aware API, then drop tzinfo to keep the
	# original naive return value.
	utc_time = datetime.datetime.fromtimestamp(
		timestamp_seconds, tz=datetime.timezone.utc
	).replace(tzinfo=None)

	# Shift to Beijing time (UTC+8).
	return utc_time + datetime.timedelta(hours=8)

def img_buff_to_jpeg(img_buff, stFrameInfo):
    """Interpret *img_buff* as an image of stFrameInfo.nHeight x nWidth
    (channel count inferred) and return it encoded as JPEG bytes.

    Raises ValueError if the JPEG encoding step fails.
    """
    # View the raw buffer as uint8 and give it the frame's 2D shape;
    # the trailing -1 lets NumPy infer the channel count.
    frame = np.frombuffer(img_buff, dtype=np.uint8)
    frame = frame.reshape((stFrameInfo.nHeight, stFrameInfo.nWidth, -1))

    ok, encoded = cv2.imencode('.jpg', frame)
    if not ok:
        print("Failed to encode image to JPEG format")
        raise ValueError("Failed to encode image to JPEG format")
    return encoded.tobytes()

# Display a frame in an OpenCV preview window.
def image_show(image):
	"""Show *image* in the 'fgmask' window and pump the GUI event loop briefly."""
	window_name = 'fgmask'
	# image = cv2.resize(image, (600, 400), interpolation=cv2.INTER_AREA)  # optional downscale
	cv2.imshow(window_name, image)
	cv2.waitKey(1)

# Convert raw frame data into a displayable/returnable image array.
def image_control(data, stFrameInfo, show_image=False):
	"""Reshape and colour-convert raw frame *data* according to the SDK
	pixel-type code in stFrameInfo.enPixelType.

	Returns the converted image.  NOTE: in the RGB8 / BayerRG8 / fallback
	branches, when show_image is True the *returned* image is the
	BGR-converted display copy (this mirrors the original behavior).
	"""
	height = stFrameInfo.nHeight
	width = stFrameInfo.nWidth
	pixel_type = stFrameInfo.enPixelType

	if pixel_type == 17301505:  # Mono8: single-channel grayscale
		image = data.reshape((height, width))
		if show_image:
			image_show(image=image)

	elif pixel_type == 17301514:  # BayerGB8
		frame = data.reshape(height, width, -1)
		image = cv2.cvtColor(frame, cv2.COLOR_BAYER_GB2RGB)
		if show_image:
			image_show(image=image)

	elif pixel_type == 35127316:  # RGB8 packed (PixelType_Gvsp_RGB8_Packed)
		frame = data.reshape(height, width, -1)
		image = frame
		if show_image:
			# display copy is BGR — and becomes the returned value
			image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
			image_show(image=image)

	elif pixel_type == 34603039:  # YUV (Y422)
		frame = data.reshape(height, width, -1)
		image = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_Y422)
		if show_image:
			image_show(image=image)

	elif pixel_type == 17301513:  # BayerRG8
		frame = data.reshape(height, width, -1)
		image = cv2.cvtColor(frame, cv2.COLOR_BayerRG2RGB)
		if show_image:
			# NOTE: converts the raw reshaped buffer (not `image`) to BGR,
			# exactly as the original code did
			image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
			image_show(image=image)

	else:
		# Fallback: assume a 3-channel BGR source
		frame = data.reshape(height, width, -1)
		image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
		if show_image:
			image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
			image_show(image=image)

	return image


def press_any_key_exit():
	"""Block until the user presses any key, using a raw no-echo read on stdin.

	Temporarily disables canonical mode and echo on the controlling
	terminal, waits for one read, then restores the original settings.

	Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
	SystemExit; it is narrowed to OSError (a failed terminal read).
	The finally clause still guarantees the terminal is restored.
	"""
	fd = sys.stdin.fileno()
	old_ttyinfo = termios.tcgetattr(fd)
	new_ttyinfo = old_ttyinfo[:]
	new_ttyinfo[3] &= ~termios.ICANON  # raw mode: no line buffering
	new_ttyinfo[3] &= ~termios.ECHO    # do not echo the pressed key
	termios.tcsetattr(fd, termios.TCSANOW, new_ttyinfo)
	try:
		os.read(fd, 7)
	except OSError:
		pass  # best-effort: a failed read still counts as "key pressed"
	finally:
		# Always restore the terminal settings.
		termios.tcsetattr(fd, termios.TCSANOW, old_ttyinfo)

# Adjust the camera's acquisition region by setting ROI width, height,
# X offset and Y offset.
def setROIarea(cam, width=1024, height=768, offset_x=100, offset_y=50):
	"""Configure the camera ROI.

	Args:
		cam: camera object exposing MV_CC_SetIntValue(name, value).
		width, height, offset_x, offset_y: ROI geometry; defaults are
			the previously hard-coded values, so existing callers are
			unaffected.

	Failures are reported per node (matching the original behavior of
	printing and continuing).
	"""
	settings = (
		("Width", width),
		("Height", height),
		("OffsetX", offset_x),
		("OffsetY", offset_y),
	)
	for node, value in settings:
		nRet = cam.MV_CC_SetIntValue(node, value)
		if nRet != 0:
			print("Set %s failed! nRet[0x%x]" % (node, nRet))

# Query the current ROI region from the camera via the SDK int-value API.
def findROIarea(cam):
	"""Read and print the current ROI (Width/Height/OffsetX/OffsetY).

	Returns a dict mapping node name to its current value.  Nodes whose
	read fails are silently omitted, matching the original behavior of
	skipping failed reads.  (Previously nothing was returned, so
	returning the dict is backward compatible.)
	"""
	stIntValue = MVCC_INTVALUE()
	roi = {}
	for node in ("Width", "Height", "OffsetX", "OffsetY"):
		nRet = cam.MV_CC_GetIntValue(node, stIntValue)
		if nRet == 0:
			roi[node] = stIntValue.nCurValue
			print("Current ROI %s: " % node, roi[node])
	return roi

# Configure basic acquisition parameters on the camera.
def set_camera_parameters(cam, gain=10, exposure_time=500, pixel_format=None):
	"""Set gain, exposure time and pixel format on *cam*.

	Args:
		cam: camera object exposing MV_CC_SetFloatValue / MV_CC_SetEnumValue.
		gain: analog gain (default 10, the previous hard-coded value).
		exposure_time: exposure in microseconds (default 500 — the old
			comment claimed 20000 but the code actually set 500).
		pixel_format: SDK pixel-format enum value; when None, the SDK
			constant PixelType_Gvsp_BayerRG8 is used, as before.
	"""
	cam.MV_CC_SetFloatValue("Gain", gain)

	# Exposure time is in microseconds.
	cam.MV_CC_SetFloatValue("ExposureTime", exposure_time)

	if pixel_format is None:
		pixel_format = PixelType_Gvsp_BayerRG8  # SDK enum constant
	cam.MV_CC_SetEnumValue("PixelFormat", pixel_format)


def create_ImgBuffer_memory(pData, stFrameInfo):
	"""Copy the SDK frame buffer into freshly allocated memory and return
	it as a NumPy uint8 array.

	pData is a raw pointer owned by the SDK, so its contents are copied
	out so another thread can use them safely.  nFrameLen (the actual
	frame byte length, which may include headers/metadata) can differ
	from the computed image size, so only min(image_size, nFrameLen)
	bytes are copied; any remainder of the buffer stays zeroed.

	Bug fix (simplification): the original if/else around memmove was
	redundant — both branches reduce to copying
	min(image_size, nFrameLen) bytes — so it is now a single call.
	"""
	width = stFrameInfo.nWidth
	height = stFrameInfo.nHeight
	nFrameLen = int(stFrameInfo.nFrameLen)

	if stFrameInfo.enPixelType == 17301513:  # BayerRG8: 1 byte per pixel
		image_size = width * height
	else:
		image_size = width * height * 3  # 3 bytes per pixel for BGR/RGB
	print("image_size:", image_size)

	img_buff = (c_ubyte * image_size)()  # zero-initialized destination buffer
	memmove(img_buff, pData, min(image_size, nFrameLen))

	# Zero-copy NumPy view over the ctypes buffer (the buffer owns the data).
	data_array = np.ctypeslib.as_array(img_buff)
	return data_array
	
def saveImage(image, save_folder):
	"""Convert *image* from BGR to RGB and write it into *save_folder*
	with a microsecond-timestamped filename.

	Bug fix: `datetime` was never imported at module level (the only
	import was local to nHostTimeStamp2BeijingTime), so this function
	previously raised NameError on every call; a local import fixes it
	without touching the module header.
	"""
	from datetime import datetime
	image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
	timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
	filename = f"{save_folder}/received_image_{timestamp}.jpg"
	cv2.imwrite(filename, image)

# 为线程定义一个函数
def work_thread(cam=0, pData=0, nDataSize=0,server_ip="0.0.0.0",server_port=8084):
	stOutFrame = MV_FRAME_OUT()
	stFrameInfo = MV_FRAME_OUT_INFO_EX()
	
	'''
	typedef struct _MV_FRAME_OUT_INFO_EX_
	{
		unsigned short      nWidth;             // 图像宽度
		unsigned short      nHeight;            // 图像高度
		unsigned int        nFrameNum;          // 帧号
		unsigned int        nDevTimeStampHigh;  // 设备时间戳高32位
		unsigned int        nDevTimeStampLow;   // 设备时间戳低32位
		unsigned int        nHostTimeStamp;     // 主机端时间戳
		unsigned int        nFrameLen;          // 帧长度
		unsigned int        nLostPacket;        // 丢包数量（GigE 设备可能会有）
		unsigned int        enPixelType;        // 像素格式，见 PixelType
		unsigned int        nBlockId;           // 数据块ID
		unsigned int        nReserved[8];       // 保留字段
	} MV_FRAME_OUT_INFO_EX;
	'''
	# pData1 = byref(data_buf)
	memset(byref(stOutFrame), 0, sizeof(stOutFrame))

	while True:
		# ret = cam.MV_CC_GetImageBuffer(stOutFrame, 1000)
		'''
		MV_CC_GetOneFrameTimeout{
		void* handle,                          //指向相机设备句柄的指针   
		pData                                  //指向存储图像数据缓冲区的指针
		nDataSize                 			   //指向存储图像数据缓冲区大小
		MV_FRAME_OUT_INFO_EX* pFrameInfo,      //帧信息结构体指针
		unsigned int nTimeout                  //超时时间，单位为毫秒。该参数指定在没有捕获到图像帧的情况下，函数将等待的最长时间。
		}
		'''
		ret = cam.MV_CC_GetOneFrameTimeout(pData, nDataSize, stFrameInfo, 1000)
		if ret == 0:
			stConvertParam=MV_SAVE_IMAGE_PARAM_EX()
			print('======================================Start collecting images===============================')
			#新增
			# if frame_count % config.frame_jump == 0:  # 满足抽帧策略，则将图片传输给算法进行解析或存储
			# data = np.asarray(pData)
			width = stFrameInfo.nWidth
			height = stFrameInfo.nHeight
			# image_size = width * height * 3  # 对于 BGR/RGB，每个像素 3 字节
			image_size = width * height  # 对于 BGR/RGB，每个像素 3 字节
			print("image_size:",image_size)

			# 获取帧信息中的时间戳
			nHostTimeStamp = stFrameInfo.nHostTimeStamp
			print("nHostTimeStamp:",nHostTimeStamp)
			beijing_time = nHostTimeStamp2BeijingTime(nHostTimeStamp)
			print("BeijingTime:",beijing_time)

			# 第几帧
			nFrameNum = int(stFrameInfo.nFrameNum)
			print("nFrameNum:",nFrameNum)

			# 帧信息长度
			nFrameLen = int(stFrameInfo.nFrameLen)
			print("nFrameLen:",nFrameLen)

			# 统计有x帧 
			nFrameCounter = int(stFrameInfo.nFrameCounter)
			print("nFrameCounter:",nFrameCounter)

			# 分配内存空间
			data_array = create_ImgBuffer_memory(pData, stFrameInfo)

			#数据转换BayerGB8 to RGB/BGR
			image = image_control(data_array, stFrameInfo)

			if saveImage2local:
				save_folder = 'download_images'
				os.makedirs(save_folder,exist_ok=True)
				saveImage(image,save_folder)
				cv2.imwrite('frame.jpg',image)
			
			else:
				# 通过TCP传输到数据服务器
				send_image_for_tcp(server_ip,server_port, image)

			print ("get one frame: Width[%d], Height[%d], PixelType[0x%x], nFrameNum[%d]"  % (stFrameInfo.nWidth, stFrameInfo.nHeight, stFrameInfo.enPixelType, stFrameInfo.nFrameNum))
			print(f"Complete the collection of the {nFrameNum}th frame image and transmit it to the 10.48.0.4 server!!!!!")
			print('======================================End the collection task===============================\n')
			cam.MV_CC_FreeImageBuffer(stOutFrame)

		else:
			print ("no data[0x%x]" % ret)
		if g_bExit == True:
				break

		time.sleep(0.5)

if __name__ == "__main__":
	
	#--------------------------------------------------------------
	# TCP transfer target (data server)
	server_ip = '10.48.0.4'  # replace with the receiver IP
	server_port = 8084         # replace with the receiver port
	#--------------------------------------------------------------
	# Use a fixed device, selected by IP address below
	# key_to_find = 'MV-CS050-10GC-PRO'
	# key_to_find = 'MV-CS016-10GC'
	# key_to_find = 'ME2C-160-75GC-P'
	camera_ip = '10.254.0.251'
	#--------------------------------------------------------------
	# Save frames locally under download_images/; when False, frames are
	# sent to the AI server over TCP instead.
	saveImage2local = False
	#--------------------------------------------------------------

	# Initialize the SDK.
	MvCamera.MV_CC_Initialize()

	SDKVersion = MvCamera.MV_CC_GetSDKVersion()
	print ("SDKVersion[0x%x]" % SDKVersion)

	deviceList = MV_CC_DEVICE_INFO_LIST()
	tlayerType = MV_GIGE_DEVICE | MV_USB_DEVICE
	
	# Enumerate attached devices (GigE + USB).
	ret = MvCamera.MV_CC_EnumDevices(tlayerType, deviceList)
	if ret != 0:
		print ("enum devices fail! ret[0x%x]" % ret)
		sys.exit()

	if deviceList.nDeviceNum == 0:
		print ("find no device!")
		sys.exit()

	print ("Find %d devices!" % deviceList.nDeviceNum)

	# List device information (model name -> index, index -> dotted IP).
	camereID={}
	cameraIPs={}
	for i in range(0, deviceList.nDeviceNum):
		mvcc_dev_info = cast(deviceList.pDeviceInfo[i], POINTER(MV_CC_DEVICE_INFO)).contents
		if mvcc_dev_info.nTLayerType == MV_GIGE_DEVICE:
			print ("\ngige device: [%d]" % i)
			strModeName = ""
			for per in mvcc_dev_info.SpecialInfo.stGigEInfo.chModelName:
				strModeName = strModeName + chr(per)
			# Strip whitespace and NUL padding bytes from the model name.
			strModeName = strModeName.strip().replace('\x00', '')
			print ("device model name: %s" % strModeName)
			camereID[f"{strModeName}"] = i

			# Decode the 32-bit IP address into dotted-quad octets.
			nip1 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0xff000000) >> 24)
			nip2 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x00ff0000) >> 16)
			nip3 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x0000ff00) >> 8)
			nip4 = (mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x000000ff)
			cameraIPs[f"{i}"] = f"{nip1}.{nip2}.{nip3}.{nip4}"
			print ("current ip: %d.%d.%d.%d\n" % (nip1, nip2, nip3, nip4))
		elif mvcc_dev_info.nTLayerType == MV_USB_DEVICE:
			print ("\nu3v device: [%d]" % i)
			strModeName = ""
			for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chModelName:
				if per == 0:
					break
				strModeName = strModeName + chr(per)
			print ("device model name: %s" % strModeName)

			strSerialNumber = ""
			for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chSerialNumber:
				if per == 0:
					break
				strSerialNumber = strSerialNumber + chr(per)
			print ("user serial number: %s" % strSerialNumber)
	
	##----------------------------------------------------------------------------------------------
	# Manual device selection disabled
	# if sys.version >= '3':
	# 	nConnectionNum = input("please input the number of the device to connect:")
	# else:
	# 	nConnectionNum = raw_input("please input the number of the device to connect:")

	# if int(nConnectionNum) >= deviceList.nDeviceNum:
	# 	print ("intput error!")
	# 	sys.exit()

	# Use the fixed device selected by camera_ip.
	# nConnectionNum = list(camereID.keys()).index(key_to_find) if key_to_find in camereID else None
	# NOTE(review): raises ValueError if camera_ip is not among the enumerated devices.
	nConnectionNum = list(cameraIPs.values()).index(camera_ip)
	print("nConnectionNum:",nConnectionNum)
	##----------------------------------------------------------------------------------------------


	# Create the camera instance.
	cam = MvCamera()
	
	# Select the device and create a handle.
	stDeviceList = cast(deviceList.pDeviceInfo[int(nConnectionNum)], POINTER(MV_CC_DEVICE_INFO)).contents

	ret = cam.MV_CC_CreateHandle(stDeviceList)
	if ret != 0:
		print ("create handle fail! ret[0x%x]" % ret)
		sys.exit()

	# Open the device.
	ret = cam.MV_CC_OpenDevice(MV_ACCESS_Exclusive, 0)
	if ret != 0:
		print ("open device fail! ret[0x%x]" % ret)
		sys.exit()

	# Detect the optimal network packet size (only effective for GigE cameras).
	if stDeviceList.nTLayerType == MV_GIGE_DEVICE:
		nPacketSize = cam.MV_CC_GetOptimalPacketSize()
		if int(nPacketSize) > 0:
			ret = cam.MV_CC_SetIntValue("GevSCPSPacketSize",nPacketSize)
			if ret != 0:
				print ("Warning: Set Packet Size fail! ret[0x%x]" % ret)
		else:
			print ("Warning: Get Packet Size fail! ret[0x%x]" % nPacketSize)

	# Set trigger mode to off (free-running acquisition).
	ret = cam.MV_CC_SetEnumValue("TriggerMode", MV_TRIGGER_MODE_OFF)
	if ret != 0:
		print ("set trigger mode fail! ret[0x%x]" % ret)
		sys.exit()
	
	# # Export the camera attributes to FeatureFile.ini
	# ret = cam.MV_CC_FeatureSave("FeatureFile.ini")
	# if ret != 0:
	# 	print("saving camera feature file failed:", ret)
	# else:
	# 	print("camera feature file saved")

	# exit()

	# Load the feature configuration file.
	ret = cam.MV_CC_FeatureLoad("FeatureFile.ini")
	if ret != 0:
		print("加载相机参数配置文件失败，错误码:", ret)
	else:
		print("相机参数配置文件加载成功！")


	# Get the payload size (bytes needed for one frame buffer).
	stParam =  MVCC_INTVALUE()
	memset(byref(stParam), 0, sizeof(MVCC_INTVALUE))
	
	ret = cam.MV_CC_GetIntValue("PayloadSize", stParam)
	if ret != 0:
		print ("get payload size fail! ret[0x%x]" % ret)
		sys.exit()
	nPayloadSize = stParam.nCurValue

	# Start grabbing images.
	ret = cam.MV_CC_StartGrabbing()
	if ret != 0:
		print ("start grabbing fail! ret[0x%x]" % ret)
		sys.exit()

	data_buf = (c_ubyte * nPayloadSize)()
	# print(data_buf)

	try:
		hThreadHandle = threading.Thread(target=work_thread, args=(cam, data_buf, nPayloadSize,server_ip,server_port))
		hThreadHandle.start()
	except:
		print ("error: unable to start thread")
		
	print ("press a key to stop grabbing.")
	press_any_key_exit()

	# Signal the worker loop to stop and wait for it to finish.
	g_bExit = True
	hThreadHandle.join()

	# Stop grabbing.
	ret = cam.MV_CC_StopGrabbing()
	if ret != 0:
		print ("stop grabbing fail! ret[0x%x]" % ret)
		del data_buf
		sys.exit()

	# Close the device.
	ret = cam.MV_CC_CloseDevice()
	if ret != 0:
		print ("close deivce fail! ret[0x%x]" % ret)
		del data_buf
		sys.exit()

	# Destroy the handle.
	ret = cam.MV_CC_DestroyHandle()
	if ret != 0:
		print ("destroy handle fail! ret[0x%x]" % ret)
		del data_buf
		sys.exit()

	del data_buf

	# Finalize the SDK.
	MvCamera.MV_CC_Finalize()