import cv2
import numpy as np
from numba import cuda
from numba.cuda.random import create_xoroshiro128p_states,xoroshiro128p_uniform_float32
from vibe import apply_mask_to_image,get_crop_info,slice_numpy_img

# ViBe+ background-subtraction parameters.
num_sam = DEFAULT_NUM_SAMPLES = 20        # samples kept per pixel in the model
min_match = DEFAULT_MIN_MATCHES = 2       # matches needed to classify a pixel as background
r = DEFAULT_RADIUS = 15                   # matching radius (unused in the visible code — TODO confirm)
rand_sam = DEFAULT_RANDOM_SAMPLE = 16*2   # subsampling factor phi: each update fires with probability 1/phi


# Previously used test inputs, kept for reference:
# RTSP = "11.avi"
# zone1 = "580,1070,851,460,901,460,947,1068,-1"
# RTSP = "2.mp4"
# zone1 = "860,249,1156,650,1292,588,959,249,-1"
RTSP = "1.mp4"
zone1 = "0,0,1280,0,1280,720,0,720,-1"

# Parse the "-1"-terminated "x,y,x,y,..." polygon string into an (N,1,2) int32 array.
zone = np.array(zone1.split(',-1')[0].split(','), dtype=np.int32).reshape((-1, 1, 2))
# offset is indexed as offset[0..3]; presumably (x, y, w, h) of the polygon's
# bounding crop — TODO confirm against vibe.get_crop_info.
offset = get_crop_info(zone)
roi_mat = np.zeros((offset[3], offset[2],3), dtype=np.uint8)
# Shift the polygon into crop-local coordinates.
new_boundary = zone - np.array((offset[0], offset[1]))
# 0/1 mask: 1 inside the ROI polygon, 0 outside (later multiplied onto frames).
cv2.fillPoly(roi_mat, [new_boundary], (1,1,1))
d_roi_mat = cuda.to_device(np.ascontiguousarray(roi_mat[:,:,0]))

# Current raw frame source
RTSP = RTSP
Vs = cv2.VideoCapture(RTSP)
# Processing size comes from the crop rectangle, not the full video frame.
H,W,C = offset[3],offset[2],3
# Ret,Frame = Vs.read()
# Frame = slice_numpy_img(Frame,offset)
# H,W,C = Frame.shape
# Gray = cv2.cvtColor(Frame,cv2.COLOR_BGR2GRAY)
# d_gray = cuda.to_device(Gray.astype(np.float32))
Frame_id = 0

#====================================================
#        Sample Library Information Related
#====================================================
# Per-pixel gray-value sample library.
# Sample Library, size = img.rows * img.cols *  DEFAULT_NUM_SAMPLES
# NOTE(review): all buffers below use np.empty and therefore hold garbage
# until ProcessFirstFrame (or the kernels) write them on the first frame.
samples = np.empty((H,W,num_sam),dtype=np.float32)
d_samples  = cuda.to_device(samples)

# RGB-channel image sample library.
# Sample Library, size = img.rows * img.cols *  DEFAULT_NUM_SAMPLES * 3 (The 3 values Save the pixel's [B, G, R])
samples_Frame = np.empty((H,W,num_sam,C))
d_samples_Frame = cuda.to_device(samples_Frame)

# Gray-value variance of each pixel's sample set.
# the Gray Value Variance of Sample Set
# double **samples_sumsqr;
samples_sumsqr = np.empty((H,W),np.float32)
d_samples_sumsqr = cuda.to_device(samples_sumsqr)

# Gray-value mean of each pixel's sample set.
# the Gray Value Average Value of Sample Set
# double **samples_ave;
samples_ave = np.empty((H,W),np.float32)
d_samples_ave = cuda.to_device(samples_ave)

# Number of consecutive frames a pixel has been classified as foreground.
# the Number of Times Counted as Foreground Point Continuously
# int **samples_ForeNum;
samples_ForeNum = np.empty((H,W),np.float32)
d_samples_ForeNum = cuda.to_device(samples_ForeNum)

# Is the pixel a background inner-edge pixel?  0: NO;  1: YES
# Is the Sample Background's Inner Edge?
# 0 -- No;  1 -- Yes
# bool **samples_BGInner
samples_BGInner = np.empty((H,W),np.float32)
d_samples_BGInner = cuda.to_device(samples_BGInner)



# 8-neighborhood state bits, valid when the pixel is a background inner edge.
# the Samples' State Bits of 8 Neighbor Area (If Current Pixel Belongs to Background Inner Edge)
# int **samples_InnerState

samples_InnerState = np.empty((H,W),np.float32)
d_samples_InnerState = cuda.to_device(samples_InnerState)

# Blink level of each pixel (how often its neighborhood pattern flips).
# Blink Level of the Samples
# int **samples_BlinkLevel

samples_BlinkLevel = np.empty((H,W),np.float32)
d_samples_BlinkLevel = cuda.to_device(samples_BlinkLevel)

# Maximum gray gradient in each pixel's neighborhood.
# the Max Value of Sample
# int **samples_MaxInnerGrad

samples_MaxInnerGrad = np.empty((H,W),np.float32)
d_samples_MaxInnerGrad = cuda.to_device(samples_MaxInnerGrad)

# Foreground-model binary image: the segmented foreground/background result.
# Foreground Model Binary Image
# It shows Foreground and Background Information After Segmatation
# Mat SegModel

# Update-model binary image: marks pixel locations that take part in the
# model update.
# Update Model's Binary Image
# It shows Pixels' Location which will join in Data Update
# Mat UpdateModel


# Amplification factor for the adaptive threshold used in ExtractBG.
amp_multifactor = AMP_MULTIFACTOR = 0.5
# One xoroshiro128+ RNG stream per pixel, indexed by idx*W+idy in the kernels.
rng_state = create_xoroshiro128p_states(H*W,seed=Frame_id)
# Row/column offsets used to pick a random 3x3 neighbor (index 0..8).
pos_offset = np.asarray([-1,0,1,-1,0,1,-1,0,1],dtype=np.int32)
d_pos_offset = cuda.to_device(pos_offset)


# CUDA launch configuration: 32x32 threads per block, grid covering H x W.
Thread_Per_Block = 32
# NOTE(review): "Grdi" is a typo for "Grid", but the name is used at every
# kernel launch below, so it is kept as-is here.
Block_Per_Grdi_H = (H + Thread_Per_Block -1) // Thread_Per_Block
Block_Per_Grid_W = (W + Thread_Per_Block -1) // Thread_Per_Block

@cuda.jit
def ProcessFirstFrame(init_frame,init_gray,samples,samples_Frame,samples_ave,samples_sumsqr,H,W,num_sam,rng_state,d_pos_offset,d_roi_mat):
    """Initialize every pixel's sample library from the first frame.

    Each of the num_sam sample slots is filled from a random pixel in the
    3x3 neighborhood (clamped to the image border), for both the gray model
    (samples) and the BGR model (samples_Frame); then the per-pixel sample
    mean (samples_ave) and variance (samples_sumsqr) are computed.
    One CUDA thread handles one pixel (idx, idy).
    """
    idx,idy = cuda.grid(2)
    if idx<H and idy<W :
        # BUG FIX: the host buffers are allocated with np.empty and hold
        # garbage; reset the accumulators before adding to them (the original
        # code accumulated on top of uninitialized memory).
        samples_ave[idx,idy] = 0.0
        samples_sumsqr[idx,idy] = 0.0
        for k in range(num_sam):
            rnd1 = xoroshiro128p_uniform_float32(rng_state,idx*W+idy)
            rnd2 = xoroshiro128p_uniform_float32(rng_state,idx*W+idy)
            # A random index in [0, 9) selects one of the 3x3 neighbor offsets.
            x_pos = idx + d_pos_offset[int(rnd1*9)]
            y_pos = idy + d_pos_offset[int(rnd2*9)]
            # Clamp the neighbor position to the image bounds.
            x_pos = min(max(x_pos,0),H-1)
            y_pos = min(max(y_pos,0),W-1)
            samples[idx,idy,k] = init_gray[x_pos,y_pos]
            for m in range(3):
                samples_Frame[idx,idy,k,m] = init_frame[x_pos,y_pos,m]
            # Accumulate the sample-set gray mean incrementally.
            samples_ave[idx,idy] += samples[idx,idy,k]/num_sam
        # Compute the sample-set variance against the mean just computed.
        for q in range(num_sam):
            samples_sumsqr[idx,idy] += ((samples[idx,idy,q]-samples_ave[idx,idy])**2)/num_sam


        
            
        
        


@cuda.jit
def ExtractBG(frame,gray,samples,samples_Frame,samples_ave,samples_sumsqr,H,W,num_sam,rng_state,min_match,samples_ForeNum,SegModel,amp_multifactor,d_roi_mat):
    """Classify each pixel as background (SegModel=0) or foreground (255).

    A pixel matches a stored sample when its gray distance is below an
    adaptive threshold AND its color distortion against the sample's BGR
    vector is below 20.  A pixel stuck in the foreground for more than 100
    consecutive frames is forcibly blended back into the model (ghost
    suppression).  One CUDA thread handles one pixel.
    """
    idx,idy = cuda.grid(2)
    if idx<H and idy<W:
        B = frame[idx,idy,0]
        G = frame[idx,idy,1]
        R = frame[idx,idy,2]
        matches = 0
        # Adaptive gray threshold driven by the sample-set variance, clamped
        # to [20, 40].
        # NOTE(review): squaring sumsqr (already a variance) looks odd; the
        # usual ViBe+ formulation scales the standard deviation — verify.
        AdaThreshold = ((samples_sumsqr[idx,idy])**2) * amp_multifactor
        AdaThreshold = max(min(AdaThreshold,40),20)
        for k in range(num_sam):
            B_sam = samples_Frame[idx,idy,k,0]
            G_sam = samples_Frame[idx,idy,k,1]
            R_sam = samples_Frame[idx,idy,k,2]
            # Color distortion: distance from the pixel's BGR vector to its
            # projection onto the sample's BGR vector.
            RGB_Norm2 = B**2  + G**2 + R**2
            RGBSam_Norm2 = B_sam**2  + G_sam**2 + R_sam**2
            if RGBSam_Norm2 > 0:
                p2 = ((B*B_sam + G*G_sam + R*R_sam)**2) / RGBSam_Norm2
            else:
                # BUG FIX: the original divided by zero whenever the sample is
                # pure black — guaranteed for pixels zeroed by the ROI mask.
                # Treat the projection as zero so colordist falls back to the
                # pixel's own norm.
                p2 = 0.0
            colordist = (RGB_Norm2-p2)**0.5 if RGB_Norm2>p2 else 0
            # Match when the gray distance is under the adaptive threshold
            # and the color distortion is under 20.
            distance = abs(samples[idx,idy,k] - gray[idx,idy])
            if distance<AdaThreshold and colordist<20:
                matches +=1
            if matches==min_match:
                break
        if matches>=min_match:
            samples_ForeNum[idx,idy] = 0
            SegModel[idx,idy] = 0
        else:
            samples_ForeNum[idx,idy] +=1
            SegModel[idx,idy] = 255
            # Ghost suppression: after >100 consecutive foreground frames,
            # overwrite a random sample slot with the current pixel.
            if samples_ForeNum[idx,idy]>100:
                rnd = xoroshiro128p_uniform_float32(rng_state,idx*W+idy)
                slot = int(rnd*(num_sam-0))
                # Update the gray sample ...
                samples[idx,idy,slot] = gray[idx,idy]
                # ... and the matching BGR sample.
                for m in range(3):
                    samples_Frame[idx,idy,slot,m]  = frame[idx,idy,m]
            



@cuda.jit
def CalcuUpdateModel(SegModel,gray,samples_BGInner,samples_InnerState,samples_BlinkLevel,samples_MaxInnerGrad,UpdateModel,H,W,num_sam,d_roi_mat):
    """Detect blinking background-inner-edge pixels and mark UpdateModel.

    For each background pixel, builds an 8-bit word from the foreground/
    background pattern of its 8 neighbors, tracks how often that pattern
    changes between frames (blink level), and records the maximum gray
    gradient in the neighborhood.  Pixels whose blink level exceeds 30 are
    flagged (255) in UpdateModel so the Update kernel skips them.
    Border pixels (outermost two rows/cols) are left untouched.
    """
    idx,idy = cuda.grid(2)
    if 1<idx<H-1 and 1<idy<W-1:
        state = 0
        maxGrad = 0
        # Detect a background inner-edge pixel.
        if SegModel[idx,idy] <=0:
            for nx in range(idx-1,idx+2):
                for ny in range(idy-1,idy+2):
                    if not (nx==idx and ny==idy):
                        # One bit per neighbor: 1 = foreground, 0 = background.
                        bit = 1 if SegModel[nx,ny]==255 else 0
                        # Shift the accumulated word left and append the bit.
                        state = (state<<1) + bit
                        # Track the maximum gray gradient in the neighborhood.
                        grad = abs(gray[idx,idy] - gray[nx,ny])
                        maxGrad = max(maxGrad,grad)

            state = state & 255
            # Any foreground neighbor => this background pixel is an inner edge.
            samples_BGInner[idx,idy] = True if state>0 else False
        else:
            samples_BGInner[idx,idy] = False
            samples_InnerState[idx,idy] = 0
        # Compute the blink level: a stable neighborhood pattern decays it,
        # a changed pattern raises it sharply.
        if samples_BGInner[idx,idy]:
            # Same pattern as the previous frame => the pixel is not blinking.
            if state == samples_InnerState[idx,idy]:
                samples_BlinkLevel[idx,idy] = max(samples_BlinkLevel[idx,idy]-1,0)
            else:
                samples_BlinkLevel[idx,idy] = min(samples_BlinkLevel[idx,idy]+15,150)
        else:
            samples_BlinkLevel[idx,idy] = max(samples_BlinkLevel[idx,idy]-1,0)

        # Persist the state bits for next frame's comparison.
        # BUG FIX: the original assigned `state` to samples_BlinkLevel here,
        # clobbering the blink level computed just above and leaving
        # samples_InnerState permanently stale, so the comparison above could
        # never detect a stable pattern.
        samples_InnerState[idx,idy] = state
        samples_MaxInnerGrad[idx,idy] = maxGrad

        if samples_BlinkLevel[idx,idy] >30:
            UpdateModel[idx,idy] = 255





@cuda.jit
def Update(frame,gray,UpdateModel,samples,samples_Frame,samples_MaxInnerGrad,rng_state,H,W,rand_sam,num_sam,samples_ave,samples_sumsqr,d_pos_offset,d_roi_mat):
    """Conservative, random-subsampled model update (ViBe update step).

    For every pixel not flagged in UpdateModel: with probability 1/rand_sam
    replace a random sample of its own model with the current pixel, and
    independently with probability 1/rand_sam propagate the current pixel
    into a random sample slot of a random 3x3 neighbor (suppressed across
    strong edges).  The per-pixel mean and variance are kept in sync with
    the replaced samples.
    NOTE(review): neighbor propagation races with other threads writing the
    same model; as in the original, these races are tolerated.
    """
    idx,idy = cuda.grid(2)
    if idx < H and idy < W:
        # Foreground pixels in UpdateModel never feed the sample library.
        if UpdateModel[idx,idy] <=0:
            rnd4 = xoroshiro128p_uniform_float32(rng_state,idx*W + idy)
            # Probability 1/phi of updating this pixel's own model.
            if int(rnd4*(rand_sam - 0)) == 0:
                rnd5 = xoroshiro128p_uniform_float32(rng_state,idx*W + idy)
                slot = int(rnd5*(num_sam - 0))
                # Incremental mean update: drop the old sample, add the pixel.
                samples_ave[idx,idy] = (samples_ave[idx,idy] * num_sam - samples[idx,idy,slot] + gray[idx,idy]) / num_sam
                # Replace the gray sample and the matching BGR sample.
                samples[idx,idy,slot] = gray[idx,idy]
                for m in range(3):
                    samples_Frame[idx,idy,slot,m] = frame[idx,idy,m]
                # BUG FIX: recompute the variance from scratch over all
                # samples.  The original added the SAME term (a fixed sample
                # index) num_sam times and accumulated with `+=` across
                # frames, so the variance grew without bound.
                var = 0.0
                for k in range(num_sam):
                    var += ((samples[idx,idy,k] - samples_ave[idx,idy])**2)/num_sam
                samples_sumsqr[idx,idy] = var

            # Independently, probability 1/phi of updating a neighbor's model.
            rnd6 = xoroshiro128p_uniform_float32(rng_state,idx*W + idy)
            if int(rnd6*(rand_sam - 0)) == 0:
                # Suppress propagation across strong edges (large gradient).
                if samples_MaxInnerGrad[idx,idy]<=50:
                    rnd7 = xoroshiro128p_uniform_float32(rng_state,idx*W+idy)
                    rnd8 = xoroshiro128p_uniform_float32(rng_state,idx*W+idy)
                    # Pick a random 3x3 neighbor and clamp it to the image.
                    x_pos = idx + d_pos_offset[int(rnd7*9)]
                    y_pos = idy + d_pos_offset[int(rnd8*9)]
                    x_pos = min(max(x_pos,0),H-1)
                    y_pos = min(max(y_pos,0),W-1)
                    rnd9 = xoroshiro128p_uniform_float32(rng_state,idx*W+idy)
                    slot = int(rnd9*(num_sam-0))
                    # BUG FIX: update the NEIGHBOR's statistics and both sample
                    # libraries consistently.  The original wrote the gray
                    # sample at (x_pos, y_pos) but the BGR sample and the
                    # mean/variance at (idx, idy).
                    samples_ave[x_pos,y_pos] = (samples_ave[x_pos,y_pos] * num_sam - samples[x_pos,y_pos,slot] + gray[idx,idy]) / num_sam
                    samples[x_pos,y_pos,slot] = gray[idx,idy]
                    for m in range(3):
                        samples_Frame[x_pos,y_pos,slot,m] = frame[idx,idy,m]
                    # Recompute the neighbor's variance from its full sample set.
                    var = 0.0
                    for j in range(num_sam):
                        var += ((samples[x_pos,y_pos,j] - samples_ave[x_pos,y_pos])**2)/num_sam
                    samples_sumsqr[x_pos,y_pos] = var


def fill_empty(imgtmp, SegModel):
    """Clean up a binary foreground mask using the contour hierarchy.

    Fills small holes inside foreground blobs (level-1 contours, area <= 100)
    with white, and erases tiny isolated foreground specks (outermost
    contours, area < 10) with black.

    Args:
        imgtmp: binary image searched for contours (OpenCV may modify it).
        SegModel: binary image that is drawn on and returned.

    Returns:
        SegModel with holes filled and specks removed.
    """
    contours, hierarchy = cv2.findContours(imgtmp, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # No contours at all => hierarchy is None; nothing to do.
    if hierarchy is None:
        return SegModel

    for i, cnt in enumerate(contours):
        # Index of the immediate parent contour (-1 if none).
        father = hierarchy[0][i][3]
        # Index of the grandparent contour (-1 if none).
        grandpa = hierarchy[0][father][3] if father >= 0 else -1

        # A parent but no grandparent => a level-1 contour, i.e. a hole
        # inside a foreground blob.
        if father >= 0 and grandpa == -1:
            # Fill foreground holes with area <= 100.
            # (BUG FIX: the original comment claimed <= 20 while the code
            # checks <= 100; the comment is corrected to match the code.)
            area = cv2.contourArea(cnt)
            if area <= 100:
                cv2.drawContours(SegModel, [cnt], -1, (255), -1)

        # No parent => an outermost (level-0) foreground blob.
        if father == -1:
            # Erase foreground specks with area < 10.
            area = cv2.contourArea(cnt)
            if area < 10:
                cv2.drawContours(SegModel, [cnt], -1, (0), -1)
    return SegModel


# Host/device copies of the two binary output masks.
SegModel = np.zeros((H,W),dtype=np.uint8)
d_SegModel = cuda.to_device(SegModel)
UpdateModel = np.zeros((H,W),dtype=np.uint8)
d_UpdateModel = cuda.to_device(UpdateModel)

# RTMP publish address
rtmp = "rtmp://172.29.97.188:5678/123456"

# FFmpeg command-line arguments for the push stream
command = [
    'ffmpeg',
    '-y',  # overwrite output file
    '-f', 'rawvideo',  # input format: raw video
    '-vcodec', 'rawvideo',  # input codec: raw video
    '-pix_fmt', 'bgr24',  # input pixel format BGR24
    '-s', '{}x{}'.format(int(W), int(H)),  # frame size
    '-r', '25',  # 25 frames per second
    '-i', '-',  # read frames from stdin
    '-c:v', 'libx264',  # encode with x264
    '-pix_fmt', 'yuv420p',  # output pixel format YUV420P
    '-preset', 'ultrafast',  # favor encoding speed over compression
    '-f', 'flv',  # output container FLV
    rtmp  # RTMP publish URL
]


import subprocess
# Spawn the FFmpeg child process; processed frames are piped to its stdin.
pipe = subprocess.Popen(command, stdin=subprocess.PIPE)

import time 
import numba
# Main loop: read a frame, run the CUDA pipeline (classify -> update-model ->
# sample update), post-process the masks on the CPU, and push the composited
# frame to the RTMP stream.
while True:
    
    ret,frame = Vs.read()
    if not ret :
        break
    # Crop to the ROI rectangle and zero everything outside the polygon mask.
    frame_crop = slice_numpy_img(frame, offset) * roi_mat
    d_frame = cuda.to_device(frame_crop.astype(np.float32))
    Frame_id +=1 
    
    # SegModel = np.zeros((H,W),dtype=np.uint8)
    d_SegModel = cuda.to_device(SegModel)
    # UpdateModel = np.zeros((H,W),dtype=np.uint8)
    d_UpdateModel = cuda.to_device(UpdateModel)
    gray = cv2.cvtColor(frame_crop,cv2.COLOR_BGR2GRAY)
    d_gray = cuda.to_device(gray.astype(np.float32))
    if Frame_id ==1:
        # First frame: initialize the per-pixel sample libraries.
        ProcessFirstFrame[(Block_Per_Grdi_H,Block_Per_Grid_W),(Thread_Per_Block,Thread_Per_Block)](d_frame,d_gray,d_samples,d_samples_Frame,d_samples_ave,d_samples_sumsqr,H,W,num_sam,rng_state,d_pos_offset,d_roi_mat)
    else:
        # if Frame_id % 3!=0:
        #     continue
        t0 = time.time()
        # Stage 1: classify pixels into foreground/background.
        ExtractBG[(Block_Per_Grdi_H,Block_Per_Grid_W),(Thread_Per_Block,Thread_Per_Block)](d_frame,d_gray,d_samples,d_samples_Frame,d_samples_ave,d_samples_sumsqr,H,W,num_sam,rng_state,min_match,d_samples_ForeNum,d_SegModel,amp_multifactor,d_roi_mat)
        cuda.synchronize()  # make sure all CUDA work has finished
        SegModel = d_SegModel.copy_to_host()
        SegModel = cv2.threshold(SegModel,1,255,cv2.THRESH_BINARY)[1]
        SegModel = cv2.dilate(SegModel,(7,7))
        SegModel = fill_empty(SegModel,SegModel.copy())
        d_SegModel = cuda.to_device(SegModel)
        # Stage 2: compute the update mask from the cleaned segmentation.
        CalcuUpdateModel[(Block_Per_Grdi_H,Block_Per_Grid_W),(Thread_Per_Block,Thread_Per_Block)](d_SegModel,d_gray,d_samples_BGInner,d_samples_InnerState,d_samples_BlinkLevel,d_samples_MaxInnerGrad,d_UpdateModel,H,W,num_sam,d_roi_mat)
        cuda.synchronize()  # make sure all CUDA work has finished
        SegModel = d_SegModel.copy_to_host()
        SegModel = cv2.threshold(SegModel,1,255,cv2.THRESH_BINARY)[1]
        SegModel = cv2.dilate(SegModel,(7,7))
        SegModel = fill_empty(SegModel,SegModel.copy())
        d_SegModel = cuda.to_device(SegModel)
        # Stage 3: stochastically refresh the sample libraries.
        Update[(Block_Per_Grdi_H,Block_Per_Grid_W),(Thread_Per_Block,Thread_Per_Block)](d_frame,d_gray,d_UpdateModel,d_samples,d_samples_Frame,d_samples_MaxInnerGrad,rng_state,H,W,rand_sam,num_sam,d_samples_ave,d_samples_sumsqr,d_pos_offset,d_roi_mat)
        cuda.synchronize()  # make sure all CUDA work has finished
        SegModel = d_SegModel.copy_to_host()
        UpdateModel = d_UpdateModel.copy_to_host()
        t5 = time.time()
        print(t5-t0)
        
        # SegModel = cv2.mea(UpdateModel,(3,3))
        # Final CPU post-processing of the foreground mask.
        SegModel = cv2.threshold(SegModel,1,255,cv2.THRESH_BINARY)[1]
        SegModel = cv2.dilate(SegModel,(7,7))
        SegModel = fill_empty(SegModel,SegModel.copy())

        # UpdateModel = cv2.blur(UpdateModel,(3,3))
        UpdateModel = cv2.threshold(UpdateModel,1,255,cv2.THRESH_BINARY)[1]
        UpdateModel = fill_empty(UpdateModel,UpdateModel.copy())

        # dection_result = cv2.medianBlur(dection_result,3)
        # Overlay the mask on the cropped frame, stamp the FPS, and stream it.
        apply_img = apply_mask_to_image(frame_crop,SegModel.copy())
        cv2.putText(apply_img, f"FPS: {int(1/(t5-t0))}", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
        pipe.stdin.write(cv2.cvtColor(apply_img, cv2.COLOR_BGR2RGB).tobytes())
        # if cv2.waitKey(10) & 0xff== ord("q"):
        #     break
        # cv2.namedWindow("VIBE_PLUS",cv2.WINDOW_NORMAL)
        # cv2.imshow("VIBE_PLUS",apply_img)
        # cv2.imwrite(f"vibeplus{Frame_id}.jpg",apply_img)
numba.cuda.close()



            
        
