import time
#import PyNvVideoCodec as nvc
#import pycuda.driver as cuda
#import torch
import numpy as np 
import cv2
import datetime
import os
import shutil
import sys
sys.path.append("av-12.2.0-py3.10-linux-x86_64.egg")
import av



def remove_tree(folder_path):
    """Recursively delete *folder_path*, printing the outcome.

    Failures (e.g. the folder does not exist yet) are reported to stdout
    instead of being raised, so callers can use this as a best-effort
    cleanup step.
    """
    try:
        shutil.rmtree(folder_path)
    except OSError as err:
        print(f"删除文件夹时发生错误: {err.strerror}")
    except Exception as err:
        print(f"发生错误: {err}")
    else:
        print(f"文件夹 {folder_path} 已成功删除。")


# def get_frame_nvc(enc_file_path):
#     cuda.init() 
#     cuda_device = cuda.Device(0) 
#     cuda_ctx = cuda_device.retain_primary_context() 
#     cuda_ctx.push() 
#     cuda_stream_decoder = cuda.Stream() 
    
#     oud_dir = "outputs/101_/nvc"
#     remove_tree(oud_dir)
#     os.makedirs(oud_dir,exist_ok=True)
#     #enc_file_path = "rtsp://admin:fx@123456@172.10.2.101:554/h264/ch1/main/av_stream"
#     seq_triggered = False 
#     demuxer = nvc.CreateDemuxer(filename=enc_file_path) 
#     decoder = nvc.CreateDecoder(gpuid=0,
#                                 codec=nvc.cudaVideoCodec.H264, 
#                                 cudacontext=cuda_ctx.handle, 
#                                 cudastream=cuda_stream_decoder.handle, 
#                                 usedevicememory=True) 

#     count = 0
#     tensor_list = []
#     for packet in demuxer: 
#         for decoded_frame in decoder.Decode(packet): 
#             if not seq_triggered: 
#                 #decoded_frame_size = decoder.GetFrameSize()
                
#                 count +=1
#                 if count < -1:
#                     continue
                
#                 if count > 100:
#                     for tensor,_time in tensor_list:
#                         raw_frame = tensor.cpu().numpy()
#                         rgb24 = cv2.cvtColor(raw_frame, cv2.COLOR_YUV2BGR_NV12)
#                         dt_object = datetime.datetime.fromtimestamp(_time)
#                         temp = dt_object.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
#                         out_fpath = os.path.join(oud_dir,+temp+".jpg")
#                         print(cv2.imwrite(out_fpath,rgb24))
#                     cuda_ctx.pop()
#                     return
                
#                 st = time.time()
#                 tensor = torch.from_dlpack(decoded_frame)
#                 tensor_list.append([tensor,time.time()])
#                 print(int((time.time() - st)*1000))
                



def get_frame_opencv(enc_file_path):
    """Decode *enc_file_path* with OpenCV and dump frames 50-100 as JPEGs.

    Frames are buffered together with their capture wall-clock time, then
    written to ``outputs/101_/opencv`` with filenames of the form
    ``YYYY-MM-DD HH:MM:SS.mmm.jpg``. The per-frame buffering latency (ms)
    is printed for each kept frame.
    """
    oud_dir = "outputs/101_/opencv"
    remove_tree(oud_dir)
    os.makedirs(oud_dir, exist_ok=True)

    cap = cv2.VideoCapture(enc_file_path)
    try:
        count = 0
        frame_list = []
        while True:
            ret, frame = cap.read()
            if not ret:
                # Stream ended or read failed: stop instead of looping
                # forever on None frames (the original never checked ret
                # and would eventually pass None to cv2.imwrite).
                break
            count += 1
            if count < 50:
                continue
            if count > 100:
                break

            st = time.time()
            frame_list.append([frame, time.time()])
            # Buffering latency in milliseconds (expected ~0; kept for parity
            # with the other decoder benchmarks in this file).
            print(int((time.time() - st) * 1000))

        # Write whatever was collected, even if the stream ended early.
        for img, _time in frame_list:
            dt_object = datetime.datetime.fromtimestamp(_time)
            temp = dt_object.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
            out_fpath = os.path.join(oud_dir, temp + ".jpg")
            print(cv2.imwrite(out_fpath, img))
    finally:
        # Release the capture handle even on error (original leaked it).
        cap.release()



def get_frame_av(enc_file_path):
    """Decode *enc_file_path* with PyAV, buffer the first 21 frames and dump them as JPEGs.

    Each decoded frame is stored with its decode wall-clock time and written
    to ``outputs/101_/av`` with filenames of the form
    ``YYYY-MM-DD HH:MM:SS.mmm.jpg``. Unregistered-user-data SEI side data is
    printed per frame when present; per-frame decode-path latency (ms) is
    printed as well.
    """
    oud_dir = "outputs/101_/av"
    remove_tree(oud_dir)
    os.makedirs(oud_dir, exist_ok=True)

    av.logging.set_level(av.logging.VERBOSE)
    container = av.open(enc_file_path)
    try:
        frame_list = []
        for count, frame in enumerate(container.decode(video=0)):
            if count > 20:
                break

            st = time.time()
            # Only the unregistered-user-data SEI is of interest; the original
            # also computed side_data.values() and immediately discarded it.
            sei = frame.side_data.get("SEI_UNREGISTERED")
            if sei is not None:
                # Guard: frames without SEI would crash on .to_bytes().
                print("sei", sei.to_bytes())

            frame_list.append([frame, time.time()])
            print(int((time.time() - st) * 1000))

        for img, _time in frame_list:
            dt_object = datetime.datetime.fromtimestamp(_time)
            temp = dt_object.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
            out_fpath = os.path.join(oud_dir, temp + ".jpg")
            # BUG FIX: convert the buffered frame (img), not the loop
            # variable `frame` — the original wrote the same last frame
            # 21 times under different timestamps.
            # NOTE(review): to_rgb() yields RGB while cv2.imwrite expects
            # BGR, so channel order may be swapped — confirm against output.
            np_img = img.to_rgb().to_ndarray()
            print(cv2.imwrite(out_fpath, np_img))
            #img.to_image().save(out_fpath)
    finally:
        # Close the container even on error (original leaked it).
        container.close()
        
if __name__ == "__main__":
    # NOTE(review): credentials are embedded in the RTSP URL — consider
    # moving them to configuration/environment before sharing this file.
    rtsp_url = "rtsp://admin:fxszn@2024@172.10.2.101:554/Streaming/Channels/101"
    get_frame_av(rtsp_url)
    
# python SampleDecode.py 0 rtsp://freja.hiof.no:1935/rtplive/definst/hessdalen03.stream output.nv12


'''
apt-get install -y python-dev-is-python3  python3-virtualenv pkg-config
or
sudo apt-get install -y python-dev python-virtualenv pkg-config

sudo apt-get install -y \
    libavformat-dev libavcodec-dev libavdevice-dev \
    libavutil-dev libswscale-dev libavresample-dev
git clone https://github.com/PyAV-Org/PyAV.git
cd PyAV
pip install Cython
python3 setup.py build

python3 setup.py build --ffmpeg-dir=/usr/include/x86_64-linux-gnu
'''