//
//  VTDecode.m
//  TUTKTest
//
//  Created by kai_leedarson on 2017/10/24.
//  Copyright © 2017年 maple_leedarson. All rights reserved.
//

#import "VTDecode.h"
#import <VideoToolbox/VideoToolbox.h>
@interface VTDecode ()
@property (nonatomic) VTDecompressionSessionRef decompressionSession;
@property (nonatomic) CMVideoFormatDescriptionRef videoFormatDescription;
@property (nonatomic, strong) dispatch_queue_t      decodeQueue;

@property (nonatomic) uint8_t   *sps;
@property (nonatomic) size_t    spsSize;
@property (nonatomic) uint8_t   *pps;
@property (nonatomic) size_t    ppsSize;

@end
@implementation VTDecode

/// Initializes the decoder and creates the serial queue on which all
/// decode work is serialized.
- (instancetype)init
{
    if ((self = [super init])) {
        // Direct ivar assignment: avoid accessor side effects during init.
        _decodeQueue = dispatch_queue_create("VideoDecodeQueue", DISPATCH_QUEUE_SERIAL);
    }
    return self;
}

/// Tears down the decompression session and releases the cached SPS/PPS
/// buffers. Ivars are accessed directly because calling property accessors
/// from dealloc is unsafe (subclass overrides / KVO side effects).
- (void)dealloc
{
    [self decoderSetdown];
    // free(NULL) is a defined no-op, so no NULL guards are needed.
    free(_sps);
    _sps = NULL;
    free(_pps);
    _pps = NULL;
    _spsSize = _ppsSize = 0;
}

#pragma mark - Decompression output callback

/// VTDecompressionSession output callback. -decode:length: passes the address
/// of a local CVPixelBufferRef as sourceFrameRefCon; on success the decoded
/// frame is retained into it (+1) for the caller to hand to the delegate.
static void didDecompress(void *decompressionOutputRefCon, void *sourceFrameRefCon, OSStatus status, VTDecodeInfoFlags infoFlags, CVImageBufferRef pixelBuffer, CMTime presentationTimeStamp, CMTime presentationDuration ){
    CVPixelBufferRef *outputPixelBuffer = (CVPixelBufferRef *)sourceFrameRefCon;
    if (outputPixelBuffer == NULL) {
        return;
    }
    // Only hand back a frame when decoding actually succeeded; on failure the
    // caller's pointer is left NULL so nothing stale is released or delivered.
    if (status == noErr && pixelBuffer != NULL) {
        *outputPixelBuffer = CVPixelBufferRetain(pixelBuffer);
    } else {
        *outputPixelBuffer = NULL;
    }
}

#pragma mark - Decoder session creation / teardown

/// Lazily (re)creates the CMVideoFormatDescription and VTDecompressionSession
/// from the cached SPS/PPS parameter sets.
/// @return YES when a usable session exists, NO on any failure.
-(BOOL)decoderSetup
{
    if(self.decompressionSession) {
        return YES;
    }

    // Both parameter sets must have been received before a session can be
    // built; passing NULL pointers/zero sizes to
    // CMVideoFormatDescriptionCreateFromH264ParameterSets is invalid.
    if (_sps == NULL || _pps == NULL || _spsSize == 0 || _ppsSize == 0) {
        return NO;
    }

    const uint8_t* const parameterSetPointers[2] = { _sps, _pps };
    const size_t parameterSetSizes[2] = { _spsSize, _ppsSize };
    OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault,
                                                                          2, // parameter-set count (SPS + PPS)
                                                                          parameterSetPointers,
                                                                          parameterSetSizes,
                                                                          4, // NAL length-prefix size used by the stream
                                                                          &_videoFormatDescription);

    if (status == noErr) {
        // Request NV12 (video-range) output backed by an IOSurface and
        // OpenGLES-compatible, so frames can be rendered without a copy.
        NSDictionary* pixelBufferOptions = @{
                                             (NSString*) kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange),
                                             (NSString*) kCVPixelBufferOpenGLESCompatibilityKey : @YES,
                                             (NSString*) kCVPixelBufferIOSurfacePropertiesKey : @{}};

        VTDecompressionOutputCallbackRecord callBackRecord;
        callBackRecord.decompressionOutputCallback = didDecompress;
        // Per-frame refCon is supplied via DecodeFrame's sourceFrameRefCon.
        callBackRecord.decompressionOutputRefCon = NULL;

        status = VTDecompressionSessionCreate(kCFAllocatorDefault,
                                              self.videoFormatDescription,
                                              NULL,
                                              (__bridge CFDictionaryRef _Nullable)(pixelBufferOptions),
                                              &callBackRecord,
                                              &_decompressionSession);
    }

    // Single consolidated failure path for both creation steps.
    if (status != noErr) {
        NSLog(@"IOS8VT: reset decoder session failed status=%d", (int)status);
        [self decoderSetdown];
        return NO;
    }
    return YES;
}

/// Invalidates and releases the decompression session and the video format
/// description, leaving the decoder ready to be rebuilt by -decoderSetup.
- (void)decoderSetdown
{
    VTDecompressionSessionRef session = self.decompressionSession;
    if (session != NULL) {
        VTDecompressionSessionInvalidate(session);
        CFRelease(session);
        self.decompressionSession = NULL;
    }

    CMVideoFormatDescriptionRef description = self.videoFormatDescription;
    if (description != NULL) {
        CFRelease(description);
        self.videoFormatDescription = NULL;
    }
}

/// Accepts one Annex-B framed H.264 NAL unit (4-byte start code + payload),
/// copies it, and decodes it asynchronously on the serial decode queue.
/// SPS/PPS NALs are cached for session creation; slice NALs are decoded and
/// the resulting pixel buffer is delivered to the delegate on the main queue.
/// @param bytes     Pointer to the NAL unit, start code included.
/// @param length    Total byte count, start code included. Must be > 4.
/// @param timeStamp Caller-supplied timestamp forwarded to the delegate.
- (void)decodeWithBytes:(char *)bytes length:(uint32_t)length timeStamp:(UInt32)timeStamp {
    // Need at least the 4-byte start code plus one NAL header byte; without
    // this guard, h264Bytes[4] below reads out of bounds and nalSize underflows.
    if (bytes == NULL || length <= 4) {
        return;
    }
    char *h264Bytes = calloc(length, sizeof(char));
    if (h264Bytes == NULL) {
        return; // allocation failed; drop the frame rather than crash in memcpy
    }
    memcpy(h264Bytes, bytes, length);
    dispatch_async(self.decodeQueue, ^{
        // Rewrite the Annex-B start code as a big-endian AVCC length prefix,
        // which is the framing VideoToolbox expects.
        uint32_t nalSize = (uint32_t)(length - 4);
        uint32_t *pNalSize = (uint32_t *)h264Bytes;
        *pNalSize = CFSwapInt32HostToBig(nalSize);

        CVPixelBufferRef pixelBuffer = NULL;
        int nalType = h264Bytes[4] & 0x1F;
        switch (nalType) {
            case 0x01: // non-IDR slice (P/B frame)
                pixelBuffer = [self decode:h264Bytes length:length];
                break;
            case 0x05: // IDR frame: ensure a session exists before decoding
                if([self decoderSetup]) {
                    pixelBuffer = [self decode:h264Bytes length:length];
                }
                break;
            case 0x07: // SPS: cache payload without the 4-byte start code,
            {          // since the format description API rejects start codes
                if(_sps) {
                    free(_sps);
                    _sps = NULL;
                }
                _spsSize = length - 4;
                _sps = malloc(_spsSize);
                memcpy(_sps, h264Bytes + 4, _spsSize);
                break;
            }
            case 0x08: // PPS: cached the same way as the SPS
            {
                if(_pps) {
                    free(_pps);
                    _pps = NULL;
                }
                _ppsSize = length - 4;
                _pps = malloc(_ppsSize);
                memcpy(_pps, h264Bytes + 4, _ppsSize);
                break;
            }
            default:
                break;
        }
        if (pixelBuffer != NULL) {
            // NOTE(review): pixelBuffer arrives +1-retained from the output
            // callback; the delegate appears to be responsible for releasing
            // it — confirm against delegate implementations.
            dispatch_async(dispatch_get_main_queue(), ^{
                if (self.delegate && [self.delegate respondsToSelector:@selector(decoderPixelBuffer:timeStamp:)]) {
                    [self.delegate decoderPixelBuffer:pixelBuffer timeStamp:timeStamp];
                } else {
                    // No consumer: balance the callback's retain to avoid a leak.
                    CVPixelBufferRelease(pixelBuffer);
                }
            });
        }
        free(h264Bytes);
     });
}


/// Wraps one AVCC-framed NAL unit in a CMSampleBuffer and decodes it through
/// the current decompression session.
/// @param bytes  NAL unit prefixed with a 4-byte big-endian length.
/// @param length Total byte count of the buffer.
/// @return A retained pixel buffer on success (receiver must release),
///         or NULL on any failure.
- (CVPixelBufferRef)decode:(char *)bytes length:(uint32_t)length {
    CVPixelBufferRef outputPixelBuffer = NULL;
    CMBlockBufferRef blockBuffer = NULL;
    // kCFAllocatorNull block allocator: the block buffer borrows `bytes`
    // rather than copying or freeing it; the caller keeps ownership.
    OSStatus status  = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,
                                                          (void*)bytes,
                                                          length,
                                                          kCFAllocatorNull,
                                                          NULL,
                                                          0,
                                                          length,
                                                          0,
                                                          &blockBuffer);
    if (status != kCMBlockBufferNoErr) {
        return NULL;
    }

    CMSampleBufferRef sampleBuffer = NULL;
    const size_t sampleSizeArray[] = {length};
    status = CMSampleBufferCreateReady(kCFAllocatorDefault,
                                       blockBuffer,
                                       self.videoFormatDescription,
                                       1,               // one sample
                                       0,               // no timing entries
                                       NULL,
                                       1,               // one size entry
                                       sampleSizeArray,
                                       &sampleBuffer);
    if (status == kCMBlockBufferNoErr && sampleBuffer) {
        VTDecodeFrameFlags flags = 0;
        VTDecodeInfoFlags flagOut = 0;
        // Synchronous decode: didDecompress stores the retained result into
        // outputPixelBuffer via the sourceFrameRefCon pointer.
        OSStatus decodeStatus = VTDecompressionSessionDecodeFrame(self.decompressionSession,
                                                                  sampleBuffer,
                                                                  flags,
                                                                  &outputPixelBuffer,
                                                                  &flagOut);
        if (decodeStatus != noErr) {
            // Any failure: drop whatever the callback may have produced.
            CVPixelBufferRelease(outputPixelBuffer);
            outputPixelBuffer = NULL;
            switch (decodeStatus) {
                case kVTInvalidSessionErr:
                    // Session died (e.g. app backgrounded); rebuild on next IDR.
                    NSLog(@"IOS8VT: Invalid session, reset decoder session");
                    [self decoderSetdown];
                    break;
                case kVTParameterErr:
                    NSLog(@"IOS8VT: decode failed status=%d(ParameterErr)", (int)decodeStatus);
                    [self decoderSetdown];
                    break;
                case kVTVideoDecoderMalfunctionErr:
                    NSLog(@"IOS8VT: decode failed status=%d(MalfunctionErr)", (int)decodeStatus);
                    [self decoderSetdown];
                    break;
                case kVTVideoDecoderBadDataErr:
                    // Corrupt bitstream: keep the session, skip this frame.
                    NSLog(@"IOS8VT: decode failed status=%d(Bad data)", (int)decodeStatus);
                    break;
                default:
                    NSLog(@"IOS8VT: decode failed status=%d", (int)decodeStatus);
                    break;
            }
        }
        CFRelease(sampleBuffer);
    }
    CFRelease(blockBuffer);
    return outputPixelBuffer;
}

@end
