	//
	//  GaoMediaInfoManager.m
	//  JssAssetManagerDemo
	//
	//  Created by Gikki Ares on 2020/7/24.
	//  Copyright © 2020 vgemv. All rights reserved.
	//

#import "JsMediaInfoManager.h"



@implementation JsMediaInfoManager
//CMVideoCodecType

/**
 Maps a codec FourCharCode to a short human-readable name.
 @param i_type the codec type (e.g. from CMFormatDescriptionGetMediaSubType).
 @return a short codec name, or a diagnostic string for unmapped types.
 */
+ (NSString *)codecTypeStringFromCMVideoCodecType:(FourCharCode)i_type {
	switch (i_type) {
		case kCMVideoCodecType_H264:{
			return @"h264";
		}
		case kCMVideoCodecType_HEVC:{
			// Generalization: HEVC was not mapped and fell through to the
			// "not configured" diagnostic.
			return @"hevc";
		}
		case kAudioFormatMPEG4AAC:{
			// NOTE(review): this is an *audio* format constant in a method named
			// after CMVideoCodecType; kept because callers evidently use this
			// method for media subtypes of both kinds.
			return @"aac";
		}
		default:{
			return [NSString stringWithFormat:@"Type id is %lu, this type is not configured.",(unsigned long)i_type];
		}
	}
}

/**
 Formats a CMTime as "mm:ss.SSS".
 */
+ (NSString *)timeStringForTime:(CMTime)cmtime {
	// BUGFIX: CMTimeGetSeconds returns Float64; truncating it to float lost
	// millisecond precision for longer timestamps.
	Float64 f_sec = CMTimeGetSeconds(cmtime);
	return [self formattedTimeStringWithSecond:f_sec];
}

/**
 Debug description of a raw CMTime: "Value:…,Timescale:…,Second:…".
 */
+ (NSString *)stringForCMTime:(CMTime)cmtime {
	// BUGFIX: CMTimeValue is int64_t — assigning it to `long` truncated on
	// 32-bit architectures. Use width-correct types and format specifiers
	// (%lld for the 64-bit value, %d for the int32_t timescale).
	long long value = cmtime.value;
	int timescale = cmtime.timescale;
	Float64 seconds = CMTimeGetSeconds(cmtime);
	NSString * str = [NSString stringWithFormat:@"Value:%lld,Timescale:%d,Second:%.3f",value,timescale,seconds];
	return str;
}

/**
 C-string variant of stringForCMTime:, convenient for printf.
 NOTE: the returned pointer is owned by an autoreleased NSString — it is only
 valid until the current autorelease pool drains; copy it if it must be kept.
 */
+ (const char *)cstringForCMTime:(CMTime)cmtime {
	return [self stringForCMTime:cmtime].UTF8String;
}

/**
 C-string "mm:ss.SSS" for a CMTime, convenient for printf.
 NOTE: the returned pointer is backed by an autoreleased NSString and is only
 valid until the current autorelease pool drains.
 */
+ (const char *)timeCStringForTime:(CMTime)cmtime {
	// BUGFIX: keep CMTimeGetSeconds' Float64 precision instead of truncating
	// to float (lost millisecond accuracy for longer timestamps).
	Float64 f_sec = CMTimeGetSeconds(cmtime);
	return [self formattedTimeCStringWithSecond:f_sec];
}

/**
 Formats a duration in seconds as "mm:ss.SSS".
 Minutes are not wrapped at 60 (e.g. 3661.25 -> "61:01.250"), matching the
 previous behavior. Negative input is clamped to 0.
 */
+ (NSString *)formattedTimeStringWithSecond:(double)f_sec {
	if (f_sec < 0) {
		// BUGFIX: negative input previously produced malformed output such as
		// "00:-1..500" via the substring trick below.
		f_sec = 0;
	}
	// BUGFIX: compute the milliseconds numerically. The old code formatted the
	// fractional part with "%.3f" and took a substring, so a value such as
	// 59.9996 rounded to "1.000" and came out as "00:59.000" instead of
	// "01:00.000" (the carry into the seconds was silently dropped).
	long long totalMillis = (long long)(f_sec * 1000.0 + 0.5);
	long long i_minute = totalMillis / 60000;
	long long i_sec = (totalMillis % 60000) / 1000;
	long long i_millis = totalMillis % 1000;
	return [NSString stringWithFormat:@"%02lld:%02lld.%03lld",i_minute,i_sec,i_millis];
}

/**
 C-string variant of formattedTimeStringWithSecond: ("mm:ss.SSS").
 NOTE: the returned pointer is owned by an autoreleased NSString — it is only
 valid until the current autorelease pool drains; copy it if it must be kept.
 */
+ (nullable const char *)formattedTimeCStringWithSecond:(double)f_sec {
	return [[self formattedTimeStringWithSecond:f_sec] cStringUsingEncoding:NSUTF8StringEncoding];
}

/**
 Logs the packet description (if any) and the stream basic description of an
 audio CMSampleBuffer.
 */
+ (void)showAudioInfoFromCMSampleBuffer:(CMSampleBufferRef)sampleBuffer {
	// BUGFIX: initialize the out-parameters. The Get call leaves them untouched
	// when the buffer carries no packet descriptions (e.g. constant-bitrate
	// PCM), and `aspd` was then read uninitialized.
	const AudioStreamPacketDescription * aspd = NULL;
	size_t size = 0;
	CMSampleBufferGetAudioStreamPacketDescriptionsPtr(sampleBuffer, &aspd, &size);
	if(aspd) {
		[self printAudioStreamPacketDescription:*aspd];
	}
	CMAudioFormatDescriptionRef cmAudioFormatDescription = (CMAudioFormatDescriptionRef)CMSampleBufferGetFormatDescription(sampleBuffer);
	if(cmAudioFormatDescription == NULL) {
		// BUGFIX: a buffer without a format description previously crashed below.
		return;
	}
	const AudioStreamBasicDescription * inAudioStreamBasicDescription = CMAudioFormatDescriptionGetStreamBasicDescription(cmAudioFormatDescription);
	// BUGFIX: the returned pointer is NULL for non-audio descriptions and was
	// dereferenced unconditionally.
	if(inAudioStreamBasicDescription != NULL) {
		[self printAudioStreamBasicDescription:*inAudioStreamBasicDescription];
	}
}

/**
 Dumps one AudioStreamPacketDescription to stdout (offset in hex).
 */
+ (void)printAudioStreamPacketDescription:(AudioStreamPacketDescription)aspd {
	// BUGFIX: mStartOffset is a 64-bit SInt64; the old (unsigned int) cast with
	// %X truncated offsets beyond 4 GiB. Print the full 64-bit value.
	printf("mStartOffset:        		%10llX\n",   (unsigned long long)aspd.mStartOffset);
	printf("mVariableFramesInPacket:    %10d\n",    (unsigned int)aspd.mVariableFramesInPacket);
	printf("mDataByteSize:  	 			%10d\n",    (unsigned int)aspd.mDataByteSize);
	printf("\n");
}


/**
 Dumps every field of an AudioStreamBasicDescription to stdout, rendering the
 four-char mFormatID as readable text.
 */
+ (void)printAudioStreamBasicDescription:(AudioStreamBasicDescription)asbd {
	char formatID[5];
	// Swap to big-endian so the four characters of the code print in reading order.
	UInt32 mFormatID = CFSwapInt32HostToBig(asbd.mFormatID);
	bcopy (&mFormatID, formatID, 4);
	formatID[4] = '\0';
	printf(" ++++++ AudioStreamBasicDescription Start +++++++\n");
	printf("Sample Rate:         %10.0f\n",  asbd.mSampleRate);
	printf("Format ID:           %10s\n",    formatID);
		// %X prints the flag bits in hexadecimal.
	printf("Format Flags:        %10X\n",    (unsigned int)asbd.mFormatFlags);
	printf("Bytes per Packet:    %10d\n",    (unsigned int)asbd.mBytesPerPacket);
	printf("Frames per Packet:   %10d\n",    (unsigned int)asbd.mFramesPerPacket);
	printf("Bytes per Frame:     %10d\n",    (unsigned int)asbd.mBytesPerFrame);
	printf("Channels per Frame:  %10d\n",    (unsigned int)asbd.mChannelsPerFrame);
	printf("Bits per Channel:    %10d\n",    (unsigned int)asbd.mBitsPerChannel);
	printf(" ++++++ AudioStreamBasicDescription End +++++++\n");
	printf("\n");
}

/**
 Dumps timing, sizing, and format information of an audio CMSampleBuffer to
 stdout. Logs "This is an empty sample" when the buffer has no usable audio
 format description.
 @param frameCount caller-side frame counter; printed only when non-zero.
 */
+ (void)printSampleBufferForAudio:(CMSampleBufferRef)sampleBuffer withFrameCount:(long)frameCount {
	long sampleNums = CMSampleBufferGetNumSamples(sampleBuffer);
	long sampleSize = CMSampleBufferGetTotalSampleSize(sampleBuffer);
	CMTime outputPresentationTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer);
	CMTime presentationTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
	CMTime outputDecodeTime = CMSampleBufferGetOutputDecodeTimeStamp(sampleBuffer);
	CMTime decodeTime = CMSampleBufferGetDecodeTimeStamp(sampleBuffer);
	CMTime outputDurationTime = CMSampleBufferGetOutputDuration(sampleBuffer);
	
	CMAudioFormatDescriptionRef cmAudioFormatDescription = (CMAudioFormatDescriptionRef)CMSampleBufferGetFormatDescription(sampleBuffer);
	if(cmAudioFormatDescription == NULL) {
		NSLog(@"This is an empty sample");
		return;
	}
	const AudioStreamBasicDescription * pasbd = CMAudioFormatDescriptionGetStreamBasicDescription(cmAudioFormatDescription);
	// BUGFIX: the pointer is NULL for non-audio format descriptions and was
	// dereferenced unconditionally.
	if(pasbd == NULL) {
		NSLog(@"This is an empty sample");
		return;
	}
	AudioStreamBasicDescription asbd = *pasbd;
	
	// Render the four-char mFormatID as text (big-endian so it reads in order).
	char formatID[5];
	UInt32 mFormatID = CFSwapInt32HostToBig(asbd.mFormatID);
	bcopy (&mFormatID, formatID, 4);
	formatID[4] = '\0';
	
	// BUGFIX: initialize the out-parameters — the Get call leaves them untouched
	// for constant-bitrate audio (no packet descriptions), and `aspd` was then
	// read uninitialized.
	const AudioStreamPacketDescription * aspd = NULL;
	size_t packetSize = 0;
	CMSampleBufferGetAudioStreamPacketDescriptionsPtr(sampleBuffer, &aspd, &packetSize);
	
	printf(" ++++++ CMSampleBufferInfo ForAudio Start +++++++\n");
	if(aspd) {
		[self printAudioStreamPacketDescription:*aspd];
	}
	else {
		printf("No packet info\n");
	}
	if(frameCount) {
		printf("Frame Count:         %3ld\n",  frameCount);
	}
	printf("Sample Rate:         %10.0f\n",  asbd.mSampleRate);
	printf("Format ID:           %10s\n",    formatID);
		// %X prints the flag bits in hexadecimal.
	printf("Format Flags:        %10X\n",    (unsigned int)asbd.mFormatFlags);
	printf("Bytes per Packet:    %10d\n",    (unsigned int)asbd.mBytesPerPacket);
	printf("Frames per Packet:   %10d\n",    (unsigned int)asbd.mFramesPerPacket);
	printf("Bytes per Frame:     %10d\n",    (unsigned int)asbd.mBytesPerFrame);
	printf("Channels per Frame:  %10d\n",    (unsigned int)asbd.mChannelsPerFrame);
	printf("Bits per Channel:    %10d\n",    (unsigned int)asbd.mBitsPerChannel);
	printf("\n");
	printf("Sample Nums:   		 %10d\n",    (unsigned int)sampleNums);
	printf("Sample Size:   		 %10d\n",    (unsigned int)sampleSize);
	printf("outputPresentationTime:    %s\n",   [self timeCStringForTime:outputPresentationTime]);
	printf("presentationTime:    %s\n",   [self timeCStringForTime:presentationTime]);
	CMTimeShow(presentationTime);
	printf("presentationTime:    %.2f\n",   CMTimeGetSeconds(presentationTime));
	printf("outputDecodeTime:    %s\n",    [self timeCStringForTime:outputDecodeTime]);
	CMTimeShow(decodeTime);
	printf("decodeTime:    %s\n",    [self timeCStringForTime:decodeTime]);
	printf("outputDurationTime:    %s\n",   [self timeCStringForTime:outputDurationTime]);
	
	printf(" ++++++ CMSampleBufferInfo ForAudio End +++++++\n");
	printf("\n");
}


/**
 Dumps timing and sizing information of a video CMSampleBuffer to stdout,
 optionally followed by the attached CVPixelBuffer's layout.
 */
+ (void)printSampleBufferForVideo:(CMSampleBufferRef)sampleBuffer withFrameCount:(long)frameCount isShowPixelBufferInfo:(BOOL)isShowPixelBufferInfo{
	
	long sampleNums = CMSampleBufferGetNumSamples(sampleBuffer);
	long sampleSize = CMSampleBufferGetTotalSampleSize(sampleBuffer);
	CMTime outputPresentationTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer);
	CMTime presentationTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
	CMTime outputDecodeTime = CMSampleBufferGetOutputDecodeTimeStamp(sampleBuffer);
	CMTime decodeTime = CMSampleBufferGetDecodeTimeStamp(sampleBuffer);
	CMTime outputDurationTime = CMSampleBufferGetOutputDuration(sampleBuffer);
	
	printf(" ++++++ CMSampleBufferInfo For Video Start +++++++\n");
	printf("Video Frame Count:         %3ld\n",  frameCount);
	printf("Sample Nums:   		 %10d\n",    (unsigned int)sampleNums);
	printf("Sample Size:   		 %10d\n",    (unsigned int)sampleSize);
	printf("outputPresentationTime:    %s\n",   [self cstringForCMTime:outputPresentationTime]);
	printf("presentationTime:    %s\n",   [self cstringForCMTime:presentationTime]);
	printf("outputDecodeTime:    %s\n",    [self cstringForCMTime:outputDecodeTime]);
	printf("decodeTime:    %s\n",    [self cstringForCMTime:decodeTime]);
	printf("outputDurationTime:    %s\n",   [self cstringForCMTime:outputDurationTime]);
	// BUGFIX: the closing banner read " ====== CMSampleBufferInfo For Vedio
	// Start ======", mislabelling the END of the dump (and misspelling Video).
	printf(" ====== CMSampleBufferInfo For Video End ======\n");
	printf("\n");
	if(isShowPixelBufferInfo) {
		CVPixelBufferRef pb = CMSampleBufferGetImageBuffer(sampleBuffer);
		// BUGFIX: guard against NULL (data-only/audio buffers). The redundant
		// outer lock/unlock pair was removed: checkCVPixelBuffeInfo: locks and
		// unlocks the base address itself.
		if(pb != NULL) {
			[self checkCVPixelBuffeInfo:pb];
		}
	}
}


//- (CMSampleBufferRef)modifySampleBuffer(CMSampleBufferRef sb) {
//		CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
//			for (int y = 0; y < audioBufferList.mNumberBuffers; y++) {
//			  AudioBuffer audioBuffer = audioBufferList.mBuffers[y];
//			  Float32 *frame = (Float32*)audioBuffer.mData;
//			
//			  [data appendBytes:frame length:audioBuffer.mDataByteSize];
//			}
//}




/**
 Logs geometry, pixel-format, and per-plane layout of a CVPixelBuffer.
 (A CMSampleBuffer carries the image format information; a buffer created to
 receive its content must match this description exactly.)
 */
+ (void)checkCVPixelBuffeInfo:(CVPixelBufferRef)pixelBuffer {
	CVPixelBufferLockBaseAddress(pixelBuffer,0);
	uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(pixelBuffer);
	size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);
	size_t width = CVPixelBufferGetWidth(pixelBuffer);
	size_t height = CVPixelBufferGetHeight(pixelBuffer);
	size_t size = CVPixelBufferGetDataSize(pixelBuffer);
	
	NSLog(@"PixelBuffer baseAddress is %p, width is %zu, height is %zu,bytesPerRow is %zu,dataSize is %zu",baseAddress,width,height,bytesPerRow,size);
	
		// e.g. 309 is the CFTypeID of the CVPixelBuffer class.
	CFTypeID typeId = CVPixelBufferGetTypeID();
	NSLog(@"Type id is %lu",typeId);
	
		// 875704422 ('420f') --> kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
		// 1111970369 ('BGRA') --> kCVPixelFormatType_32BGRA
	OSType formatType = CVPixelBufferGetPixelFormatType(pixelBuffer);
	NSString * formatTypeName = @"Unknown";
	// BUGFIX: switch on the named CoreVideo constants instead of magic numbers.
	switch (formatType) {
		case kCVPixelFormatType_32BGRA:{
			formatTypeName = @"kCVPixelFormatType_32BGRA";
			break;
		}
		case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:{
			formatTypeName = @"kCVPixelFormatType_420YpCbCr8BiPlanarFullRange";
			break; // BUGFIX: missing break fell through to default
		}
		default:
			break;
	}
	// BUGFIX: formatTypeName was computed but never logged, and %zu mismatched
	// the 32-bit OSType.
	NSLog(@"Format type is %u (%@)",(unsigned int)formatType,formatTypeName);
	
	BOOL isPlannar = CVPixelBufferIsPlanar(pixelBuffer);
	if(isPlannar) {
		NSLog(@"平面的");
		size_t planeCount = CVPixelBufferGetPlaneCount(pixelBuffer);
		NSLog(@"一共有%zu个平面",planeCount);
			// e.g. plane 0: width 1134, height 1134; plane 1: width 567, height 567
		for(int i = 0 ;i<(int)planeCount;i++) {
			size_t planeWidth = CVPixelBufferGetWidthOfPlane(pixelBuffer,i);
			size_t planeHeight = CVPixelBufferGetHeightOfPlane(pixelBuffer,i);
			uint8_t * planeBaseAddress = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer,i);
			// NOTE: this is the plane's bytes-per-row, not its total size.
			size_t planeBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer,i);
			NSLog(@"第%d个平面,baseAddress is: %p, width is:%zu,height is:%zu,size is:%zu",i,planeBaseAddress,planeWidth,planeHeight,planeBytesPerRow);
		}
	}
	else {
		NSLog(@"不是平面的");
	}
	CVPixelBufferUnlockBaseAddress(pixelBuffer,0);
}



/**
 Computes the average loudness (dB) of an entire audio sample buffer.
 Assumes the data buffer contains 16-bit signed PCM — TODO confirm for
 compressed formats.
 @return the dB value, or 0 when the buffer holds no usable data.
 */
+ (int)dbFromCMSampleBuffer:(CMSampleBufferRef)sampleBuffer {
		// total PCM byte count
	size_t size = CMSampleBufferGetTotalSampleSize(sampleBuffer);
	if (size == 0) {
		return 0; // BUGFIX: malloc(0)/empty data previously reached getPcmDB
	}
	CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
	if (blockBuffer == NULL) {
		return 0; // BUGFIX: a buffer without data crashed the copy below
	}
	Byte * audio_data = (Byte *)malloc(size);
	if (audio_data == NULL) {
		return 0; // BUGFIX: malloc result was unchecked
	}
	memset(audio_data,0,size);
	int db = 0;
	// BUGFIX: check the copy status instead of analyzing zero-filled memory.
	if (CMBlockBufferCopyDataBytes(blockBuffer, 0, size, audio_data) == kCMBlockBufferNoErr) {
		db = getPcmDB(audio_data, size);
	}
	free(audio_data);
	return db;
}

int getPcmDB(const unsigned char *pcmdata, size_t size) {
	
	int db = 0;
	short int value = 0;
	double sum = 0;
	for(int i = 0; i < size; i += 2)
	{
		memcpy(&value, pcmdata+i, 2); //获取2个字节的大小（值）
		sum += abs(value); //绝对值求和
	}
	sum = sum / (size / 2); //求平均值（2个字节表示一个振幅，所以振幅个数为：size/2个）
	if(sum > 0)
	{
		db = (int)(20.0*log10(sum));
	}
	return db;
}

//+ (CMSampleBufferRef) sampleBufferFromPixelBuffer:(CVPixelBufferRef)pb {
//	Boolean dataReady = YES;
//	CMSampleBufferMakeDataReadyCallback callBack = NULL;
//	void * makeDataReadyRefcon = (__bridge void *)(self);
//	CMVideoFormatDescriptionRef formatDescription = NULL;
//	CMSampleTimingInfo * info = NULL;
//	CMSampleBufferRef sampleBuffer_new;
//	CMSampleBufferCreateForImageBuffer(kCFAllocatorDefault, pb, dataReady, callBack, makeDataReadyRefcon, formatDescription, info, &sampleBuffer_new);
//	return sampleBuffer_new;
//}

/**
 Wraps a CVPixelBuffer in a new video CMSampleBuffer with the given pts
 (duration and dts are left invalid).
 @return a +1 retained CMSampleBuffer the caller must CFRelease, or NULL on
         failure.
 */
+ (CMSampleBufferRef)videoSampleBufferForPixelBuffer:(CVPixelBufferRef)pixelBuffer withPresentationTime:(CMTime)presentationTime {
	CMSampleBufferRef sampleBuffer = NULL;
	CMFormatDescriptionRef outputFormatDescription = NULL;
	OSStatus err = CMVideoFormatDescriptionCreateForImageBuffer( kCFAllocatorDefault, pixelBuffer, &outputFormatDescription );
	// BUGFIX: check the creation result instead of passing NULL along.
	if (err != noErr || outputFormatDescription == NULL) {
		return NULL;
	}
	
	CMSampleTimingInfo timingInfo = {0,};
	timingInfo.duration = kCMTimeInvalid;
	timingInfo.decodeTimeStamp = kCMTimeInvalid;
	timingInfo.presentationTimeStamp = presentationTime;
	
	err = CMSampleBufferCreateForImageBuffer( kCFAllocatorDefault, pixelBuffer, true, NULL, NULL, outputFormatDescription, &timingInfo, &sampleBuffer );
	// BUGFIX: the format description follows the Create rule and was leaked on
	// every call; the sample buffer retains what it needs.
	CFRelease( outputFormatDescription );
	if (err != noErr) {
		return NULL;
	}
	return sampleBuffer;
}

//#pragma mark Create Video Sample
//+ (CMSampleBufferRef)createVideoSampleWithImageData:(void *)imageData {
//
//}




#pragma mark Create Audio Sample
/**
 Creates a silent PCM CMSampleBuffer of the given geometry.
 @return a +1 retained CMSampleBuffer, or NULL on failure.
 */
+ (CMSampleBufferRef)createEmptyAudioSampleBuffrWithTiming:(CMSampleTimingInfo)timing channels:(int)channels sampleRate:(int)sampleRate numSamples:(int)numSamples bitsPerChannel:(int)bitsPerChannel{
	
	// BUGFIX: the size was computed with "/ 16", i.e. only HALF of the
	// numSamples * channels * bitsPerChannel / 8 bytes that
	// createAudioSampleWithAudioData: reports as mDataByteSize — the copy into
	// the sample buffer then read past the allocation.
	UInt32 dataSize = numSamples * channels * bitsPerChannel / 8;
	Byte * audioData = malloc(dataSize);
	if (audioData == NULL) {
		return NULL; // BUGFIX: malloc result was unchecked
	}
	// BUGFIX: fill with zeros — digital silence for signed PCM (was memset to 1,
	// which is low-level noise, not an "empty" sample).
	memset(audioData, 0, dataSize);
	CMSampleBufferRef cmSampleBuffer = [self createAudioSampleWithAudioData:audioData numSamples:numSamples timing:timing channels:channels sampleRate:sampleRate bitsPerChannel:bitsPerChannel];
	// The sample buffer copies the bytes, so the scratch buffer is freed here.
	free(audioData);
	return cmSampleBuffer;
}

/**
 Wraps an already-filled AudioBufferList in a new CMSampleBuffer.
 @param audioBufferList buffer list that already contains the PCM data
                        (the bytes are copied into the sample buffer).
 @param asbd            stream description matching the data.
 @param duration        duration of ONE sample, not of the whole buffer.
 @param pts             presentation timestamp of the first sample.
 @param numSamples      number of samples contained in the data; must equal
                        mDataByteSize / asbd.mBytesPerFrame or the set-data
                        call fails with kCMSampleBufferError_RequiredParameterMissing.
 @return a +1 retained CMSampleBuffer the caller must CFRelease, or NULL on
         failure.
 */
+ (CMSampleBufferRef)createAudioSampleWithAudioBufferList:(AudioBufferList)audioBufferList audioStreamBasicDescription:(AudioStreamBasicDescription)asbd duration:(CMTime)duration pts:(CMTime)pts numSamples:(int)numSamples {
	
	CMSampleBufferRef cmSampleBuffer = NULL;
	
	// BUGFIX: this was declared `static`, so every call overwrote the previous
	// pointer and leaked the previously created description.
	CMFormatDescriptionRef cmFormatDescription = NULL;
	OSStatus error = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &asbd, 0, NULL, 0, NULL, NULL, &cmFormatDescription);
	if (error) {
		NSLog(@"CMAudioFormatDescription create failed.");
		return NULL;
	}
	
	// One entry in the size array: every sample shares the same size.
	int numSampleSizeEntry = 1;
	int channels = asbd.mChannelsPerFrame;
	int bytesPerChannel = asbd.mBitsPerChannel / 8;
	// Per-sample byte size covers all channels of one frame.
	size_t sampleSize = channels * bytesPerChannel;
	
	// One timing entry: all samples share `duration` and are contiguous, so
	// only the first sample's pts needs to be provided.
	int numSampleTimingEntries = 1;
	CMSampleTimingInfo timingInfo = {0};
	timingInfo.duration = duration;
	timingInfo.presentationTimeStamp = pts;
	timingInfo.decodeTimeStamp = kCMTimeInvalid;
	
	error = CMSampleBufferCreate(kCFAllocatorDefault, NULL, false, NULL, NULL, cmFormatDescription, numSamples , numSampleTimingEntries, &timingInfo, numSampleSizeEntry, &sampleSize, &cmSampleBuffer);
	// BUGFIX: release on every path — the description was leaked whenever
	// either of the calls below failed (the sample buffer retains its own copy).
	CFRelease(cmFormatDescription);
	if (error) {
		return NULL;
	}
	
	error = CMSampleBufferSetDataBufferFromAudioBufferList(cmSampleBuffer, kCFAllocatorDefault, kCFAllocatorDefault, 0, &audioBufferList);
	if(error){
		// -12731 kCMSampleBufferError_RequiredParameterMissing: the numSamples
		// passed to CMSampleBufferCreate does not match the AudioBuffer's
		// mDataByteSize / asbd.mBytesPerFrame (one frame holds all channels).
		NSLog(@"Set data buffer failed!");
		CFRelease(cmSampleBuffer); // BUGFIX: the half-built buffer was leaked
		return NULL;
	}
	return cmSampleBuffer;
}


/**
 Builds a packed signed-integer PCM CMSampleBuffer around `audioData`.
 @param audioData      pointer to numSamples * channels * bitsPerChannel / 8
                       bytes; the bytes are copied, so the caller may free them
                       after this returns.
 @param numSamples     number of samples in the data.
 @param timing         timing shared by all samples (duration is per sample).
 @param channels       channel count per frame.
 @param sampleRate     sample rate in Hz.
 @param bitsPerChannel bits of one channel's single sample (e.g. 16).
 @return a +1 retained CMSampleBuffer the caller must CFRelease, or NULL on
         failure.
 */
+ (CMSampleBufferRef)createAudioSampleWithAudioData:(void *)audioData numSamples:(int)numSamples timing:(CMSampleTimingInfo)timing channels:(int)channels sampleRate:(int)sampleRate bitsPerChannel:(int)bitsPerChannel {
	AudioBufferList audioBufferList;
	audioBufferList.mNumberBuffers = 1;
	audioBufferList.mBuffers[0].mNumberChannels=channels;
	
	// Total byte count = samples * channels * bits-per-channel / 8.
	UInt32 dataSize = numSamples * channels * bitsPerChannel / 8;
	audioBufferList.mBuffers[0].mDataByteSize = dataSize;
	audioBufferList.mBuffers[0].mData = audioData;
	
	// Zero-initialize so mReserved (and any padding) is defined.
	AudioStreamBasicDescription asbd = {0};
	asbd.mSampleRate = sampleRate;
	asbd.mFormatID = kAudioFormatLinearPCM;
	asbd.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger|kAudioFormatFlagIsPacked;
		// PCM: each packet holds exactly one frame
	asbd.mFramesPerPacket = 1;
		// one frame holds one sample for every channel
	asbd.mChannelsPerFrame = channels;
	// Per-frame byte size = channels * bitsPerChannel / 8.
	const unsigned long sampleSize = channels * bitsPerChannel / 8;
	asbd.mBytesPerFrame =  (int)sampleSize;
	asbd.mBytesPerPacket =  (int)sampleSize;
		// bits of a single channel's sample
	asbd.mBitsPerChannel = bitsPerChannel;
	
	CMSampleBufferRef cmSampleBuffer = NULL;
	
	// BUGFIX: this was declared `static`, so every call overwrote the previous
	// pointer and leaked the previously created description.
	CMFormatDescriptionRef cmFormatDescription = NULL;
	OSStatus error = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &asbd, 0, NULL, 0, NULL, NULL, &cmFormatDescription);
	if (error) {
		return NULL;
	}
	
	// 1 timing entry: all samples share `timing` and are contiguous.
	// 1 size entry: every sample has the same sampleSize.
	error = CMSampleBufferCreate(kCFAllocatorDefault, NULL, false, NULL, NULL, cmFormatDescription, numSamples , 1, &timing, 1, &sampleSize, &cmSampleBuffer);
	// BUGFIX: release on every path — the description was leaked whenever
	// either of the calls below failed (the sample buffer retains its own copy).
	CFRelease(cmFormatDescription);
	if (error) {
		return NULL;
	}
	
	error = CMSampleBufferSetDataBufferFromAudioBufferList(cmSampleBuffer, kCFAllocatorDefault, kCFAllocatorDefault, 0, &audioBufferList);
	if(error){
		// -12731 kCMSampleBufferError_RequiredParameterMissing: numSamples does
		// not match mDataByteSize / asbd.mBytesPerFrame (one frame holds all
		// channels' samples).
		NSLog(@"Set data buffer failed!");
		CFRelease(cmSampleBuffer); // BUGFIX: the half-built buffer was leaked
		return NULL;
	}
	
	return cmSampleBuffer;
}

/**
 Logs an OSType both as its numeric value and as its four printable characters.
 */
+ (void)printOSTypeInfo:(OSType)osType {
	// BUGFIX: OSType is an unsigned 32-bit FourCharCode; %d printed codes with
	// the high bit set as negative numbers. Use %u with an explicit cast.
	NSLog(@"OSType intValue:%u,four char value:(%c%c%c%c)",(unsigned int)osType,(char)(osType>>24),(char)(osType>>16),(char)(osType>>8),(char)(osType>>0));
}


@end
