﻿//
// Copyright (c) 2019-2022 yanggaofeng
//
#include <yangipc/YangIpcEncoder.h>

#include <yangutil/yang_unistd.h>

#include <yangutil/yangavinfotype.h>
#include <yangutil/sys/YangLog.h>
#include <yangutil/sys/YangEndian.h>
#include <stdlib.h>
#include <malloc.h>

#include "readh264.h"

//extern struct chn_conf chn[];
// Handle a control request sent to the encoder (e.g. from the transport
// layer). On this platform all branches are currently stubs:
//  - Yang_Req_Sendkeyframe should ask the hardware encoder for an IDR
//    (the IMP SDK call is left as a hint in the comment below),
//  - the packet-loss-rate hints would adjust the encoder bitrate.
// session: owning encoder session (unused by the stub implementations).
// request: which action the caller wants the encoder to take.
void yang_jzEncoder_sendMsgToEncoder(YangEncoderSession* session,YangRequestType request){
	(void)session; // not needed by the stubbed handlers

	if(request==Yang_Req_Sendkeyframe){
		// TODO: request an IDR from the hardware encoder, e.g.
		// IMP_Encoder_RequestIDR(0), and log the result.
	}else if(request==Yang_Req_HighLostPacketRate){
		// TODO: reduce bitrate when the peer reports heavy packet loss.
	}else if(request==Yang_Req_LowLostPacketRate){
		// TODO: restore bitrate when packet loss recovers.
	}
}


// Key point: composing the I-frame access unit; P frames have the NAL start code stripped.
// I frame layout: 4-byte sps length + SPS + 4-byte pps length + PPS + 4-byte I-frame length + I-frame

// Push one H.264 access unit into the outgoing video buffer.
//
// The caller has already framed the bitstream (see the comment above):
//  - key frames arrive as 4-byte SPS length + SPS + 4-byte PPS length +
//    PPS + I-frame NAL, concatenated into frame_ptr by the capture thread;
//  - P frames arrive with their 4-byte Annex-B start code stripped.
//
// buf        - destination buffer; putEVideo() copies the frame out.
// frame      - reusable frame descriptor; frame->payload must be large
//              enough to hold frame_len bytes (caller owns the storage).
// frame_ptr  - source bitstream for this access unit.
// frame_len  - number of bytes in frame_ptr.
// key_frame  - non-zero for an IDR access unit.
// time_stamp - presentation timestamp in milliseconds.
//
// Returns 0 on success, -1 on invalid arguments.
//
// Cleanup vs. the previous revision: removed the unused `nb` accumulator,
// the dead `tmp += frame_len` advance, a leftover debug printf, duplicate
// frame->nb assignments, and the large `#if 0` IMP reference block (it
// referenced symbols not declared in this file).
int yang_jzEncoder_save_stream(YangVideoEncoderBuffer2* buf,YangFrame* frame, char *frame_ptr, int frame_len, int key_frame, int time_stamp)
{
	if (buf == NULL || frame == NULL || frame_ptr == NULL || frame_len <= 0) {
		return -1;
	}

	memcpy(frame->payload, frame_ptr, frame_len);
	frame->nb = frame_len;
	frame->pts = time_stamp;
	frame->frametype = key_frame ? 1 : 0; // normalize to the buffer's 0/1 convention
	buf->putEVideo(&buf->mediaBuffer, frame);
	return 0;
}

#if 0
// Disabled reference implementation for the Ingenic IMP SDK: pulls encoded
// H.264/H.265 packets directly from IMP_Encoder_* and feeds them to
// yang_jzEncoder_save_stream. It does not compile in this file as-is
// (IMPEncoderEncType, chn[], sample_framesource_*, NR_FRAMES_TO_SAVE are
// not declared here); kept for reference only.
void* yang_jzEncoder_start_thread(void *obj)
{
	int val, i, chnNum, ret;
	char stream_path[64];
	IMPEncoderEncType encType;
	int stream_fd = -1, totalSaveCnt = 0;
	YangEncoderSession* session=(YangEncoderSession*)obj;
	// NOTE(review): `i` is used uninitialized in chn[i].index below — bug in
	// this disabled code; likely meant chn[0].index.
	val = (int) (((chn[0].payloadType >> 24) << 16) | chn[i].index);
	chnNum = val & 0xffff;
	encType = (IMPEncoderEncType)((val >> 16) & 0xffff);
	ret = sample_framesource_streamon();
	ret = IMP_Encoder_StartRecvPic(chnNum);
	if (ret < 0) {
		yang_error( "IMP_Encoder_StartRecvPic(%d) failed\n", chnNum);
		return NULL;
	}


	totalSaveCnt = NR_FRAMES_TO_SAVE;
	YangFrame videoFrame;
	memset(&videoFrame,0,sizeof(YangFrame));
	uint8_t buffer[1024*1024]={0};
	videoFrame.payload=buffer;
	while (session->isConvert == 1) {
		ret = IMP_Encoder_PollingStream(chnNum, 1000);
		if (ret < 0) {
			yang_error( "IMP_Encoder_PollingStream(%d) timeout\n", chnNum);
			continue;
		}

		IMPEncoderStream stream;
		/* Get H264 or H265 Stream */
		ret = IMP_Encoder_GetStream(chnNum, &stream, 1);
#ifdef SHOW_FRM_BITRATE
		// Optional per-channel FPS/bitrate statistics, printed every
		// FRM_BIT_RATE_TIME seconds.
		int i, len = 0;
		for (i = 0; i < stream.packCount; i++) {
			len += stream.pack[i].length;
		}
		bitrate_sp[chnNum] += len;
		frmrate_sp[chnNum]++;

		int64_t now = IMP_System_GetTimeStamp() / 1000;
		if(((int)(now - statime_sp[chnNum]) / 1000) >= FRM_BIT_RATE_TIME){
			double fps = (double)frmrate_sp[chnNum] / ((double)(now - statime_sp[chnNum]) / 1000);
			double kbr = (double)bitrate_sp[chnNum] * 8 / (double)(now - statime_sp[chnNum]);

			printf("streamNum[%d]:FPS: %0.2f,Bitrate: %0.2f(kbps)\n", chnNum, fps, kbr);
			//fflush(stdout);

			frmrate_sp[chnNum] = 0;
			bitrate_sp[chnNum] = 0;
			statime_sp[chnNum] = now;
		}
#endif
		if (ret < 0) {
			yang_error( "IMP_Encoder_GetStream(%d) failed\n", chnNum);
			return NULL;
		}


		// NOTE(review): stream_fd is closed below but never opened in this
		// version — harmless (close(-1) fails with EBADF) but dead code.
		ret = yang_jzEncoder_save_stream(session->out_videoBuffer,&videoFrame, &stream);
		if (ret < 0) {
			close(stream_fd);
			return NULL;
		}


		IMP_Encoder_ReleaseStream(chnNum, &stream);
	}

	close(stream_fd);

	ret = IMP_Encoder_StopRecvPic(chnNum);
	if (ret < 0) {
		yang_error( "IMP_Encoder_StopRecvPic(%d) failed\n", chnNum);
		return NULL;
	}
	sample_framesource_streamoff();
	return NULL;
}
#endif

// int yang_jzEncoder_save_stream(YangVideoEncoderBuffer2* buf, YangFrame* frame, char *frame_ptr, int frame_len, int key_frame, int time_stamp)
// memset(str, c, n): fills the first n bytes of str with the byte value c.
// memcpy(str1, str2, n): copies n bytes from str2 into str1.
// Encoder capture thread: reads raw Annex-B H.264 NAL units via getOneNal()
// (readh264.h) and packages them for the output video buffer.
//
// Access-unit layout handed to yang_jzEncoder_save_stream:
//   I frame: 4-byte SPS length + SPS + 4-byte PPS length + PPS + IDR NAL
//   P frame: NAL payload with the 4-byte start code stripped
//
// obj is the YangEncoderSession*; the loop runs until session->isConvert
// leaves 1. Always returns NULL.
//
// Fixes vs. the previous revision:
//  - `buffer[3] = 0x01` (assignment) corrected to `buffer[3] == 0x01`;
//  - missing `return NULL` added (falling off a value-returning function
//    whose result is used is undefined behavior);
//  - malloc results checked;
//  - SPS/PPS copies bounded (previously memcpy'd `len` bytes into fixed
//    19/8-byte stack arrays) and their real lengths tracked instead of the
//    hard-coded 19/8;
//  - IDR frames are skipped until SPS and PPS have been cached (previously
//    uninitialized arrays were read).
void* yang_jzEncoder_start_thread(void *obj)
{
	YangEncoderSession* session=(YangEncoderSession*)obj;

	enum { kNalBufSize = 128*1024, kParamMax = 64 };

	uint8_t *buffer  = (uint8_t*)malloc(kNalBufSize);               // raw NAL from getOneNal()
	uint8_t *bufferR = (uint8_t*)malloc(kNalBufSize + 2*kParamMax); // SPS+PPS+IDR composition
	if (buffer == NULL || bufferR == NULL) {
		yang_error("yang_jzEncoder_start_thread: out of memory");
		free(buffer);
		free(bufferR);
		return NULL;
	}

	int len = 0;        // current NAL length, including the 4-byte start code
	int time_stamp = 0; // pts in milliseconds, advanced 40 ms per frame (25 fps)
	int nal_type = 0;   // H.264 nal_unit_type of the current NAL

	// Cached parameter sets. The first 4 bytes of each hold the big-endian
	// payload length (the Annex-B start code is overwritten in place).
	uint8_t sps[kParamMax];
	uint8_t pps[kParamMax];
	int sps_len = 0;    // 0 until an SPS has been seen
	int pps_len = 0;    // 0 until a PPS has been seen

	YangFrame videoFrame;
	memset(&videoFrame, 0, sizeof(YangFrame));
	uint8_t payloadBuf[1024*1024] = {0}; // backing store for videoFrame.payload
	videoFrame.payload = payloadBuf;

	while (session->isConvert == 1) {
		usleep(40 * 1000); // pace reads at ~25 fps

		len = getOneNal(buffer, kNalBufSize);
		// Require a 4-byte Annex-B start code (00 00 00 01) plus a NAL header.
		if (len <= 4 || buffer[3] != 0x01) {
			continue;
		}

		nal_type = buffer[4] & 0x1f; // low 5 bits of the NAL header byte

		if (nal_type == 6) { // SEI: drop
			continue;
		}
		if (nal_type == 7) { // SPS: cache with a length prefix
			if (len <= (int)sizeof(sps)) {
				memcpy(sps, buffer, len);
				int be = htonl(len - 4); // payload length, excluding start code
				memcpy(sps, &be, 4);     // overwrite start code with the length
				sps_len = len;
			}
			continue;
		}
		if (nal_type == 8) { // PPS: cache with a length prefix
			if (len <= (int)sizeof(pps)) {
				memcpy(pps, buffer, len);
				int be = htonl(len - 4);
				memcpy(pps, &be, 4);
				pps_len = len;
			}
			continue;
		}

		if (nal_type == 5) { // IDR: prepend the cached SPS/PPS
			if (sps_len == 0 || pps_len == 0) {
				continue; // no parameter sets cached yet — cannot frame an IDR
			}
			memcpy(bufferR, sps, sps_len);
			memcpy(bufferR + sps_len, pps, pps_len);
			// NOTE(review): the IDR NAL keeps its Annex-B start code here while
			// SPS/PPS carry length prefixes — confirm the consumer expects this
			// mix (the commented-out htonl in the original hints at an intended
			// length prefix for the IDR as well).
			memcpy(bufferR + sps_len + pps_len, buffer, len);
			yang_jzEncoder_save_stream(session->out_videoBuffer, &videoFrame,
					(char*)bufferR, len + sps_len + pps_len, 1, time_stamp);
		} else { // non-IDR slice: strip the 4-byte start code
			yang_jzEncoder_save_stream(session->out_videoBuffer, &videoFrame,
					(char*)buffer + 4, len - 4, 0, time_stamp);
		}

		time_stamp += 40;
	}

	free(buffer);
	free(bufferR);
	return NULL;
}

// Spawn the encoder capture thread for this session.
// NOTE(review): isStart is raised before pthread_create and dropped again
// unconditionally right after, so it only guards against re-entry during the
// create call itself — the worker thread never owns the flag. The destroy
// path (yang_destroy_videoEncoder) busy-waits on isStart and relies on it
// being 0 here; confirm this is the intended lifecycle before changing it.
void yang_jzEncoder_start(YangEncoderSession* session)  {
	if (session->isStart) return;
	session->isStart = 1;

	int rc = pthread_create(&session->threadId, 0, yang_jzEncoder_start_thread, session);
	if (rc != 0) {
		yang_error("YangThread::start could not start thread");
	}
	session->isStart = 0;
}

// Stop the encoder session: clear isConvert so the capture thread's loop
// exits on its next iteration, then tear down the H.264 reader opened by
// init() (see readh264.h). Does not join the thread.
void yang_jzEncoder_stop(YangEncoderSession* session) {
	session->isConvert = 0;
	deinit();
}



// Initialize the encoder session by opening the H.264 reader (init() from
// readh264.h). The session object itself carries no state the file-based
// reader needs.
// Returns Yang_Ok on success, -1 if the reader could not be initialized.
// (Removed the unused local `ret` from the previous revision.)
int32_t yang_jzEncoder_init(YangEncoderSession* session) {
	(void)session; // unused by the file-based reader

	if (init() != 0) {
		return -1;
	}
	return Yang_Ok;
}

// Wire up a YangEncoderVideo instance: reset the embedded session state and
// install the jz encoder entry points. The output buffer is attached later
// by the caller (out_videoBuffer starts NULL).
void yang_create_videoEncoder(YangEncoderVideo* encoder){
	YangEncoderSession* session = &encoder->session;

	// Fresh session: not started, conversion enabled, no sink yet.
	session->isStart = 0;
	session->isConvert = 1;
	session->out_videoBuffer = NULL;

	// Function table for this platform's encoder implementation.
	encoder->init = yang_jzEncoder_init;
	encoder->start = yang_jzEncoder_start;
	encoder->stop = yang_jzEncoder_stop;
	encoder->sendMsgToEncoder = yang_jzEncoder_sendMsgToEncoder;
}
// Tear down a YangEncoderVideo: if conversion is still enabled, stop the
// session, wait for any in-flight start to finish (isStart), then drop the
// output-buffer reference. The buffer itself is owned elsewhere.
void yang_destroy_videoEncoder(YangEncoderVideo* encoder){
	YangEncoderSession* session = &encoder->session;

	if (session->isConvert) {
		yang_jzEncoder_stop(session);
		while (session->isStart) {
			yang_usleep(1000);
		}
	}
	session->out_videoBuffer = NULL;
}



