#include "videostream_rtsp.h"
#include <string>

// Implementation of "DummySink" (adapted from live555's testRTSPClient example).

// Unlike the original example sink -- which discards the incoming data -- this one
// forwards each received frame to an FFmpeg H.264 decoder. A receive buffer of
// DUMMY_SINK_RECEIVE_BUFFER_SIZE bytes is still required to accept the frames.

// Construct a sink bound to one media subsession. Allocates the fixed-size
// frame receive buffer and keeps a private copy of the stream id (used for
// logging); both are released in the destructor.
DummySink::DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId)
	: MediaSink(env), fSubsession(subsession)
{
	fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];
	fStreamId = strDup(streamId);
}

// Release the two arrays allocated in the constructor (strDup() returns a
// new[]-allocated copy, so delete[] is the matching release for both).
DummySink::~DummySink()
{
	delete[] fStreamId;
	delete[] fReceiveBuffer;
}

// If you don't want to see debugging output for each received frame, then comment out the following line:
#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1

void
DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned durationInMicroseconds)
{

//    if (fStreamId != NULL)
//        envir() << "Stream \"" << fStreamId << "\"; ";
//   envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes\n";
//    if (numTruncatedBytes > 0)
//        envir() << " (with " << numTruncatedBytes << " bytes truncated)";
//    char uSecsStr[6+1];
//    sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
//    envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
//    if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
//        envir() << "!";
//    }
//    envir() << "\n";

		//std::cout << "onData: length-> " << frameSize << "  " << presentationTime.tv_sec << " " << presentationTime.tv_usec/1000  << std::endl;
//	std::cout << fSubsession.fmtp_spropparametersets() << std::endl;
    unsigned int Num = 0;
    unsigned int &SPropRecords = Num;
    SPropRecord* p_record = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), SPropRecords);
    SPropRecord &sps = p_record[0];
    SPropRecord &pps = p_record[1];
    //printf(":=====> sps:%s\t %d\t pps:%s\t %d\t\n", sps.sPropBytes, sps.sPropLength, pps.sPropBytes, pps.sPropLength);
    m_pFfmpeg->DecodeFrame(sps.sPropBytes, sps.sPropLength, pps.sPropBytes, pps.sPropLength, fReceiveBuffer, frameSize, presentationTime);
	//std::cout << "onData: length-> " << frameSize << "  " << presentationTime.tv_sec << " " << presentationTime.tv_usec/1000  << std::endl;
	if(m_bIsWorking) {	
		this->continuePlaying();
	}
}

// Arm the next asynchronous frame read on the upstream source.
// afterGettingFrame() fires when the frame has been delivered; onSourceClosure
// fires if the source shuts down. Returns False when there is no source.
Boolean
DummySink::continuePlaying()
{
	if (fSource == NULL) {
		return False;
	}
	fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
	                      afterGettingFrame, this,
	                      onSourceClosure, this);
	return True;
}

// Stop pulling frames: clear the working flag so afterGettingFrame() stops
// re-arming itself, then cancel any outstanding getNextFrame() request.
// Returns False when there is no source to stop.
Boolean
DummySink::Stop()
{
	if (fSource == NULL) {
		return False;
	}
	m_bIsWorking = false;
	fSource->stopGettingFrames();
	return True;
}


// Build an RTSP client for rtspURL. Credentials are stored in
// m_pAuthenticator and passed with every command. Base-class arguments:
// verbosity level 255, no application name, HTTP-tunnel port 0 (disabled),
// socket number -1 (let live555 create one).
VSRTSPClient::VSRTSPClient(UsageEnvironment* env, char const* rtspURL, char const* sUser, char const* sPasswd)
	: RTSPClient(*env, rtspURL, 255, NULL, 0, -1)
	, m_pAuthenticator(sUser, sPasswd)
{
}

// Intentionally empty: the sink, decoder and iterator are released explicitly
// in close(), not here.
VSRTSPClient::~VSRTSPClient()
{
}

// Start the RTSP handshake (DESCRIBE -> SETUP -> PLAY, driven by
// SendNextCommand and the asynchronous callbacks) and block until the state
// machine leaves VS_NULL. Returns the resulting state (VS_OK on success, one
// of the VS_*_ERROR values on failure).
//
// NOTE(review): this busy-waits in 10-microsecond sleeps with no timeout --
// it spins forever if no callback ever fires. It also assumes the live555
// event loop is being driven concurrently (e.g. on another thread) so the
// callbacks can run; confirm against the caller.
int
VSRTSPClient::open()
{
    m_nState = VS_NULL;
    this->SendNextCommand();
    do {
        usleep(10);
    } while(m_nState == VS_NULL);
    return m_nState;
}


// Shut the stream down: stop and delete the sink, send TEARDOWN, give the
// in-flight decode a moment to finish, then release the decoder and the
// subsession iterator. Always returns true.
bool
VSRTSPClient::close()
{
	m_nState = VS_NULL;
	if(m_pDummySink) {
		m_pDummySink->Stop();
		delete m_pDummySink; m_pDummySink = nullptr;
	}
	// Guard the *m_pSession dereference: if DESCRIBE failed there is no
	// session and the original code would dereference a null pointer here.
	if(m_pSession) {
		this->sendTeardownCommand(*m_pSession, continueAfterTearDown, &m_pAuthenticator);
	}
	usleep(100000); // wait 100 ms for the current decode to finish
	if(m_pFfmpegH264) {
		delete m_pFfmpegH264; m_pFfmpegH264 = nullptr;
	}
	// The iterator is allocated in ContinueAfterDescribe; the original code
	// only cleared the pointer, leaking it on every open/close cycle.
	delete m_pSubSessionIter;
	m_pSubSessionIter = nullptr;
	// NOTE(review): m_pSession itself is never released here; live555 media
	// objects should be freed with Medium::close() once it is safe -- confirm
	// no TEARDOWN response is still outstanding before adding that.
	return true;
}

void
VSRTSPClient::ContinueAfterDescribe(int resultCode, char* resultString)
{
	if (resultCode != 0) 
	{
		rtsprintf(GDB_RED"Failed to DESCRIBE: %s\n" GDB_END, resultString);
		m_nState = VS_DESCRIBE_ERROR;
	}
	else
	{
        rtsprintf("Got SDP: %s\n", resultString);
        m_pSession = MediaSession::createNew(envir(), resultString);
		m_pSubSessionIter = new MediaSubsessionIterator(*m_pSession);
		this->SendNextCommand();  
	}
	delete[] resultString;

}
void
VSRTSPClient::ContinueAfterSetup(int resultCode, char* resultString)
{
	if (resultCode != 0) 
	{
		rtsprintf(GDB_RED "Failed to SETUP: %s\n"GDB_END, resultString);
		m_nState = VS_SETUP_ERROR;
	}
	else
	{	
		//Live555CodecType codec = GetSessionCodecType(m_subSession->mediumName(), m_subSession->codecName());
        m_pDummySink = new DummySink(envir(), *m_pSubSession, this->url());
		if (m_pDummySink == NULL) 
		{
            std::cout << "Failed to create a data sink for " << m_pSubSession->mediumName() << "/" << m_pSubSession->codecName() << " subsession: " << envir().getResultMsg() << "\n";
			m_nState = VS_SETUP_ERROR;
		}
		else
		{
			std::cout << "Created a data sink for the \"" << m_pSubSession->mediumName() << "/" << m_pSubSession->codecName() << "\" subsession.\n";
			if(strcmp(m_pSubSession->codecName(), "H264") == 0) 
			{
				m_pFfmpegH264 = new VSFFH264;
			    if(m_pFfmpegH264 == nullptr || m_pFfmpegH264->Init() == false) {
					rtsprintf(GDB_RED"Error:----> VSFFH264 Error\n"GDB_END);
					m_nState =  VS_FFH264_ERROR;
			    }
				else 
				{
				    m_pDummySink->SetFFmpegH264Decode(m_pFfmpegH264);
					m_pDummySink->startPlaying(*(m_pSubSession->readSource()), NULL, NULL);
				}
			}
			else
			{
				m_nState = VS_NO_H264;
			}
		}
	}
	delete[] resultString;
	this->SendNextCommand();  
}	

// Callback for the PLAY response: result code 0 means streaming has started,
// so mark the client VS_OK; anything else is recorded as VS_PLAY_ERROR.
// resultString ownership is transferred to this handler, so free it.
void
VSRTSPClient::ContinueAfterPlay(int resultCode, char* resultString)
{
	if (resultCode == 0)
	{
		rtsprintf("PLAY OK\n");
		m_nState = VS_OK;
	}
	else
	{
		rtsprintf(GDB_RED "Failed to PLAY: %s\n" GDB_END, resultString);
		m_nState = VS_PLAY_ERROR;
	}
	delete[] resultString;
}

// Callback for the TEARDOWN response. On success the state moves to VS_CLOSE;
// on failure only a message is printed (the state is left untouched).
// resultString ownership is transferred to this handler, so free it.
void
VSRTSPClient::ContinueAfterTearDown(int resultCode, char* resultString)
{
	if (resultCode == 0)
	{
		rtsprintf("TearDown OK\n");
		m_nState = VS_CLOSE;
	}
	else
	{
		rtsprintf(GDB_RED "Failed to TearDown: %s\n" GDB_END, resultString);
	}
	delete[] resultString;
}

// Static trampoline: live555 invokes response handlers as free functions, so
// downcast the client pointer and forward to the member handler.
void
VSRTSPClient::continueAfterSetup(RTSPClient* rtspClient, int resultCode, char* resultString)
{
	VSRTSPClient* self = static_cast<VSRTSPClient*>(rtspClient);
	self->ContinueAfterSetup(resultCode, resultString);
}

// Static trampoline: downcast the live555 client pointer and forward the PLAY
// response to the member handler.
void
VSRTSPClient::continueAfterPlay(RTSPClient* rtspClient, int resultCode, char* resultString)
{
	VSRTSPClient* self = static_cast<VSRTSPClient*>(rtspClient);
	self->ContinueAfterPlay(resultCode, resultString);
}

// Static trampoline: downcast the live555 client pointer and forward the
// DESCRIBE response to the member handler.
void
VSRTSPClient::continueAfterDescribe(RTSPClient* rtspClient, int resultCode, char* resultString)
{
	VSRTSPClient* self = static_cast<VSRTSPClient*>(rtspClient);
	self->ContinueAfterDescribe(resultCode, resultString);
}


// Static trampoline: downcast the live555 client pointer and forward the
// TEARDOWN response to the member handler.
void
VSRTSPClient::continueAfterTearDown(RTSPClient* rtspClient, int resultCode, char* resultString)
{
	VSRTSPClient* self = static_cast<VSRTSPClient*>(rtspClient);
	self->ContinueAfterTearDown(resultCode, resultString);
}


// Drives the RTSP state machine while m_nState is still VS_NULL:
//   - no iterator yet  -> send DESCRIBE (ContinueAfterDescribe creates it),
//   - next subsession  -> initiate() it and send SETUP (TCP-interleaved),
//   - iterator drained -> send PLAY for the whole session.
void
VSRTSPClient::SendNextCommand()
{
	if(m_nState == VS_NULL) {
	    if (m_pSubSessionIter == NULL) {
	        // No SDP yet: start with DESCRIBE.
	        this->sendDescribeCommand(continueAfterDescribe, &m_pAuthenticator);
	    } 
	    else {
	        m_pSubSession = m_pSubSessionIter->next();
	        if (m_pSubSession != NULL) {
	            // Still a subsession to SETUP.
	            if (!m_pSubSession->initiate()) {
	                std::cout << "Failed to initiate " << m_pSubSession->mediumName() << "/" << m_pSubSession->codecName() << " subsession: " << envir().getResultMsg() << "\n";
	                // Skip this subsession and move on to the next one.
	                // Fix: the original fell through and ALSO sent SETUP for
	                // the subsession that had just failed to initiate.
	                this->SendNextCommand();
	                return;
	            }
	            std::cout << "Initiated " << m_pSubSession->mediumName() << "/" << m_pSubSession->codecName() << " subsession" << "\n";

	            // streamOutgoing=false, streamUsingTCP=true, forceMulticast=false
	            this->sendSetupCommand(*m_pSubSession, continueAfterSetup, false, true, false, &m_pAuthenticator); //tcp
	        }
	        else {
	            // No more subsessions to SETUP: send PLAY (start 0, open-ended, normal scale).
	            this->sendPlayCommand(*m_pSession, continueAfterPlay, (double)0, (double)-1, (float)0, &m_pAuthenticator);
	        }
	    }
	}
}


