#include <strmif.h>
#include <comdef.h>
#include "mkvcontext.hpp"
#include "mkvstream.hpp"
#include <cassert>
#include <ctime>
#ifdef _DEBUG
#include "odbgstream.hpp"
using std::endl;
#endif

namespace MkvMux
{

MkvContext::MkvContext() :
   m_pVideo(0),
   m_pAudio(0)
{
    //TrackUID values must be unique, and they are generated
    //with rand(), so seed the generator once at construction.

    const time_t now = time(0);
    srand(static_cast<unsigned>(now));
}


MkvContext::~MkvContext()
{
   //By destruction time both streams must have been detached
   //(SetVideo(0)/SetAudio(0)) and the output stream released
   //(Final/Close), otherwise we would leak or dangle.
   assert(m_pVideo == 0);
   assert(m_pAudio == 0);
   assert(m_file.GetStream() == 0);
}


void MkvContext::SetVideo(MkvStream* pVideo)
{
   //Either we are detaching (null arg), or no video stream
   //has been attached yet -- replacing one stream with
   //another directly is not supported.
   assert(!pVideo || !m_pVideo);
   m_pVideo = pVideo;
}


void MkvContext::SetAudio(MkvStream* pAudio)
{
   //Either we are detaching (null arg), or no audio stream
   //has been attached yet.
   assert(!pAudio || !m_pAudio);
   m_pAudio = pAudio;  //TODO: generalize this to many audio streams
}


void MkvContext::Open(IStream* pStream)
{
    //Begin a mux session.  Any previous session must have
    //been fully closed, with all frame queues drained.
    assert(m_file.GetStream() == 0);
    assert(m_vframes.empty());
    assert(m_aframes.empty());
    assert(m_rframes.empty());

    //Reset per-session state.
    m_max_timecode_ms = 0;  //running duration, in ms
    m_cEOS = 0;             //number of streams yet to signal EOS
    m_bEOSVideo = false;
    m_bEOSAudio = false;

    //Assign track numbers: video first, then audio.
    int track = 0;

    if (m_pVideo)
    {
        m_pVideo->SetTrackNumber(++track);
        ++m_cEOS;
    }

    if (m_pAudio)
    {
        m_pAudio->SetTrackNumber(++track);
        ++m_cEOS;
    }

    if (pStream == 0)
        return;  //no output stream supplied; nothing to write

    //Start writing the file: EBML header first, then the
    //(single) Segment element.
    m_file.SetStream(pStream);
    m_file.SetPosition(0);

    WriteEbmlHeader();
    InitSegment();
}


void MkvContext::Close()
{
    //Force end-of-stream on any attached stream that has not
    //already signalled it, then finalize the output file.

    if (m_pVideo != 0)
        EOSVideo(0);

    if (m_pAudio != 0)
        EOSAudio(0);

    Final();
}


void MkvContext::Final()
{
    //Finalize and release the output stream, if one is open.
    //Safe to call more than once.
    if (m_file.GetStream() == 0)
        return;

    FinalSegment();
    m_file.SetStream(0);
}


//Frames are heap-allocated by the streams and destroyed via
//Release() once written to the file.
MkvContext::Frame::Frame()
{
}


//Virtual-dispatch destruction happens through Release().
MkvContext::Frame::~Frame()
{
}


void MkvContext::Frame::Release()
{
   //Frames are always allocated with operator new, so
   //self-deletion here is the designated disposal path.
   delete this;
}


//Video frame: adds frame type (I/P/B) and timecodes to Frame.
MkvContext::VideoFrame::VideoFrame()
{
}


//Audio frame: every audio frame is independently decodable.
MkvContext::AudioFrame::AudioFrame()
{
}


//Returns the running duration so far: the largest block
//timecode (in ms) written to the file.
ULONG MkvContext::GetTime() const
{
   return m_max_timecode_ms;
}


//Writes the EBML header that must begin every Matroska file:
//EBMLVersion/EBMLReadVersion = 1, MaxIDLength = 4,
//MaxSizeLength = 8, DocType = "matroska".  The header's size
//field is back-patched once the payload has been written.
void MkvContext::WriteEbmlHeader()
{
    m_file.WriteID4(0x1A45DFA3);  //EBML header ID
    
    //Allocate 1 byte of storage for Ebml header size.
    const __int64 start_pos = m_file.SetPosition(1, STREAM_SEEK_CUR);

    //EBML Version
    
    m_file.WriteID2(0x4286);
    m_file.Write1UInt(1);
    m_file.Serialize1UInt(1);  //EBML Version = 1

    //EBML Read Version
    
    m_file.WriteID2(0x42F7);
    m_file.Write1UInt(1);
    m_file.Serialize1UInt(1);  //EBML Read Version = 1
    
    //EBML Max ID Length
    
    m_file.WriteID2(0x42F2);
    m_file.Write1UInt(1);
    m_file.Serialize1UInt(4);  //EBML Max ID Length = 4
    
    //EBML Max Size Length
    
    m_file.WriteID2(0x42F3);
    m_file.Write1UInt(1);
    m_file.Serialize1UInt(8);  //EBML Max Size Length = 8

    //Doc Type (Write1String presumably emits size + chars --
    //TODO confirm against EbmlIO)

    m_file.WriteID2(0x4282);    
    m_file.Write1String("matroska");
        
    //Doc Type Version
    
    m_file.WriteID2(0x4287);
    m_file.Write1UInt(1);
    m_file.Serialize1UInt(1);  //Doc Type Version = 1
    
    //Doc Type Read Version
    
    m_file.WriteID2(0x4285);
    m_file.Write1UInt(1);
    m_file.Serialize1UInt(1);  //Doc Type Read Version = 1
    
    const __int64 stop_pos = m_file.GetPosition();
    
    //Back-patch the 1-byte size field reserved above.
    const __int64 size_ = stop_pos - start_pos;
    assert(size_ <= 126);  //1-byte EBML u-int type
    
    const BYTE size = static_cast<BYTE>(size_);
    
    m_file.SetPosition(start_pos - 1);
    m_file.Write1UInt(size);
    
    m_file.SetPosition(stop_pos);  //restore write position
}


//Begins the (single) Segment element.  Its total size is not
//known yet, so Write8UInt(0) reserves an 8-byte EBML u-int
//that FinalSegment() back-patches at close time.
void MkvContext::InitSegment()
{   
    m_segment_pos = m_file.GetPosition();  //remember for back-patching
    
    m_file.WriteID4(0x18538067);  //Segment ID
    m_file.Write8UInt(0);         //will need to be filled in later
    
    //The level-1 elements written up front, in order:
    InitSeekHead();  //Meta Seek
    InitInfo();      //Segment Info
    WriteTrack();
}


//Called once all streams have reached EOS: writes the Cues
//(after the last cluster), truncates the file, back-patches
//the Segment size, then fills in the SeekHead entries and
//the Duration placeholder.
void MkvContext::FinalSegment()
{
    m_cues_pos = m_file.GetPosition();  //end of clusters    
    WriteCues();
    
    const __int64 maxpos = m_file.GetPosition();    
    m_file.SetSize(maxpos);
    
    //12 = 4-byte Segment ID + 8-byte size field; the Segment
    //size counts payload only.
    const __int64 size = maxpos - m_segment_pos - 12;
    assert(size >= 0);
    
    m_file.SetPosition(m_segment_pos);

    //Sanity check that we really are positioned on the Segment.
    const ULONG id = m_file.ReadID4();
    assert(id == 0x18538067);  //Segment ID
    id;  //suppress unused-variable warning in release builds
    
    m_file.Write8UInt(size);  //total size of the segment

    FinalSeekHead();    
    FinalInfo();    
    //FinalClusters(m_cues_pos);
}


//Back-patches the Duration element (placeholder written by
//InitInfo) with the final running time, in ms (the file's
//TimecodeScale is 1ms).
void MkvContext::FinalInfo()
{
    m_file.SetPosition(m_duration_pos + 2 + 1);  //2 byte ID + 1 byte size
    
    //TODO: account for duration of last frame too    
    const float duration = static_cast<float>(m_max_timecode_ms);
    
    m_file.Serialize4Float(duration);
}



//Reserves a fixed-size SeekHead element near the front of the
//file; FinalSeekHead() fills it in at close time.
void MkvContext::InitSeekHead()
{
    m_seekhead_pos = m_file.GetPosition();
    
    //Let's try to write a cluster about every second.
    //We need to figure out about how many clusters we'll need.
    //The longest movie I think I've seen is Lawrence of Arabia,
    //which imdb.com is telling me is 227 min, or 13620 sec.
    //The director's cut of Cleopatra is a whopping 320 min, 
    //or 19200 sec.  So let's round up to 20000 as the max
    //number of seek entries.  (It's a pity we must do this,
    //but whatever.  We could make an optimization by using
    //IMediaSeeking to return the duration of the stream.)
    
    //The SeekID is 2 + 1 + 4 = 7 bytes.
    //The SeekPos is 2 + 1 + 8 = 11 bytes.
    //Total payload for a seek entry is 7 + 11 = 18 bytes.
    
    //The Seek entry is 2 + 1 + 18 = 21 bytes.
    //Total payload for SeekHead is 20000 * 21 = 420000 bytes.
    
    //A void element is 1 + 4 + len.
    //For now the Seek payload will be 1 + 4 + 420000 = 420005.
    
    const ULONG size = 420000;
    
    m_file.WriteID4(0x114D9B74);  //Seek Head
    m_file.Write4UInt(size + 5);  //5 = 1 byte ID + 4 byte size
    
    //The size of the SeekHead element is now fixed.  The
    //entries within the payload part of the SeekHead will need 
    //adjusting later.  But the total size itself
    //is determined apriori (unlike the case for, say, the 
    //Segment element).
    
    //Note that we are giving ourselves a budget of 20000
    //entries.  Once we spend our budget, we won't be able
    //to write any more level-1 elements to the file.
    //(Or we could write them, but not mention them
    //in the SeekHead element.)
    
    //Note also that our SeekHead element will always contain
    //at least one Void element.  (In fact, it should contain
    //exactly one Void element.)  We have budgeted for this
    //when we specified the size of the SeekHead element.
    
    //TODO: add a CRC to this element.

    //We don't have the data yet, so we'll void it for now.

    m_file.WriteID1(0xEC);      //Void ID
    m_file.Write4UInt(size);    
    m_file.SetPosition(size, STREAM_SEEK_CUR);  //skip the void payload
}


//Fills in the SeekHead reserved by InitSeekHead: one entry for
//the Segment Info, one for the Tracks, one per cluster, and one
//for the Cues; the remaining budget is covered by a Void element.
void MkvContext::FinalSeekHead()
{
    //TODO: it is permissible to write more than one seek head element.
    //That would mean we could have a small seek head up front,
    //comprising just 4 entries, for the info, track, cue, and other the seek head,
    //and we could have the other seek head have entries for just the clusters.
    //Alternatively, we could include the position of the first cluster
    //in the first seek head.
    
    //TODO: another possibility is that we've already budgeted a large
    //amount of data, we could see if we have enough space to put the 
    //cues up front.
    
    //TODO: splitter considerations: an async source can take a long
    //time to download, especially if this is a network stream.
    //We wouldn't have an index of keyframes until we've downloaded
    //the entire file (because the cues go at the end).  The benefit
    //of having cluster entries up front is that that would give us
    //a defacto index early.  Even without cues, you could get
    //rough indexing early.  You wouldn't get every keyframe (you
    //need the cues for that), but you can get something.  However,
    //if this is a network source, you can't really seek anyway,
    //since you don't yet have the frames!  However, we do wish
    //to minimize how much data needs to be downloaded before
    //playback can begin (even without the ability to seek),
    //so that would favor a smaller seekhead, and hence
    //fewer (but larger) clusters.  With 1ms resolution, a 
    //cluster can contain up to 32s of payload, so maybe a better
    //idea is to create clusters once every 10s or 20s or 30s,
    //instead of once every 1s as we do now.
    
    //Another option is to write the first cluster into the 
    //first seekhead (near the front of the file), and the remaining 
    //clusters into the second seekhead (near the back of the file).
    //That would minimize the amount of data required to begin
    //playback of the file.  (Really, who cares about the position
    //of clusters, except for the first one.  If you're really
    //interested in seeking, you're going to use the cues, not
    //the seekhead.)
    
    //Yet another option is to write two cues into the file.  The
    //first one containing a single entry, for the first keyframe
    //(at tc=0), and then have another cues at the end, with the
    //remaining keyframes.  Is this permitted?
    
    //8 = 4-byte SeekHead ID + 4-byte size: seek to the payload.
    const __int64 start_pos = m_file.SetPosition(m_seekhead_pos + 8);
    
    WriteSeekEntry(0x1549A966, m_info_pos);   //Segment Info
    WriteSeekEntry(0x1654AE6B, m_track_pos);  //Track
 
    typedef clusters_t::const_iterator iter_t;
    
    clusters_t& cc = m_clusters;

    iter_t i = cc.begin();
    const iter_t j = cc.end();
    
    while (i != j)
    {
        const Cluster& c = *i++;        
        WriteSeekEntry(0x1F43B675, c.m_pos);  //one entry per cluster
    }
    
    WriteSeekEntry(0x1C53BB6B, m_cues_pos);  //Cues
    
    const __int64 stop_pos = m_file.GetPosition();
    
    //Whatever remains of the 420000-byte budget (see InitSeekHead)
    //is covered with a single Void element.
    const __int64 size_ = 420000 - (stop_pos - start_pos);
    assert(size_ >= 0);
    
    const ULONG size = static_cast<ULONG>(size_);
    
    m_file.WriteID1(0xEC);  //Void ID
    m_file.Write4UInt(size);
    //No need to seek past the void payload: the callers that
    //follow (FinalInfo) reposition the file themselves.
    //f.SetPosition(size, STREAM_SEEK_CUR);
}


//Writes one fixed-size (21-byte) Seek entry: the EBML ID of the
//target element, plus its position relative to the start of the
//Segment payload.  The fixed size is what lets InitSeekHead
//budget the SeekHead ahead of time.
void MkvContext::WriteSeekEntry(ULONG id, __int64 pos_)
{
    //The SeekID is 2 + 1 + 4 = 7 bytes.
    //The SeekPos is 2 + 1 + 8 = 11 bytes.
    //Total payload for a seek entry is 7 + 11 = 18 bytes.
    
    //The Seek entry is 2 + 1 + 18 = 21 bytes.
    //Total payload for SeekHead is 20000 * 21 = 420000 bytes.
    
#ifdef _DEBUG    
    const __int64 start_pos = m_file.GetPosition();
#endif
    
    m_file.WriteID2(0x4DBB);  //Seek Entry ID
    m_file.Write1UInt(18);    //payload size of this Seek Entry
    
    m_file.WriteID2(0x53AB);  //SeekID ID
    m_file.Write1UInt(4);     //payload size is 4 bytes
    m_file.WriteID4(id);
    
    //SeekPosition is relative to the first byte of the Segment
    //payload; 12 = 4-byte Segment ID + 8-byte size field.
    const __int64 pos = pos_ - m_segment_pos - 12;
    assert(pos >= 0);
    
    m_file.WriteID2(0x53AC);     //SeekPos ID
    m_file.Write1UInt(8);        //payload size is 8 bytes
    m_file.Serialize8UInt(pos);  //payload
    
#ifdef _DEBUG
    //Verify the fixed 21-byte total the budget depends on.
    const __int64 stop_pos = m_file.GetPosition();
    assert((stop_pos - start_pos) == 21);
#endif
}


//Writes the Segment Info element: TimecodeScale = 1,000,000ns
//(i.e. 1ms ticks) and a zero Duration placeholder that
//FinalInfo() overwrites at close time.
void MkvContext::InitInfo()
{
    m_info_pos = m_file.GetPosition();
    
    m_file.WriteID4(0x1549A966);  //Segment Info ID
    
    //allocate 1 byte of storage for size
    const __int64 pos = m_file.SetPosition(1, STREAM_SEEK_CUR);
    
    m_file.WriteID3(0x2AD7B1);       //TimeCodeScale ID
    m_file.Write1UInt(4);            //payload size
    m_file.Serialize4UInt(1000000);  //1ms resolution
    
    m_duration_pos = m_file.GetPosition();  //remember where duration is

    m_file.WriteID2(0x4489);         //Duration ID
    m_file.Write1UInt(4);            //payload size
    m_file.Serialize4Float(0.0);     //set value again during close
    
    //MuxingApp  //TODO
    //WritingApp  //TODO
    
    const __int64 newpos = m_file.GetPosition();

    //Back-patch the 1-byte size field reserved above.
    const __int64 size_ = newpos - pos;
    assert(size_ <= 126);  //1-byte EBML uint
    
    const BYTE size = static_cast<BYTE>(size_);
    
    m_file.SetPosition(pos - 1);
    m_file.Write1UInt(size);
    
    m_file.SetPosition(newpos);  //restore write position
}


//Writes the Tracks element (level 1): one TrackEntry per
//attached stream, video first then audio, matching the
//track-number assignment made in Open().  The element's
//2-byte size field is back-patched after the entries are
//written.
void MkvContext::WriteTrack()
{
    m_track_pos = m_file.GetPosition();
    
    m_file.WriteID4(0x1654AE6B);  //Tracks element (level 1)
    
    //allocate 2 bytes of storage for size of Tracks element (level 1)
    const __int64 begin_pos = m_file.SetPosition(2, STREAM_SEEK_CUR);
    
    int tn = 0;
    
    if (m_pVideo)
        m_pVideo->WriteTrackEntry(++tn);
        
    if (m_pAudio)  //TODO: allow for multiple audio streams
        m_pAudio->WriteTrackEntry(++tn);
    
    const __int64 end_pos = m_file.GetPosition();
    
    const __int64 size_ = end_pos - begin_pos;

    //A 2-byte EBML u-int carries only 14 data bits (the two
    //leading bits are the length descriptor), and the all-ones
    //value is reserved to mean "unknown size" -- cf. the 126
    //(not 127) bound used with Write1UInt elsewhere in this
    //file.  The previous bound of USHRT_MAX (65535) would have
    //let an unencodable size through the assert.
    assert(size_ <= 0x3FFE);  //2-byte EBML u-int type
    
    const USHORT size = static_cast<USHORT>(size_);
    
    //Back-patch the size field, then restore the write position.
    m_file.SetPosition(begin_pos - 2);
    m_file.Write2UInt(size);
    
    m_file.SetPosition(end_pos);
}


#if 0
//Disabled: back-patches this cluster's 4-byte size field using
//the position of the next cluster.  Kept for reference; see the
//(also disabled) FinalClusters for the driving loop.
void MkvContext::Cluster::Final(
    MkvContext& ctx,
    ULONG& prev,
    const Cluster& next)
{
    //Size of this cluster = distance to the next cluster's start.
    const __int64 size_ = next.m_pos - m_pos;
    assert(size_ >= 8);  //at least 4-byte ID + 4-byte size field
    assert(size_ <= ULONG_MAX);
    
    const ULONG new_prev = static_cast<ULONG>(size_);
    const ULONG size = new_prev - 8;  //payload excludes ID + size field
    
    EbmlIO::File& f = ctx.m_file;
    
    f.SetPosition(m_pos);

    //Sanity check that m_pos really points at a Cluster.
    const ULONG id = f.ReadID4();
    assert(id == 0x1F43B675);
    id;  //suppress unused-variable warning in release builds
        
    f.Write4UInt(size);
    
    prev = new_prev;
}
#endif


void MkvContext::Cluster::WriteCuePoints(MkvContext& ctx) const
{
    //Emit one CuePoint for each keyframe recorded in this
    //cluster, in the order they were queued.
    //
    //cue point container = 1 + size len(2) + payload len
    //  time = 1 + size len(1) + payload len(4)
    //  track posns container = 1 + size len + payload len
    //     track = 1 + size len + payload len (track number val)
    //     cluster pos = 1 + size len + payload len (pos val)
    //     block num = 2 + size len + payload len (block num val)

    //TODO: for now just write video keyframes.
    //Do we even need audio here?
    //We would need something, if this is an audio-only mux.

    assert(m_keyframes.size() <= 255);

    typedef keyframes_t::const_iterator iter_t;

    const iter_t end = m_keyframes.end();

    for (iter_t it = m_keyframes.begin(); it != end; ++it)
        it->WriteCuePoint(ctx, *this);
}


//Default keyframe: timecode 0, block number 0.
MkvContext::Keyframe::Keyframe() :
   m_timecode_ms(0),
   m_blockNumber(0)
{
}


//Keyframe record: t is the absolute timecode in ms, n is the
//1-based block number within the owning cluster.
MkvContext::Keyframe::Keyframe(
   ULONG t,
   ULONG n) :
   m_timecode_ms(t),
   m_blockNumber(n)
{
}


//Writes one fixed-size (28-byte payload) CuePoint for this
//keyframe: its time, the video track number, the owning
//cluster's position (Segment-payload-relative), and the block
//number within the cluster.
void MkvContext::Keyframe::WriteCuePoint(
    MkvContext& ctx,
    const Cluster& c) const
{
    EbmlIO::File& f = ctx.m_file;
    
    f.WriteID1(0xBB);  //CuePoint ID
    f.Write1UInt(28);  //payload size (fixed; verified below in debug)

#ifdef _DEBUG    
    const __int64 start_pos = f.GetPosition();
#endif
    
    f.WriteID1(0xB3);                 //CueTime ID
    f.Write1UInt(4);                  //payload len is 4
    f.Serialize4UInt(m_timecode_ms);  //payload
    
    f.WriteID1(0xB7);  //CueTrackPositions
    f.Write1UInt(20);  //payload size (fixed; verified below in debug)

#ifdef _DEBUG    
    const __int64 start_track_pos = f.GetPosition();
#endif
    
    //Only video keyframes are indexed (see WriteCuePoints).
    const int tn_ = ctx.m_pVideo->GetTrackNumber();
    assert(tn_ > 0);
    assert(tn_ <= 126);
    
    const BYTE tn = static_cast<BYTE>(tn_);

    f.WriteID1(0xF7);        //CueTrack ID
    f.Write1UInt(1);         //payload size is 1 byte
    f.Serialize1UInt(tn);    //payload
    
    //Cluster position is relative to the first byte of the
    //Segment payload; 12 = 4-byte Segment ID + 8-byte size.
    const __int64 off = c.m_pos - ctx.m_segment_pos - 12;
    assert(off >= 0);
    
    f.WriteID1(0xF1);        //CueClusterPosition ID
    f.Write1UInt(8);         //payload size is 8 bytes
    f.Serialize8UInt(off);   //payload
    
    //TODO: Keyframe::m_block_number is a 4-byte
    //number, and we serialize all 4 bytes.  However,
    //it's unlikely we'll have block numbers that large
    //(because we create a new cluster every second).
    //Right now we always decide statically how many
    //bytes to serialize (we're using 4 bytes of storage,
    //so we serialize all 4 bytes, irrespective of the
    //value at run-time), but in the future we
    //could decide to check at run-time how large a value
    //we have, and then only serialize the minimum number
    //of bytes required for that value.
    
    f.WriteID2(0x5378);                //CueBlockNumber
    f.Write1UInt(4);                   //payload size
    f.Serialize4UInt(m_blockNumber);  //payload

#ifdef _DEBUG
    //Verify the fixed payload sizes declared above.
    const __int64 stop_pos = f.GetPosition();
    assert((stop_pos - start_track_pos) == 20);
    assert((stop_pos - start_pos) == 28);
#endif
}



//Writes the Cues element (the seek index): the CuePoints of
//every cluster, in cluster order.  The element's 4-byte size
//field is back-patched once the points have been written.
void MkvContext::WriteCues()
{
    m_file.WriteID4(0x1C53BB6B);   //Cues ID

    //allocate 4 bytes of storage for size of cues element
    const __int64 start_pos = m_file.SetPosition(4, STREAM_SEEK_CUR);
    
    typedef clusters_t::const_iterator iter_t;
    
    iter_t i = m_clusters.begin();
    const iter_t j = m_clusters.end();
    
    while (i != j)
    {
        const Cluster& c = *i++;
        c.WriteCuePoints(*this);
    }
    
    const __int64 stop_pos = m_file.GetPosition();
    
    const __int64 size_ = stop_pos - start_pos;

    //A 4-byte EBML u-int carries only 28 data bits (the leading
    //bits of the first byte are the length descriptor), and the
    //all-ones value is reserved to mean "unknown size".  The
    //previous bound of ULONG_MAX was too weak to catch a payload
    //size that Write4UInt cannot actually encode.
    assert(size_ <= 0x0FFFFFFE);  //4-byte EBML u-int type
    
    const ULONG size = static_cast<ULONG>(size_);
    
    //Back-patch the size field, then restore the write position.
    m_file.SetPosition(start_pos - 4);
    m_file.Write4UInt(size);
    
    m_file.SetPosition(stop_pos);
}


#if 0
//Disabled: walks the cluster list, back-patching each cluster's
//size from its successor's position (a sentinel cluster at pos
//is appended to terminate the walk).  Kept for reference; pairs
//with the disabled Cluster::Final above.
void MkvContext::FinalClusters(__int64 pos)
{
    //pos represents the position of the next cluster
    
    m_clusters.push_back(Cluster());  //sentinel terminator
    
    {
        Cluster& c = m_clusters.back();
        c.m_pos = pos;
    }
    
    ULONG prev = 0;
    
    clusters_t::iterator i = m_clusters.begin();
    
    for (;;)
    {
        Cluster& c = *i;
        
        //the sentinel marks the end of the real clusters
        if (c.m_pos == pos)
            break;
        
        c.Final(*this, prev, *++i);
        
        m_clusters.pop_front();
    }
}
#endif


//Queues one video frame and, when enough material has
//accumulated (>= 1s since the first queued reference frame, or
//a forced flush after 32s without an I-frame), starts a new
//cluster.  Takes ownership of pFrame (released after writing).
void MkvContext::WriteVideo(
    MkvStream* pVideo,
    VideoFrame* pFrame)
{
    pVideo;  //suppress unused-parameter warning in release builds
    assert(pVideo);
    assert(pVideo == m_pVideo);
    assert(pFrame);    
    assert(!m_bEOSVideo);
   
    if (m_file.GetStream() == 0)
        return;
        
    //establish invariant:
    //  if vframes.empty, then reference frame list empty
    //  otherwise,
    //    first entry on reference frame list pts to first vframe entry
    //    the first vframe entry is never a b-frame
    //    distance between a reference frame entries, and all of the 
    //      frames up to (but not including) the next reference frame entry,
    //      are guaranteed to have delta time <= SHRT_MAX
    //    distance between reference frames can be greater than SHRT_MAX
    //      but that's OK, because these are different clusters
    //Ideally, every entry on the reference frame list would be
    //a keyframe.  If a reference frame entry is not a keyframe, it's
    //because the distance between that reference frame entry and the 
    //previous reference frame entry was more than 32sec (so creation
    //of the reference frame entry was forced).
    //
    //We will flush at least 1sec worth of data.  If reference frame
    //entries happen more often than once every sec, then we flush
    //at least one entry (the reference frame entry, and everything
    //that follows up to the next reference frame entry).
    //
    //If reference frames happen less than once per second, then we
    //flush only one reference frame.
    //
    //When not eos, we don't do anything until we have at least two
    //reference frame entries in the list.
        
    //push new frame onto back of queue
    //if keyframe, then put its iter on keyframe list
    //if this is a b-frame, don't do anything else
    //if video only then
    //  if this is keyframe, and we have 1+ seconds, then flush
    //  if this is not a keyframe, and 32+ seconds enqueued, then flush
    //else if video + audio then
    //  if this is keyframe, and we have 1+ seconds, and we have
    //    the requisite audio, then flush now; otherwise, wait for more audio
    //  (if not keyframe, don't do anything else, just wait for keyframe)
    //end if
        
#if 0
    if (ReadyToCreateNewClusterVideo(pFrame))
        CreateNewClusterVideo(pFrame);

    m_vframes.push_back(pFrame);
#else
    //A B-frame can never be the first queued frame (it needs a
    //preceding reference frame in the same cluster).
    assert(!m_vframes.empty() || (pFrame->type() != VideoFrame::kTypeBFrame));

    m_vframes.push_back(pFrame);
    
    switch (pFrame->type())
    {
        //B-frames never trigger a flush; wait for a reference frame.
        case VideoFrame::kTypeBFrame:
        default:
            break;
            
        case VideoFrame::kTypeIFrame:
        {
            m_rframes.push_back(pFrame);
            
            //Measure elapsed time from the OLDEST queued
            //reference frame to this new keyframe.
            const VideoFrame* const pvf = m_rframes.front();
            assert(pvf);
            
            const ULONG t0 = pvf->curr_timecode_ms();

            const ULONG t = pFrame->curr_timecode_ms();
            assert(t >= t0);

            const LONG dt = LONG(t) - LONG(t0);
            assert(dt >= 0);
            
            if (dt < 1000)
                break;  //less than 1s queued: keep accumulating
            
            if (ReadyToCreateNewClusterVideo(*pFrame))
                CreateNewCluster(pFrame);

            break;                
        }    
        case VideoFrame::kTypePFrame:
        {
            assert(!m_rframes.empty());
            
            //Measure elapsed time from the NEWEST reference
            //frame; a P-frame only forces a flush when the
            //relative-timecode range (SHRT_MAX ms) would overflow.
            const VideoFrame* const pvf = m_rframes.back();
            assert(pvf);
            
            const ULONG t0 = pvf->curr_timecode_ms();

            const ULONG t = pFrame->curr_timecode_ms();
            assert(t >= t0);

            const LONG dt = LONG(t) - LONG(t0);
            assert(dt >= 0);
            
            if (dt <= SHRT_MAX)
                break;
                
            //TODO: at this point, 32 secs have gone by without
            //receiving an I-frame.  Something is wrong.  We handle
            //it here by creating a 32sec segment, but that's 
            //really too long.  We need an inner loop here to 
            //break up this large queue into 1sec segments.  I
            //haven't bothered to do that yet because it's a 
            //pathological case that in practice should never occur.

            m_rframes.push_back(pFrame);
            
            if (ReadyToCreateNewClusterVideo(*pFrame))
                CreateNewCluster(pFrame);
                
            break;
        }
    }  //end switch (pFrame->type())
#endif
}


//Queues one audio frame and decides whether enough audio has
//arrived to flush the pending video group (or, in audio-only
//mode, to flush a 1s audio cluster).  Takes ownership of
//pFrame (released after writing).
void MkvContext::WriteAudio(
    MkvStream* pAudio,
    AudioFrame* pFrame)
{
    pAudio;  //suppress unused-parameter warning in release builds
    assert(pAudio);
    assert(pAudio == m_pAudio);
    assert(pFrame);
    assert(!m_bEOSAudio);
    
    if (m_file.GetStream() == 0)
        return; 
        
    m_aframes.push_back(pFrame);
    
    const ULONG at = pFrame->curr_timecode_ms();

    //Audio-only mode: either no video stream at all, or video
    //has ended and its queue is drained.  Flush once 1s of
    //audio has accumulated.
    if ((m_pVideo == 0) || (m_vframes.empty() && m_bEOSVideo))
    {
        const AudioFrame* const paf = m_aframes.front();
        assert(paf);
        
        const ULONG at0 = paf->curr_timecode_ms();
        assert(at >= at0);
        
        const LONG dt = LONG(at) - LONG(at0);
        
        if (dt >= 1000)
            CreateNewClusterAudioOnly();
            
        return;
    }
    
    if (m_vframes.empty())
        return;  //video active but nothing queued yet: just wait
        
    assert(!m_rframes.empty());
    assert(m_rframes.front() == m_vframes.front());
    
    const VideoFrame* const pvf0 = m_rframes.front();
    assert(pvf0);
    assert(pvf0->type() != VideoFrame::kTypeBFrame);
    
    const ULONG vt0 = pvf0->curr_timecode_ms();
    
    //if video eos then
    //  if more than one rframe then
    //    if audio time >= reftime then
    //      dequeue some video + audio
    //    else
    //      wait for more audio
    //    endif
    //  elseif just one rframe then
    //     compare audio time to last vframe
    //     if audio time >= last video time then
    //        deque all video frames
    //     else
    //        wait for more audio
    //     endif
    //  endif
    //
    //elseif not video eos then
    //   if only one rframe then
    //     do nothing now, wait for completion of cluster segment
    //   elseif we have more than one rframe then
    //     if distance between 1st and 2nd rframe >= 1000 then
    //       if audio time >= 2nd reftime then
    //          deque 1st chunk
    //       else
    //          do nothing now, wait for more audio
    //       endif
    //     elseif distance between 1st and 2nd rframe < 1000 then
    //       if distance between 1st and 3rd rframe >= 1000 then
    //          //...
    //       //and so on
    //     endif
    //     NOTE: what we want to do here is find the smallest group
    //     of segments that's larger than 1000ms.  Once we identify
    //     those groups, we compare audio time to end of group.  If
    //     audio time >= end, then we requeue groups now.  Otherwise
    //     we wait for more audio.
    //   end if
    //endif
    
    //NOTE(review): iter_t is rframes_t::const_iterator but is
    //assigned from m_vframes below -- this compiles only if the
    //two containers share an iterator type; verify in the header.
    typedef rframes_t::const_iterator iter_t;

    if (m_bEOSVideo)
    {
        //Video has ended: find the LAST non-B frame in the queue;
        //once audio catches up to it, everything can be flushed.
        const iter_t i = m_vframes.begin();
        iter_t j = m_vframes.end();
        
        for (;;)
        {
            const VideoFrame* const pvf = *--j;
            assert(pvf);
            
            if (pvf->type() != VideoFrame::kTypeBFrame)
                break;
                
            assert(j != i);  //front is never a B-frame, so we must stop
        }

        const VideoFrame* const pvf = *j;
        assert(pvf);
        assert(pvf->type() != VideoFrame::kTypeBFrame);
        
        const ULONG vt = pvf->curr_timecode_ms();
        assert(vt >= vt0);

        if (at >= vt)
            CreateNewCluster(0);  //NULL means deque all video
                        
        return;
    }
        
    //duplicate of the typedef above (harmless; same type)
    typedef rframes_t::const_iterator iter_t;

    //Video still active: find the first reference frame at least
    //1s past the front of the queue; if audio has reached it,
    //flush the group that precedes it.
    iter_t i = m_rframes.begin();
    assert(*i == pvf0);
    
    const iter_t j = m_rframes.end();
    
    for (;;)
    {
        ++i;
        
        if (i == j)   //not enough video
            return;
            
        const VideoFrame* const pvf = *i;
        assert(pvf);
        
        const ULONG vt = pvf->curr_timecode_ms();
        assert(vt >= vt0);
        
        const LONG dt = LONG(vt) - LONG(vt0);
        
        if (dt >= 1000)  //we have enough video
        {
            if (at >= vt)
                break;
                
            return;  //not enough audio
        }
    }
    
    CreateNewCluster(*i);  //flush everything before *i
}


int MkvContext::EOSVideo(MkvStream* pSource)
{
    //Idempotent: only the first video-EOS notification does work.
    if (m_bEOSVideo)
        return 0;

    m_bEOSVideo = true;

    //If the audio stream (when present) has already ended,
    //flush every remaining frame into a final cluster.
    const bool bAudioDone = (m_pAudio == 0) || m_bEOSAudio;

    if (m_file.GetStream() && bAudioDone)
        CreateNewCluster(0);

    return EOS(pSource);
}
    

int MkvContext::EOSAudio(MkvStream* pSource)
{
    //Idempotent: only the first audio-EOS notification does work.
    if (m_bEOSAudio)
        return 0;

    m_bEOSAudio = true;

    if (m_file.GetStream() != 0)
    {
        if (m_pVideo == 0)
            CreateNewClusterAudioOnly();   //audio-only mux: flush audio
        else if (m_bEOSVideo)
            CreateNewCluster(0);           //video done too: flush everything
    }

    return EOS(pSource);
}


int MkvContext::EOS(MkvStream*)
{
    //Count down the streams that still owe us an EOS; when the
    //last one reports, finalize the file.
    assert(m_cEOS > 0);

    if (--m_cEOS > 0)
        return 0;  //still waiting on the other stream(s)

    Final();

    return 1;  //signal done
}


//Pops the front video frame and writes it as a BlockGroup in
//cluster c: Block (track, relative timecode, flags, payload)
//plus ReferenceBlock element(s) for P/B frames.  cFrames is the
//running 1-based block count, used for CueBlockNumber.
void MkvContext::WriteVideoFrame(Cluster& c, ULONG& cFrames)
{
   assert(!m_vframes.empty());
   assert(m_pVideo);
      
   VideoFrame* const pf = m_vframes.front();
   assert(pf);
   //the front frame must not begin the NEXT pending group
   assert(m_rframes.empty() || (pf != m_rframes.front()));
   
   const VideoFrame& f = *pf;
   
   assert(cFrames < ULONG_MAX);
   ++cFrames;

   //block = 1 byte ID + 4 byte size + f->size
   //block duration = 1 byte ID + 1 byte size + 1 byte value
   //reference block = 1 byte ID + 2 byte size + 2(?) byte signed value

   const ULONG block_size = 1 + 2 + 1 + f.size();      //tn, tc, flg, f
   ULONG block_group_size = 1 + 4 + block_size; //block id, size, payload

   //each ReferenceBlock adds 1-byte ID + 1-byte size + 2-byte value
   if (f.type() != VideoFrame::kTypeIFrame)
      block_group_size += 4;  //for back ref
     
   if (f.type() == VideoFrame::kTypeBFrame)
      block_group_size += 4;  //for fwd ref

   m_file.WriteID1(0xA0);                //block group ID
   m_file.Write4UInt(block_group_size);  //size of payload for this block group    

#ifdef _DEBUG    
   const __int64 pos = m_file.GetPosition();
#endif

   //begin block

   m_file.WriteID1(0xA1);  //Block ID
   m_file.Write4UInt(block_size);

   const int tn_ = m_pVideo->GetTrackNumber();
   assert(tn_ > 0);
   assert(tn_ <= 127);
   
   const BYTE tn = static_cast<BYTE>(tn_);

   m_file.Write1UInt(tn);   //track number

   const ULONG ft = f.curr_timecode_ms();
      
   {
      //timecode is written relative to the cluster's timecode,
      //as a 2-byte signed int -- hence the SHRT range asserts
      const LONG tc_ = LONG(ft) - LONG(c.m_timecode_ms);
      assert(tc_ >= SHRT_MIN);
      assert(tc_ <= SHRT_MAX);

      const SHORT tc = static_cast<SHORT>(tc_);

      m_file.Serialize2SInt(tc);       //relative timecode 
   }
   
   const BYTE flags = 0;   
   m_file.Write(&flags, 1);   //written as binary, not uint

   m_file.Write(f.data(), f.size());  //frame

   //end block

   if (f.type() == VideoFrame::kTypeIFrame)
   {
      //keyframe: record it for the Cues index
      keyframes_t& kk = c.m_keyframes;
      const Keyframe k(ft, cFrames);

      kk.push_back(k);
   }
   else
   {
      //ReferenceBlock (backward).  NOTE(review): this assumes
      //prev_timecode_ms() already yields the signed RELATIVE
      //timecode of the referenced block -- confirm in VideoFrame.
      m_file.WriteID1(0xFB);
      m_file.Write1UInt(2);
      m_file.Serialize2SInt(f.prev_timecode_ms());

      if (f.type() == VideoFrame::kTypeBFrame)
      {
         //second ReferenceBlock (forward), B-frames only
         m_file.WriteID1(0xFB);
         m_file.Write1UInt(2);
         m_file.Serialize2SInt(f.next_timecode_ms());
      }
   }

#ifdef _DEBUG
   //verify the size we declared matches what we actually wrote
   const __int64 newpos = m_file.GetPosition();
   assert((newpos - pos) == block_group_size);
#endif

   if (ft > m_max_timecode_ms)
      m_max_timecode_ms = ft;  //track duration

   m_vframes.pop_front();
   pf->Release();  //we owned the frame; done with it now
}


//Pops the front audio frame and writes it as a BlockGroup in
//cluster c.  Audio frames are independently decodable, so no
//ReferenceBlock elements are written.  cFrames is the running
//1-based block count.
void MkvContext::WriteAudioFrame(Cluster& c, ULONG& cFrames)
{
   assert(!m_aframes.empty());
   assert(m_pAudio);
      
   AudioFrame* const pf = m_aframes.front();
   assert(pf);
   
   const AudioFrame& f = *pf;
   
   assert(cFrames < ULONG_MAX);
   ++cFrames;

   //block = 1 byte ID + 4 byte size + f->size
   //block duration = 1 byte ID + 1 byte size + 1 byte value
   //reference block = 1 byte ID + 2 byte size + 2(?) byte signed value

   const ULONG block_size = 1 + 2 + 1 + f.size();     //tn, tc, flg, f
   const ULONG block_group_size = 1 + 4 + block_size; //block id, size, payload

   m_file.WriteID1(0xA0);                //block group ID
   m_file.Write4UInt(block_group_size);  //size of payload for this block group    

#ifdef _DEBUG    
   const __int64 pos = m_file.GetPosition();
#endif

   //begin block

   m_file.WriteID1(0xA1);  //Block ID
   m_file.Write4UInt(block_size);

   const int tn_ = m_pAudio->GetTrackNumber();
   assert(tn_ > 0);
   assert(tn_ <= 127);
   
   const BYTE tn = static_cast<BYTE>(tn_);

   m_file.Write1UInt(tn);   //track number

   const ULONG ft = f.curr_timecode_ms();
      
   {
      //timecode is relative to the cluster's timecode, as a
      //2-byte signed int -- hence the SHRT range asserts
      const LONG tc_ = LONG(ft) - LONG(c.m_timecode_ms);
      assert(tc_ >= SHRT_MIN);
      assert(tc_ <= SHRT_MAX);

      const SHORT tc = static_cast<SHORT>(tc_);

      m_file.Serialize2SInt(tc);       //relative timecode 
   }
   
   const BYTE flags = 0;   
   m_file.Write(&flags, 1);   //written as binary, not uint

   m_file.Write(f.data(), f.size());  //frame

   //end block

#ifdef _DEBUG
   //verify the size we declared matches what we actually wrote
   const __int64 newpos = m_file.GetPosition();
   assert((newpos - pos) == block_group_size);
#endif

   if (ft > m_max_timecode_ms)
      m_max_timecode_ms = ft;  //track duration

   m_aframes.pop_front();
   pf->Release();  //we owned the frame; done with it now
}


//NOTE: The two functions below are an earlier, disabled implementation of
//cluster creation.  They are compiled out via "#if 0" and retained for
//reference only; the active implementations (ReadyToCreateNewClusterVideo
//taking a VideoFrame&, and CreateNewCluster) appear further down.
#if 0
bool MkvContext::ReadyToCreateNewClusterVideo(
   const VideoFrame* plast_vframe) const
{
   if (plast_vframe == 0)  //EOS
      return true;
      
   const VideoFrame& last_vframe = *plast_vframe;
      
   if (last_vframe.type() == VideoFrame::kTypeBFrame)
      return false;
      
   if (m_vframes.empty())
      return false;
      
   const ULONG last_vframe_time = last_vframe.curr_timecode_ms();

   //If we decide to create a new cluster, we flush the frames queues
   //to disk.  
   //If we have no audio, queue frame and wait for audio.  Don't
   //flush (unless we have to).
   //If we have audio, but the last timecode was saw was less than f.timecode,
   //than queue frame and wait for audio.  (We don't flush, unless we have to.)
   //If we have audio, and the time is equal or greater than f.time, then
   //we can flush now.  But that's not quite right.  We only flush if 
   //video frame f satisfies the criterion for creation a cluster.
   
   const VideoFrame* const pFirst_vframe = m_vframes.front();
   assert(pFirst_vframe);
   
   const VideoFrame& first_vframe = *pFirst_vframe;
   assert(first_vframe.type() != VideoFrame::kTypeBFrame);
   
   const ULONG first_vframe_time = first_vframe.curr_timecode_ms();
   assert(last_vframe_time >= first_vframe_time);   

   const LONG vframe_dt = LONG(last_vframe_time) - LONG(first_vframe_time);
   
   if (vframe_dt < 1000)
      return false;

   if (vframe_dt > SHRT_MAX)
      return true;
      
   if (last_vframe.type() == VideoFrame::kTypePFrame)
      return false;  //TODO: audio conditions might force flush here

   //We have an I-frame (our preference for the start
   //of a cluster).  If this were a video-only mux,
   //then we would create a new cluster.  However,
   //we are muxing audio too.  If the last audio-frame
   //we has a time >= than last_time, then we can flush
   //both video and audio queues to disk.
   
   //There is a problem here.  If we wait to receive audio,
   //then we enque this video frame.  But that means there's
   //no simple way to identify this video frame again,
   //when we flush as a result of receiving an audio frame.
   //
   //We could just fudge.  Our ideal would be for all of the 
   //audio that goes with video in a cluster to have a timecode
   //that >= than the timecode of the cluster.  (This is true
   //of the video frames as well, except for b-frames.)
   //If we flush now, it would mean we'd have a few frames
   //of audio in the next cluster whose time less than the
   //timecode of the cluster.
   
   if (m_pAudio == 0)  //video-only mux
      return true;
      
   //At this point we have a candidate range of video.
   //We would like to flush now, but we might need to 
   //wait for more audio.  The actual flush will occur
   //when we receive audio equal or greater to our
   //candidate I-frame.  The problem is that once we leave
   //this subprogram, we don't know what that candidate
   //video frame is anymore.  We could mark it somehow,
   //and then later compare the audio time to the 
   //marked video frame.  We would then purge the 
   //video (and any audio) waiting to be flushed.
   
   //TODO: handle audio EOS
   
#if 0  //TODO: for now, just flush immediately

   if (m_aframes.empty())
      return false;  //wait until we have more audio
   
   const AudioFrame* const pLast_aframe = m_aframes.back();
   assert(pLast_aframe);  //TODO: use NULL to indicate audio EOS?
   
   const AudioFrame& last_aframe = *pLast_aframe;
   const ULONG last_aframe_time = last_aframe.curr_timecode_ms();

   if (last_aframe_time < last_vframe_time)
      return false;  //wait until we have more audio      
      
#endif  //TODO
        
   return true;
}


void MkvContext::CreateNewClusterVideo(const VideoFrame* plast_vframe)
{
   if (m_vframes.empty())
      return;
   
   //const ULONG last_vframe_time = last_vframe.curr_timecode_ms();

   clusters_t& cc = m_clusters;

   if (!cc.empty())   
   {
      const VideoFrame* const pvf = m_vframes.front();
      assert(pvf);  //TODO: use NULL to indicate EOS?
         
      const ULONG t = pvf->curr_timecode_ms();
      assert(t > cc.back().m_timecode_ms);
      t;   //referenced to avoid an unused-variable warning when asserts compile out
   }
   
   cc.push_back(Cluster());    
   Cluster& c = cc.back();
   
   c.m_pos = m_file.GetPosition();
   
   {
      const VideoFrame* const pvf = m_vframes.front();
      assert(pvf);  //TODO: use NULL to indicate EOS?
         
      const ULONG t = pvf->curr_timecode_ms();
      assert((plast_vframe == 0) || (t <= plast_vframe->curr_timecode_ms()));

      c.m_timecode_ms = t;
   }

   m_file.WriteID4(0x1F43B675);  //Cluster ID
   m_file.Write4UInt(0);         //patch size later, during close

   m_file.WriteID1(0xE7);
   m_file.Write1UInt(4);
   m_file.Serialize4UInt(c.m_timecode_ms);
   
   m_cFrames = 0;   
   WriteVideoFrame(c);
   
   while (!m_vframes.empty())
   {
      const VideoFrame* const pvf = m_vframes.front();
      assert(pvf);
      
      const ULONG first_vframe_time = pvf->curr_timecode_ms();
      assert((plast_vframe == 0) ||
             (first_vframe_time <= plast_vframe->curr_timecode_ms()));
      
      if (m_aframes.empty())
      {
         while (!m_vframes.empty())  //TODO: handle dt > SHRT_MAX
            WriteVideoFrame(c);
            
         return;
      }

      const AudioFrame* const paf = m_aframes.front();
      assert(paf);
      
      const ULONG af_time = paf->curr_timecode_ms();
      
      //TODO: this is what would like to be true:
      //assert(af_time >= c.m_timecode_ms);
      //However, for now we flush immediately, when video
      //stream is ready, without waiting for audio.
      
      if (af_time <= first_vframe_time)
         WriteAudioFrame(c);         
      else
         WriteVideoFrame(c);
   }

   while (!m_aframes.empty())
   {
      const AudioFrame* const paf = m_aframes.front();
      assert(paf);
      
      const ULONG af_time = paf->curr_timecode_ms();
      
      //TODO: see comment above
      //assert(af_time >= c.m_timecode_ms);      
      
      if ((plast_vframe != 0) && (af_time >= plast_vframe->curr_timecode_ms()))
         return;
      
      if ((af_time - c.m_timecode_ms) > SHRT_MAX)
         return;
         
      WriteAudioFrame(c);      
   }
}
#endif


bool MkvContext::ReadyToCreateNewClusterVideo(const VideoFrame& vf) const
{
    //Called when new reference frame created.
    
    assert(!m_vframes.empty());
    assert(&vf == m_rframes.back());
    
    if (m_pAudio == 0)
        return true;
        
    if (m_bEOSAudio)
        return true;
        
    if (m_aframes.empty())
        return false;
        
    const AudioFrame* const paf = m_aframes.back();
    assert(paf);
    
    const ULONG at = paf->curr_timecode_ms();        
    const ULONG vt = vf.curr_timecode_ms();
    
    if (at >= vt)
        return true;
    
    return false;            
}


//Flush queued video frames — up to, but not including, pvf_stop — plus
//any interleaved audio frames, into a brand-new cluster appended to
//m_clusters.  pvf_stop == 0 means flush everything (end of stream).
//The cluster's 4-byte size field is skipped while the payload is
//serialized, then patched in place once the final position is known.
void MkvContext::CreateNewCluster(const VideoFrame* pvf_stop)
{
    assert(!m_vframes.empty());
    assert(!m_rframes.empty());
    assert(m_vframes.front() == m_rframes.front());
    assert(m_aframes.empty() || (m_pAudio != 0));

    clusters_t& cc = m_clusters;

    //Sanity check: cluster timecodes must be strictly increasing.
    if (!cc.empty())   
    {
        const Cluster& c = cc.back();
        c;   //referenced to avoid an unused-variable warning when asserts compile out
        
        const VideoFrame* const pvf = m_vframes.front();
        assert(pvf);

        const ULONG t = pvf->curr_timecode_ms();
        t;   //referenced to avoid an unused-variable warning when asserts compile out
        assert(t > c.m_timecode_ms);
    }

    cc.push_back(Cluster());    
    Cluster& c = cc.back();

    c.m_pos = m_file.GetPosition();

    //The cluster's timecode is the first (reference) video frame's time.
    {
        const VideoFrame* const pvf = m_vframes.front();
        assert(pvf);

        c.m_timecode_ms = pvf->curr_timecode_ms();
    }

#if 0
    odbgstream os;
    os << "CreateNewCluster: c.time=" << c.m_timecode_ms << endl;
#endif

    m_file.WriteID4(0x1F43B675);  //Cluster ID

#if 0
    m_file.Write4UInt(0);         //patch size later, during close
#else
    //Leave a 4-byte hole for the cluster size; it is patched at the
    //bottom of this function once the payload has been written.
    m_file.SetPosition(4, STREAM_SEEK_CUR);
#endif

    m_file.WriteID1(0xE7);        //Timecode ID
    m_file.Write1UInt(4);
    m_file.Serialize4UInt(c.m_timecode_ms);
    
#if 0
    {
        assert(!m_vframes.empty());
        
        const VideoFrame* const pvf = m_vframes.front();        
        assert(pvf);
        assert(pvf != pvf_stop);
        
        const ULONG vt = pvf->curr_timecode_ms();
        
        const LONG dt = LONG(vt) - LONG(c.m_timecode_ms);            
        assert(dt >= SHRT_MIN);
        
        os << "vt=" << vt << " dt=" << dt << endl;
    }
#endif
    
    ULONG cFrames = 0;

    m_rframes.pop_front();         //because we're about to delete this frame
    WriteVideoFrame(c, cFrames);   //flush frame and delete it
    
    //Interleave the remaining queued video and audio frames in
    //timecode order, stopping at pvf_stop (the start of the next
    //cluster) if one was given.
    while (!m_vframes.empty())
    {
        const VideoFrame* const pvf = m_vframes.front();
        assert(pvf);
        
        if (pvf == pvf_stop)
        {
            assert(!m_rframes.empty());
            assert(m_rframes.front() == pvf);
            break;
        }
        
        const ULONG vt = pvf->curr_timecode_ms();
        //We can't assert this, because of B-frames:
        //assert(vt >= c.m_timecode_ms);
        
        //The relative timecode must fit in a signed 16-bit block offset.
        const LONG dt = LONG(vt) - LONG(c.m_timecode_ms);            
        dt;   //referenced to avoid an unused-variable warning when asserts compile out
        assert(dt >= SHRT_MIN);
        assert(dt <= SHRT_MAX);
        
        //os << "vt=" << vt << " dt=" << dt << endl;

        if (m_aframes.empty())
        {
            //No audio queued: write the video frame, keeping the
            //reference-frame queue in sync with the video queue.
            if (!m_rframes.empty() && (pvf == m_rframes.front()))
                m_rframes.pop_front();
                
            WriteVideoFrame(c, cFrames);
            continue;
        }
        
        const AudioFrame* const paf = m_aframes.front();
        assert(paf);

        const ULONG at = paf->curr_timecode_ms();
        //TODO: this is what would like to be true:
        //assert(at >= c.m_timecode_ms);
        //But we can't guarantee this at the time we
        //flush video, so we might have a few stray
        //audio frames that we need to put in the next
        //cluster (that is, the one being created now).

        //os << "at=" << at << endl;

        if (at <= vt)
            WriteAudioFrame(c, cFrames);         
        else
        {
            if (!m_rframes.empty() && (pvf == m_rframes.front()))
                m_rframes.pop_front();

            WriteVideoFrame(c, cFrames);
        }
    }

    //Drain any remaining audio that belongs in this cluster: stop at
    //the next cluster's start time (stop_time < 0 means no limit), or
    //when the relative timecode would overflow a signed 16-bit value.
    LONG stop_time;
    
    if (pvf_stop == 0)
        stop_time = -1;
    else
    {
        stop_time = pvf_stop->curr_timecode_ms();
        assert(ULONG(stop_time) > c.m_timecode_ms);
    }

    while (!m_aframes.empty())
    {
        const AudioFrame* const paf = m_aframes.front();
        assert(paf);

        const ULONG at = paf->curr_timecode_ms();
        //TODO: see comment above
        //assert(at >= c.m_timecode_ms);
        
        if ((stop_time >= 0) && (at >= ULONG(stop_time)))
            break;
            
        const LONG dt = LONG(at) - LONG(c.m_timecode_ms);            
        assert(dt >= SHRT_MIN);
        
        if (dt > SHRT_MAX)
            break;
        
        WriteAudioFrame(c, cFrames);      
    }
    
    //Patch the cluster's size field in the 4-byte hole left above.
    const __int64 pos = m_file.GetPosition();
    
    const __int64 size_ = pos - c.m_pos - 8;  //8 = 4-byte ID + 4-byte size field
    assert(size_ <= ULONG_MAX);
    
    const ULONG size = static_cast<ULONG>(size_);

    m_file.SetPosition(c.m_pos + 4);
    m_file.Write4UInt(size);
    
    m_file.SetPosition(pos);
}


void MkvContext::CreateNewClusterAudioOnly()
{
    assert(!m_aframes.empty());
      
    const AudioFrame* const paf_first = m_aframes.front();
    assert(paf_first);

    const AudioFrame& af_first = *paf_first;   

    const ULONG af_first_time = af_first.curr_timecode_ms();

    clusters_t& cc = m_clusters;
    assert(cc.empty() || (af_first_time > cc.back().m_timecode_ms));

    cc.push_back(Cluster());    
    Cluster& c = cc.back();

    c.m_pos = m_file.GetPosition();   
    c.m_timecode_ms = af_first_time;
      
    m_file.WriteID4(0x1F43B675);  //Cluster ID
    m_file.Write4UInt(0);         //patch size later, during close

    m_file.WriteID1(0xE7);
    m_file.Write1UInt(4);
    m_file.Serialize4UInt(c.m_timecode_ms);

    ULONG cFrames = 0;   //TODO: must write cues for audio

    while (!m_aframes.empty())
    {
        const AudioFrame* const paf = m_aframes.front();
        assert(paf);
      
        const ULONG t = paf->curr_timecode_ms();
        assert(t >= c.m_timecode_ms);
      
        const LONG dt = LONG(t) - LONG(c.m_timecode_ms);
      
        if (dt > 1000)
            break;
      
        WriteAudioFrame(c, cFrames);
    }
}


}  //end namespace MkvMux
