#include <strmif.h>
#include <uuids.h>
#include "mkvsplitfilter.hpp"
#include "cenumpins.hpp"
#include "mkvsplitoutpin.hpp"
#include "mkvparser.hpp"
#include "mkvparserstreamvideo.hpp"
#include "mkvparserstreamaudio.hpp"
#include <new>
#include <vector>
#include <cassert>
#include <vfwmsgs.h>
#include <process.h>
#ifdef _DEBUG
#include "iidstr.hpp"
#include "odbgstream.hpp"
using std::endl;
#endif

using std::wstring;
//using std::wistringstream;

namespace MkvSplit
{

// {ADB85B5C-AA6B-4192-85FC-B0E087E9CA37}
//CLSID under which this splitter filter is registered with COM.
extern const CLSID CLSID_MkvSplit = 
{ 0xadb85b5c, 0xaa6b, 0x4192, { 0x85, 0xfc, 0xb0, 0xe0, 0x87, 0xe9, 0xca, 0x37 } };

// {4CAB9818-1989-4a5f-8474-2D1F66B420F2}
//Private media subtype used to identify MKV content on the input pin.
extern const GUID MEDIASUBTYPE_MKV =   //TODO: use std value
{ 0x4cab9818, 0x1989, 0x4a5f, { 0x84, 0x74, 0x2d, 0x1f, 0x66, 0xb4, 0x20, 0xf2 } };


//Scoped holder for the filter-wide mutex.  A freshly constructed
//Lock does not hold the mutex; call Seize to acquire it.
Filter::Lock::Lock()
{
    m_hMutex = 0;  //not holding the mutex yet
}


//Releases the filter mutex (if currently held) when the lock
//object goes out of scope -- the RAII guarantee.
Filter::Lock::~Lock()
{
    Release();
}


//Acquire the filter-wide mutex, pumping COM messages while
//waiting (CoWaitForMultipleHandles).  Returns VFW_E_TIMEOUT if
//the mutex could not be acquired within the requested interval.
//Must not be called when this Lock already holds the mutex.
HRESULT Filter::Lock::Seize(Filter* pFilter, DWORD timeout)
{
    assert(m_hMutex == 0);
    assert(pFilter);

    HANDLE hMutex = pFilter->m_hMutex;
    DWORD idx;

    const HRESULT hrWait = CoWaitForMultipleHandles(
                                0,       //no wait flags
                                timeout,
                                1,       //a single handle
                                &hMutex,
                                &idx);

    //Despite the "S" (success) prefix in the name, RPC_S_CALLPENDING
    //here means the wait timed out.
    if (hrWait == RPC_S_CALLPENDING)
        return VFW_E_TIMEOUT;

    if (FAILED(hrWait))
        return hrWait;

    assert(idx == 0);

    m_hMutex = hMutex;  //we now own the mutex

    return S_OK;
}


//Release the filter mutex if this object currently holds it;
//otherwise do nothing.  Safe to call multiple times.
void Filter::Lock::Release()
{
    if (m_hMutex == 0)
        return;  //not held

    const BOOL b = ReleaseMutex(m_hMutex);
    assert(b);
    b;  //suppress "unused variable" warning in release builds

    m_hMutex = 0;
}



//Class-factory entry point: create a Filter instance and query it
//for the requested interface.  Supports COM aggregation: when an
//outer unknown is supplied, only IUnknown may be requested.
HRESULT CreateInstance(
    IClassFactory* pClassFactory,
    IUnknown* pOuter, 
    const IID& iid, 
    void** ppv)
{
    if (ppv == 0)
        return E_POINTER;

    *ppv = 0;

    //Aggregation rule: the outer object may only ask for the
    //inner object's (nondelegating) IUnknown.
    if (pOuter && (iid != __uuidof(IUnknown)))
        return E_INVALIDARG;

    Filter* const pFilter = new (std::nothrow) Filter(pClassFactory, pOuter);

    if (pFilter == 0)
        return E_OUTOFMEMORY;

    assert(pFilter->m_nondelegating.m_cRef == 0);

    const HRESULT hr = pFilter->m_nondelegating.QueryInterface(iid, ppv);

    if (FAILED(hr))
    {
        //No reference was handed out, so destroy the object here.
        assert(*ppv == 0);
        assert(pFilter->m_nondelegating.m_cRef == 0);

        delete pFilter;
        return hr;
    }

    assert(*ppv);
    assert(pFilter->m_nondelegating.m_cRef == 1);

    return S_OK;
}


#pragma warning(disable:4355)  //'this' ptr in member init list
//Filter constructor.  Pins the hosting server in memory via
//IClassFactory::LockServer, creates the filter-wide mutex and the
//worker-thread stop event, and starts out graph-less and stopped.
//If pOuter is 0 the filter is not aggregated, and its own
//nondelegating IUnknown acts as the controlling unknown.
Filter::Filter(IClassFactory* pClassFactory, IUnknown* pOuter)
    : m_pClassFactory(pClassFactory),
      m_nondelegating(this),
      m_pOuter(pOuter ? pOuter : &m_nondelegating),  //aggregation support
      m_state(State_Stopped),
      m_clock(0),
      m_hThread(0),
      m_hStop(0),
      m_pSegment(0),
      m_inpin(this)
{
    m_pClassFactory->LockServer(TRUE);

    //unnamed mutex, not initially owned
    m_hMutex = CreateMutex(0, 0, 0);
    assert(m_hMutex);  //TODO
    
    //auto-reset event, initially non-signalled; used to tell the
    //worker thread to terminate (see Init/Final)
    m_hStop = CreateEvent(0, 0, 0, 0);
    assert(m_hStop);  //TODO
    
    m_info.pGraph = 0;
    m_info.achName[0] = L'\0';
    
#ifdef _DEBUG        
    odbgstream os;
    os << "mkvsrc::ctor" << endl;
#endif    
}
#pragma warning(default:4355)


//Filter destructor.  By this point Final() must already have run
//(no worker thread) and the input pin must have been disconnected
//(no output pins, no parsed segment).  Closes the kernel objects
//and releases the LockServer pin taken in the constructor.
Filter::~Filter()
{
#ifdef _DEBUG
    odbgstream os;
    os << "mkvsrc::dtor" << endl;
#endif

#if 1
    assert(m_outpins.empty());
    assert(m_pSegment == 0);
#else
    while (!m_outpins.empty())
    {
        Outpin* p = m_pins.back();
        assert(p);
        
        m_pins.pop_back();
        delete p;
    }
    
    delete m_pSegment;
#endif

    assert(m_hThread == 0);

    BOOL b = CloseHandle(m_hMutex);
    assert(b);
    
    b = CloseHandle(m_hStop);
    assert(b);

    m_pClassFactory->LockServer(FALSE);
}      


void Filter::Init()
{
    assert(m_hThread == 0);
    
    const BOOL b = ResetEvent(m_hStop);
    assert(b);
    
    const uintptr_t h = _beginthreadex(
                            0,  //security
                            0,  //stack size
                            &Filter::ThreadProc,
                            this,
                            0,   //run immediately
                            0);  //thread id
                            
    m_hThread = reinterpret_cast<HANDLE>(h);
    assert(m_hThread);
}


//Signal the worker thread to stop, wait for it to exit, and close
//its handle.  A no-op when no worker thread is running.
void Filter::Final()
{
    if (m_hThread == 0)
        return;  //no worker thread: nothing to do

    BOOL b = SetEvent(m_hStop);  //request termination
    assert(b);

    const DWORD dw = WaitForSingleObject(m_hThread, INFINITE);
    assert(dw == WAIT_OBJECT_0);
    dw;  //suppress "unused" warning in release builds

    b = CloseHandle(m_hThread);
    assert(b);

    m_hThread = 0;
}


//The nondelegating unknown is an embedded member of Filter.  It
//starts with a zero reference count; the first AddRef happens in
//CreateInstance via QueryInterface.
Filter::CNondelegating::CNondelegating(Filter* p)
    : m_pFilter(p),
      m_cRef(0)  //see CreateInstance
{
}


//Nothing to clean up: lifetime is managed by the owning Filter
//(see CNondelegating::Release, which deletes the Filter).
Filter::CNondelegating::~CNondelegating()
{
}


//Nondelegating QueryInterface.  By the COM identity rule, a
//request for IUnknown must return this object itself; the filter
//interfaces (IBaseFilter and its bases) are satisfied by the
//owning Filter object.
HRESULT Filter::CNondelegating::QueryInterface(
    const IID& iid, 
    void** ppv)
{
    if (ppv == 0)
        return E_POINTER;

    IUnknown*& pResult = reinterpret_cast<IUnknown*&>(*ppv);

    if (iid == __uuidof(IUnknown))
    {
        pResult = this;  //identity: must be the nondelegating unknown
    }
    else if ((iid == __uuidof(IBaseFilter)) ||
             (iid == __uuidof(IMediaFilter)) ||
             (iid == __uuidof(IPersist)))
    {
        //IBaseFilter derives from IMediaFilter derives from IPersist,
        //so one static_cast serves all three requests.
        pResult = static_cast<IBaseFilter*>(m_pFilter);
    }
    else
    {
#if 0
        wodbgstream os;
        os << "mkvsource::filter::QI: iid=" << IIDStr(iid) << std::endl;
#endif        
        pResult = 0;
        return E_NOINTERFACE;
    }

    pResult->AddRef();
    return S_OK;
}


//Thread-safe reference-count increment.
ULONG Filter::CNondelegating::AddRef()
{
    const LONG n = InterlockedIncrement(&m_cRef);
    return static_cast<ULONG>(n);
}

    
//Thread-safe reference-count decrement; destroys the owning
//Filter (and therefore this embedded object) when the count
//reaches zero.
ULONG Filter::CNondelegating::Release()
{
    const LONG n = InterlockedDecrement(&m_cRef);

    if (n == 0)
    {
        delete m_pFilter;
        return 0;
    }

    return static_cast<ULONG>(n);
}


//Delegate to the controlling unknown: the outer object when
//aggregated, otherwise our own nondelegating implementation.
HRESULT Filter::QueryInterface(const IID& iid, void** ppv)
{
    IUnknown* const pOuter = m_pOuter;
    return pOuter->QueryInterface(iid, ppv);
}


//Delegate reference counting to the controlling unknown.
ULONG Filter::AddRef()
{
    IUnknown* const pOuter = m_pOuter;
    return pOuter->AddRef();
}


//Delegate reference counting to the controlling unknown.
ULONG Filter::Release()
{
    IUnknown* const pOuter = m_pOuter;
    return pOuter->Release();
}


//IPersist::GetClassID -- report this filter's CLSID.
HRESULT Filter::GetClassID(CLSID* p)
{
    if (p == 0)
        return E_POINTER;

    CLSID& clsid = *p;
    clsid = CLSID_MkvSplit;

    return S_OK;
}



//IMediaFilter::Stop.  Synchronous: when it returns, the filter is
//in State_Stopped and all streaming threads have terminated.
//Note the lock dance: the filter lock is released around OnStop()
//so that the streaming threads can themselves acquire the lock
//while shutting down.
HRESULT Filter::Stop()
{
    //Stop is a synchronous operation: when it completes,
    //the filter is stopped.

    Lock lock;
    
    HRESULT hr = lock.Seize(this);
    
    if (FAILED(hr))
        return hr;
        
    switch (m_state)
    {
        case State_Paused:
        case State_Running:
            
            //Stop is synchronous.  When stop completes, all threads
            //should be stopped.  What does "stopped" mean"  In our
            //case it probably means "terminated".
            //It's a bit tricky here because we hold the filter
            //lock.  If threads need to acquire filter lock
            //then we'll have to release it.  Only the FGM can call
            //Stop, etc, so there's no problem to release lock
            //while Stop is executing, to allow threads to acquire
            //filter lock temporarily.
            //The streaming thread will receiving an indication
            //automatically (assuming it's connected), either via
            //GetBuffer or Receive, so there's nothing this filter
            //needs to do to tell the streaming thread to stop.
            //One implementation strategy is to have build a
            //vector of thread handles, and then wait for a signal
            //on one of them.  When the handle is signalled 
            //(meaning that the thread has terminated), then 
            //we remove that handle from the vector, close the
            //handle, and the wait again.  Repeat until the
            //all threads have been terminated.
            //We also need to clean up any unused samples,
            //and decommit the allocator.  (In fact, we could
            //decommit the allocator immediately, and then wait
            //for the threads to terminated.)
            
            lock.Release();

            OnStop();            
            
            //Re-acquire the lock before mutating m_state below.
            hr = lock.Seize(this);
            assert(SUCCEEDED(hr));  //TODO
            
            break;            

        case State_Stopped:
        default:
            break;
    }
    
    m_state = State_Stopped;
    return S_OK;
}


//IMediaFilter::Pause.  Only the stopped->paused transition does
//real work (starting the pin and worker threads); pausing while
//running, or when already paused, requires nothing here.
HRESULT Filter::Pause()
{
    //Unlike Stop, Pause is allowed to complete asynchronously --
    //that is what GetState's timeout parameter is for.  We could
    //exploit that here, e.g. to build the samples index.

    Lock lock;

    const HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    if (m_state == State_Stopped)
        OnStart();

    m_state = State_Paused;
    return S_OK;
}


//IMediaFilter::Run.  Records the stream-time offset supplied by
//the filter graph manager and enters State_Running; starting
//directly from State_Stopped first spins up the threads.
HRESULT Filter::Run(REFERENCE_TIME start)
{
    Lock lock;

    const HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    if (m_state == State_Stopped)
        OnStart();  //stopped -> running: start threads

    m_start = start;
    m_state = State_Running;

    return S_OK;
}


//IMediaFilter::GetState.  The timeout parameter refers to waiting
//for a pending (asynchronous) state transition to settle -- not
//to seizing the filter lock.  Stop is always synchronous (hence
//no timeout there), but Pause may be asynchronous; since our
//Pause currently completes synchronously, no waiting scheme is
//implemented yet.
//TODO: when Pause becomes asynchronous, wait here (probably via
//SignalObjectAndWait, atomically releasing the mutex) and return
//VFW_S_STATE_INTERMEDIATE when the wait times out.
HRESULT Filter::GetState( 
    DWORD /* timeout */ ,
    FILTER_STATE* p)
{
    if (p == 0)
        return E_POINTER;

    Lock lock;

    const HRESULT hr = lock.Seize(this);

    //The lock is used purely for synchronization here.  If Seize
    //fails, something is seriously wrong with the filter.
    if (FAILED(hr))
        return E_FAIL;

    *p = m_state;
    return S_OK;
}



//IMediaFilter::SetSyncSource -- adopt a new reference clock
//(clock may be 0, meaning "no clock").
//
//FIX: the new clock is AddRef'd *before* the old one is
//Released.  The original released first, which is the classic
//COM bug: if the caller passes the same clock we already hold
//and ours happens to be the last reference, Release destroys the
//object and the subsequent AddRef touches freed memory.
HRESULT Filter::SetSyncSource( 
    IReferenceClock* clock)
{
    Lock lock;

    const HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    if (clock)
        clock->AddRef();   //take our reference first

    if (m_clock)
        m_clock->Release();  //then drop the old one

    m_clock = clock;

    return S_OK;
}


//IMediaFilter::GetSyncSource -- return the current reference
//clock (AddRef'd for the caller), or 0 if none has been set.
HRESULT Filter::GetSyncSource( 
    IReferenceClock** pclock)
{
    if (pclock == 0)
        return E_POINTER;

    Lock lock;

    const HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    *pclock = m_clock;

    if (m_clock)
        m_clock->AddRef();  //caller owns a reference

    return S_OK;
}


//IBaseFilter::EnumPins -- create an enumerator over this filter's
//pins: the single input pin followed by every output pin.
//
//FIX: the temporary pin array was built with _alloca, whose size
//grows with the number of output pins -- an unbounded (if
//theoretical) stack-allocation risk, and _alloca has no failure
//path.  A std::vector is used instead.
HRESULT Filter::EnumPins(IEnumPins** pp)
{
    Lock lock;

    const HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    std::vector<IPin*> pins;
    pins.reserve(1 + m_outpins.size());

    pins.push_back(&m_inpin);  //input pin enumerates first

    typedef outpins_t::const_iterator iter_t;

    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();

    while (i != j)
        pins.push_back(*i++);

    const ULONG n = static_cast<ULONG>(pins.size());

    //pins is never empty (it always contains the input pin),
    //so &pins[0] is a valid array pointer.
    return CEnumPins::CreateInstance(&pins[0], n, pp);
}



HRESULT Filter::FindPin( 
    LPCWSTR id1,
    IPin** pp)
{
    if (pp == 0)
        return E_POINTER;
        
    IPin*& p = *pp;
    p = 0;
    
    if (id1 == 0)
        return E_INVALIDARG;
        
    {
        Pin* const pPin = &m_inpin;

        const wstring& id2_ = pPin->m_id;
        const wchar_t* const id2 = id2_.c_str();
        
        if (wcscmp(id1, id2) == 0)  //case-sensitive
        {
            p = pPin;
            p->AddRef();
            
            return S_OK;
        }
    }    
        
    typedef outpins_t::const_iterator iter_t;

    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();
    
    while (i != j)
    {
        Pin* const pPin = *i++;

        const wstring& id2_ = pPin->m_id;
        const wchar_t* const id2 = id2_.c_str();
        
        if (wcscmp(id1, id2) == 0)  //case-sensitive
        {
            p = pPin;
            p->AddRef();
            
            return S_OK;
        }
    }    
    
    return VFW_E_NOT_FOUND;
}



//IBaseFilter::QueryFilterInfo -- copy out the filter's name and
//its graph pointer.  Per COM rules the graph pointer handed to
//the caller is AddRef'd.
HRESULT Filter::QueryFilterInfo(FILTER_INFO* p)
{
    if (p == 0)
        return E_POINTER;

    Lock lock;

    const HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    enum { size = sizeof(p->achName)/sizeof(WCHAR) };

    const errno_t e = wcscpy_s(p->achName, size, m_info.achName);
    assert(e == 0);
    e;  //suppress "unused" warning in release builds

    p->pGraph = m_info.pGraph;

    if (p->pGraph)
        p->pGraph->AddRef();  //caller owns a reference

    return S_OK;
}


//IBaseFilter::JoinFilterGraph -- record the graph we now belong
//to (pGraph is 0 when we are being removed) and our name within
//it.
HRESULT Filter::JoinFilterGraph( 
    IFilterGraph *pGraph,
    LPCWSTR name)
{
    Lock lock;

    const HRESULT hr = lock.Seize(this);

    if (FAILED(hr))
        return hr;

    //NOTE: the graph pointer is deliberately NOT reference-
    //counted here -- see the IBaseFilter::JoinFilterGraph
    //documentation for why (it would create a cycle).

    m_info.pGraph = pGraph;

    if (name)
    {
        enum { size = sizeof(m_info.achName)/sizeof(WCHAR) };

        const errno_t e = wcscpy_s(m_info.achName, size, name);
        assert(e == 0);  //TODO
        e;  //suppress "unused" warning in release builds
    }
    else
        m_info.achName[0] = L'\0';

    return S_OK;
}


//IBaseFilter::QueryVendorInfo -- we have no vendor string to
//report, so the out-param is cleared and E_NOTIMPL returned.
HRESULT Filter::QueryVendorInfo(LPWSTR* pstr)
{
    if (pstr == 0)
        return E_POINTER;

    *pstr = 0;
    return E_NOTIMPL;
}


//Called when the input pin has been connected to an IAsyncReader
//source.  Parses the EBML header and the (first) segment, then
//creates one output pin per recognized video/audio track.
//Returns VFW_E_BUFFER_UNDERFLOW if the data isn't fully available
//yet, VFW_E_INVALID_FILE_FORMAT if the content is not a matroska
//file this filter can handle.  On success, ownership of the
//parsed segment is transferred to m_pSegment.
HRESULT Filter::Open(IAsyncReader* pReader)
{
    assert(pReader);
    assert(m_pSegment == 0);    
    //assert(!bool(m_pAllocator));
    assert(m_outpins.empty());
    
    __int64 result, pos;
    
    //TODO: must initialize header to defaults
    
    MkvParser::EBMLHeader h;
    
    //Parse convention: result < 0 is an error code, result > 0
    //means more data is needed, 0 is success.
    result = h.Parse(pReader, pos);
    
    if (result < 0)  //error
        return static_cast<HRESULT>(result);
        
    if (result > 0)  //need more data
        return VFW_E_BUFFER_UNDERFLOW;  //require full header 
        
    if (h.m_version > 1)
        return VFW_E_INVALID_FILE_FORMAT;
        
    if (h.m_maxIdLength > 8)
        return VFW_E_INVALID_FILE_FORMAT;
    
    if (h.m_maxSizeLength > 8)
        return VFW_E_INVALID_FILE_FORMAT;
        
    if (_stricmp(h.m_docType.c_str(), "matroska") != 0)
        return VFW_E_INVALID_FILE_FORMAT;
        
    //Just the EBML header has been consumed.  pos points
    //to start of (first) segment.
    
    MkvParser::Segment* p;
    
    result = MkvParser::Segment::CreateInstance(pReader, pos, p);
    
    if (result < 0)
        return static_cast<HRESULT>(result);
        
    if (result > 0)
        return VFW_E_BUFFER_UNDERFLOW;
        
    assert(p);
    
    //auto_ptr guards the segment until all validation below
    //succeeds; release() at the end transfers ownership.
    std::auto_ptr<MkvParser::Segment> pSegment(p);
    
    result = pSegment->Parse();
    
    if (result < 0)
        return static_cast<HRESULT>(result);
        
    if (result > 0)
        return VFW_E_BUFFER_UNDERFLOW;
    
    const MkvParser::Tracks* const pTracks = pSegment->GetTracks();
        
    if (pTracks == 0)
        return VFW_E_INVALID_FILE_FORMAT;
        
    const MkvParser::SegmentInfo* const pInfo = pSegment->GetInfo();
    
    if (pInfo == 0)
        return VFW_E_INVALID_FILE_FORMAT;  //TODO: liberalize
    
    using MkvParser::VideoTrack;
    using MkvParser::VideoStream;

    using MkvParser::AudioTrack;
    using MkvParser::AudioStream;
    
    //Enumerate the tracks, creating an output pin (via the
    //TCreateOutpins functor) for each video/audio track found.
    typedef TCreateOutpins<VideoTrack, VideoStream> EV;
    typedef TCreateOutpins<MkvParser::AudioTrack, MkvParser::AudioStream> EA;
    
    const EV ev(this, &VideoStream::CreateInstance);
    pTracks->EnumerateVideoTracks(ev);
    
    const EA ea(this, &AudioStream::CreateInstance);
    pTracks->EnumerateAudioTracks(ea);
    
    if (m_outpins.empty())
        return VFW_E_INVALID_FILE_FORMAT;  //TODO: better return value here?
        
    //NOTE(review): props is currently unused -- it belongs to the
    //commented-out RequestAllocator call below.
    ALLOCATOR_PROPERTIES props;
    props.cbBuffer = GetMaxBufferSize();
    props.cbAlign = 1;
    props.cbPrefix = 0;
    props.cBuffers = 1;
    
    //HRESULT hr = pReader->RequestAllocator(0, &props, &m_pAllocator);
    //assert(SUCCEEDED(hr));  //TODO
    //assert(bool(m_pAllocator));
    
    m_pSegment = pSegment.release();  //filter now owns the segment

    return S_OK;
}



//Transition out of State_Stopped: initialize each output pin's
//streaming state, then start the filter's worker thread.
void Filter::OnStart()
{
    //TODO: init inpin

    typedef outpins_t::iterator iter_t;

    const iter_t j = m_outpins.end();

    for (iter_t i = m_outpins.begin(); i != j; ++i)
    {
        Outpin* const pPin = *i;
        assert(pPin);

        pPin->Init();
    }

    Init();  //start the worker thread
}


//Transition into State_Stopped: stop the worker thread first,
//then tear down each output pin's streaming state.
void Filter::OnStop()
{
    Final();  //terminate the worker thread before touching pins

    typedef outpins_t::iterator iter_t;

    const iter_t j = m_outpins.end();

    for (iter_t i = m_outpins.begin(); i != j; ++i)
    {
        Outpin* const pPin = *i;
        assert(pPin);

        pPin->Final();
    }

    //TODO: final inpin
}


int Filter::GetConnectionCount() const
{
    //filter already locked by caller
    
    int n = 0;
    
    typedef outpins_t::const_iterator iter_t;
    
    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();
    
    while (i != j)
    {
        const Outpin* const pin = *i++;
        assert(pin);
        
        if (pin->m_pPinConnection)
            ++n;
    }
    
    return n;
}        


#if 0
//Disabled implementation: compute the largest per-frame buffer
//size over all output pins.  NOTE(review): if re-enabled,
//lock.Seize(this) will not compile from this const member
//function (Seize takes a non-const Filter*) -- confirm the
//intended constness.  The active GetMaxBufferSize definition
//presumably lives elsewhere (it is called from Open and Main).
long Filter::GetMaxBufferSize() const
{
    Lock lock;
    
    const HRESULT hr = lock.Seize(this);
    assert(SUCCEEDED(hr));
    
    long maxsize = 0;
    
    typedef outpins_t::const_iterator iter_t;
    
    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();
    
    while (i != j)
    {
        const Outpin* const pin = *i++;
        assert(pin);
        
        const long size = pin->GetBufferSize();
        assert(size >= 0);
        
        if (size > maxsize)
            maxsize = size;
    }
    
    return maxsize;
} 
#endif       


//_beginthreadex trampoline: recover the Filter instance from the
//thread argument and run its worker loop.
unsigned Filter::ThreadProc(void* pv)
{
    Filter* const pFilter = static_cast<Filter*>(pv);
    assert(pFilter);

    return pFilter->Main();
}


#if 1
unsigned Filter::Main()
{
    //assert(bool(m_pAllocator));
    
    //TODO: this isn't perfect, because an EBML is necessarily larger
    //than the frame holds.
    //
    //We have been loading clusters in-total, but the size of a cluster
    //will be larger than the largest frame, which all we calculate
    //in GetMaxBufferSize.  We could attempt to allocate a very large
    //buffer for the media sample, larger than a cluster, but this probably
    //won't work because we have no control over how large the cluster is.
    //
    //That means we're going to have to incrementally load a cluster
    //similar to how we load the segment.  But then again, we only need 
    //to wait for the few header bytes to identify the size.  The actual
    //problem is that for us to asynchronously read, we need to read
    //the entire buffer.  I don't really care about asynchronously
    //reading -- the only reason I'm going that is because I need a way
    //to do a timed read, because I want to know whether the bytes are 
    //available.  I'm just as satisfied to do a synchronous read,
    //once I know I have the data.  
    
    //Segment::Parse() returns the total number of bytes as returned
    //by IAsyncReader::Length(&avail, ...).  But if we want to do
    //an async read, we need the start and stop posn, not the total
    //number of bytes available.  Segment::m_pos should be pretty
    //close to value of available bytes.
    //
    //We could read 1 byte from the end of the range we need 
    //asynchronously, and then synchrnously read all of the bytes.
    //Or we could read chunks (aligned reads if more efficient),
    //and then read from the chunks.  That might be simpler because
    //we don't have to typed reads until later.  We load page-by-page
    //as untyped bytes, and then read elements from the chunks.
    //This does mean we'll need a secondary abstraction.
    
    //We don't really need to do this.
    //We could do reads along alignment boundaries (in fact be must,
    //even for async reads).  What Segment::Parse should pass back
    //is pos and size of element (instead of just pos + size, which
    //is pos of last byte needed).  When then read in a series of 
    //pages, that includes all the requested data.  (The pages
    //are aligned, so we will have read in extra data on both ends
    //of the range.)  This has the benefit that we need not compute
    //a max buffer size for reads from the input stream.  In a sense
    //we're building our own cache.
    const long size = GetMaxBufferSize();
    assert(size > 0);
    
    for (;;)
    {
        //const __int64 result = Parse(size);
        //if result < 0 then error
        //  handle this by announcing EOS for all streams
        //
        //if result > 0 then we need to wait for availability
        //  
        //if result = 0 then we have successfully loaded a new cluster
        
        //const __int64 result = m_pSegment->Parse();
        
        //We know how many bytes we need available.
        
        //__int64 pos, size;
        //hr = pSegment->Parse(pos, size);
        //if failed(hr) return;
        //
        //pSegment is bound to the cache.  It doesn't know anything
        //about the input's IAsyncReader interaface.  This thread
        //is the only thread that knows about that.
        //
        //Here we can load the cache one page at a time.
        //We wait for the read of a single page to complete.
        //If the read times out, then we attempt to read that page again.
        //If the read is successful, we move on to th next page,
        //  and read that.
        //What all pages in this request have been swapped in, then
        //we try the parse again: since all pages are now in cache,
        //Segment::Parse should return 0 (meaning success).
        //That means we have loaded a new cluster.  We announce this
        //fact by setting an event, that the streaming threads are
        //waiting for.  The streaming threads consume blocks in this
        //newly-loaded cluster.  This worker thread goes back to the
        //top of the loop, and calls Segment::Parse again.
        //
        //We need to convert from (pos, size) 2-tuple to a list
        //of pages.  When we can then loop over the pages.
        
        //I we give our cache read an special operation that says,
        //load all of the data in (pos, size) in cache, this will
        //correspond to a cluster.  It's probably OK to keep a 
        //cluster in memory (assuming clusters aren't too large).
        //We can then have the streaming threads read using
        //the cache reader in the normal way (by calling SyncRead);
        //since the pages are in cache the blocks will be read
        //quickly.  If the network I/O happens faster than the
        //playback rate, that would be wasteful, since the cluster
        //from which the streaming threads are reading will have
        //an earlier timecode, but if the playback rate is faster
        //than network I/O, then a cached read is the best we 
        //can do (but really, we don't ever want to be in this 
        //place).
        
        //Maybe the easiest solution is for the worker thread
        //to not do anything special when it reaches the end
        //of what's available.  If Segment::Parse returns a value
        //greater than 0, then we've reached the end of what we 
        //can play, so we should just report EC_STARVATION immediately.
        //The only problem is if the worker thread is reading
        //faster than the streaming thread, then there's no problem.
        //So maybe this the whole business of having a worker thread
        //is misguided, since we want it to be the streaming thread
        //that detects whether we've run out of bytes available
        //for streaming.
        //
        //On the other hand, the streaming thread could signal that
        //it's waiting for a new cluster.  If the worker thread
        //reaches the end of what's available, then it can check
        //whether a streaming thread has signalled that it's waiting
        //for a new cluster.  Only if a streaming thread has signalled
        //would the worker thread give up and signal EC_STARVATION;
        //if the streaming threads are earilier in the stream, then
        //the wouldn't signal because the next cluster would always
        //be there.  On the other hand, if a streaming thread reaches
        //a point where there are no more clusters, then that's still
        //the end-of-the-line (at least temporarily), and maybe it
        //doesn't make any sense for it to bother signalling the 
        //worker thread (since it could signal EC_STARVATION) just
        //as easily.
    }
}
#else
//Disabled alternate worker loop: waits on the stop event plus one
//"sample can be populated" event per output pin, and services
//whichever pin signals (via PopulateSamples).  Retained, together
//with its design notes, for the eventual cluster-loading
//implementation.
unsigned Filter::Main()
{
    std::vector<HANDLE> v;
    
    //Handle 0 is the stop event; handles 1..n are the pins' events.
    v.reserve(1 + m_outpins.size());    
    v.push_back(m_hStop);
    
    typedef outpins_t::iterator iter_t;

    iter_t i = m_outpins.begin();
    const iter_t j = m_outpins.end();
    
    while (i != j)
    {
        Outpin* const pPin = *i++;
        assert(pPin);

        const HANDLE h = pPin->m_hSampleCanBePopulated;
        assert(h);
                
        v.push_back(h);
    }
    
    const HANDLE* const hh = &v[0];
    const DWORD n = static_cast<DWORD>(v.size());
    
    const DWORD dwTimeout = INFINITE;
    
    for (;;)
    {
        //We need to request sample
        //pass sample to segment::parse
        //segment::parse can use sample to determine cluser boundaries
        //call IAsyncReader::WaitForNext with 1 sec timeout
        //if read timeout, check for stop bit
        //  if stop bit set, then return
        //  otherwise go back and re-attempt read
        //  I hate having to poll this way but I don't know what else to do
        //otherwise (not read timeout)
        //  parse new cluster
        //  wait for outpins to signal readiness to receive frame
        //  we'll have to determine pin's place in stream
        //  if pin is still feeding off the frame we just read,
        //    then we can give him another block (because it's loaded)
        //  otherwise if pin has already consumed all of its frames
        //    in this block, then we can't service his desire for new
        //    frame until we load another cluster
        //  it might also be the case the no pins are ready to consume
        //    a new frame, so we can immediately wait for another frame
        //  we have a problem here - if pin wants a frame, and we have it
        //    loaded, then we shouldn't delay giving him a frame because
        //    we're waiting to load the *next* cluster
        //  if all streams have consumed all frames in curr cluster,
        //    then loop back to top and wait for next cluster.
        
        //We could change the PopulateSample method to work off of 
        //clusters and blocks within clusters, instead of flattening
        //the block hierarchy as we do now.  A streaming thread could
        //signal its desire for a new cluster, when it has exhausted
        //the supply of blocks for this track on the current cluster.
        //When the worker thread has a new cluster, it can wait for
        //a signal from the streaming threads.  The streaming threads
        //consume the blocks on the new cluster, and then signal when
        //they consume all of the blocks.  
        //
        //It would be nice if we new here how far behind the slowest
        //streaming thread is.  If the worker thread is.
        //
        //Whenever the wroker thread creates a new cluster, it
        //signals availability of the new cluster.  The streaming
        //thread can wait for availability of the new cluster.
        //(This is similar to what we do already, with media 
        //samples.)  When the streaming thread wakes up (because a 
        //new cluster has been announced), it can enter the critical
        //section and check whether this cluster is of interest.
        //If not it goes back to sleep and waits for another signal;
        //otherwise, it consumes blocks from this cluster.
        //
        //The only time a streaming thread would wait is because
        //it ran out of clusters.  The only time the worker thread
        //would signal is because a new cluster becaome availalbe.
        //The worker thread need not wait for availability of a 
        //media sample; that is strictly the concern of the streaming
        //thread.  It doesn't seem like the worker thread would need
        //to wait for anything besides availability of new data.
        //
        //We don't necessarily want to fall off of the end of the
        //queue, since then we'd lose our place.  We'll have to 
        //conditionally check whether a new cluster is available
        //before navigating to the next cluster.
        
        //Wait-any over the stop event and all pin events.
        const DWORD dw = WaitForMultipleObjects(n, hh, 0, dwTimeout);
        
#if 0
        if (dw == WAIT_TIMEOUT)
        {
            Lock lock;
            
            const HRESULT hr = lock.Seize(this);
            assert(SUCCEEDED(hr));
            
            const __int64 result = m_pSegment->Parse();
            assert(result == 0);
            
            if (m_pSegment->Unparsed() <= 0)
                dwTimeout = INFINITE;
            
            continue;
        }
#endif
        
        assert(dw >= WAIT_OBJECT_0);
        assert(dw < (WAIT_OBJECT_0 + n));
        
        if (dw == WAIT_OBJECT_0)  //hStop
            return 0;
            
        //Translate the wait result into an outpin index (handle 0
        //is the stop event, so outpin k is handle k+1).
        const DWORD idx = dw - (WAIT_OBJECT_0 + 1);
        assert(idx < m_outpins.size());
        
        PopulateSamples(hh + 1, idx);
    }
}
#endif


//Service the output pins, starting with the pin (idx) whose
//"sample can be populated" event just signalled.  After each pin
//is serviced, a zero-timeout wait-any over the *remaining* pins'
//events determines whether a later pin is also ready; if none is,
//return.  hh_begin points at the first outpin's event handle
//(i.e. the worker's handle array without the leading stop event).
//Only used by the disabled alternate worker loop.
void Filter::PopulateSamples(const HANDLE* hh_begin, DWORD idx)
{
    //idx represents the pin that just signalled
    
    for (;;)
    {
        Outpin* const pPin = m_outpins[idx];
        assert(pPin);
        
        pPin->PopulateSample();
        
        if (++idx >= m_outpins.size())
            return;  //no pins remain beyond the one just serviced

        //Poll (timeout 0) the events of the pins after idx.
        const HANDLE* const hh = hh_begin + idx;
        const DWORD n = static_cast<DWORD>(m_outpins.size()) - idx;
        
        const DWORD dw = WaitForMultipleObjects(n, hh, 0, 0);
        
        if (dw == WAIT_TIMEOUT)
            return;  //no other pin is currently ready
                            
        assert(dw >= WAIT_OBJECT_0);
        assert(dw < (WAIT_OBJECT_0 + n));
        
        //Map the wait result back to an absolute outpin index.
        idx += dw - WAIT_OBJECT_0;
    }
}


//Called when the input pin is disconnected: tear down every
//output pin (disconnecting any that are still connected
//downstream, via the graph) and discard the parsed segment.
HRESULT Filter::OnDisconnectInpin()
{
    while (!m_outpins.empty())
    {
        Outpin* const pPin = m_outpins.back();
        assert(pPin);

        IPin* const pPeer = pPin->m_pPinConnection;

        if (pPeer)
        {
            assert(m_info.pGraph);

            //Break both halves of the connection.
            HRESULT hr = m_info.pGraph->Disconnect(pPeer);
            assert(SUCCEEDED(hr));

            hr = m_info.pGraph->Disconnect(pPin);
            assert(SUCCEEDED(hr));
        }

        m_outpins.pop_back();
        delete pPin;
    }

    delete m_pSegment;
    m_pSegment = 0;

    return S_OK;
}


} //end namespace MkvSplit

