#include "video_thread.h"
#include "web_sock_holder.h"
extern "C"
{
#include "avformat.h"
#include "swscale.h"
#include "cv.h"
}
namespace vmon_web
{
    class WebSockHolderEntry
    {
    public:
        WebSockHolderEntry(WebSockHolder* sockHolder):
          m_sockHolder(sockHolder)
          {
              // TODO: when this is constructed, send out the header
              // 0. guess format
              m_outputFormat = guess_format("swf", 0, 0);
              if (!m_outputFormat)
                  throw "Can't guess format";
              // 1. open format context
              m_formatContext = av_alloc_format_context();
              if (!m_formatContext)
                  throw "can't alloc format context";
              m_formatContext->oformat = m_outputFormat;
              // 2. open video stream
              m_videoStream = av_new_stream(m_formatContext, 0);
              if (!m_videoStream)
                  throw "can't alloc new stream";
              m_codecContext = m_videoStream->codec;
              m_codecContext->codec_id = m_outputFormat->video_codec;
              m_codecContext->codec_type = CODEC_TYPE_VIDEO;
              m_codecContext->bit_rate = 400000;
              m_codecContext->width = m_width = 320;
              m_codecContext->height = m_height = 240;
              m_codecContext->time_base.den = 25;
              m_codecContext->time_base.num = 1;
              m_codecContext->gop_size = 12;
              m_codecContext->pix_fmt = PIX_FMT_YUV420P;
              // 3. set output parameters
              if (av_set_parameters(m_formatContext, 0) < 0)
                  throw "can't set parameters";
              dump_format(m_formatContext, 0, "nevermind", 1);
              // 4. allocate output buffer
              m_outputBufferSize = 200000;
              m_outputBuffer = (uint8_t*)av_malloc(m_outputBufferSize);
              if (!m_outputBuffer)
                  throw "can't allocate output buffer";

              // 5. open dyn buf for header
              url_open_dyn_buf(&m_formatContext->pb);
              m_formatContext->pb.is_streamed = 1;

              // 6. write header
              if (av_write_header(m_formatContext) < 0)
                  throw "can't write header";
              uint8_t* buf = 0;
              int len = url_close_dyn_buf(&m_formatContext->pb, &buf);
              if (len > 0)
              {
                  // 7. send data out
                  m_sockHolder->send(buf, len);
              }
              // 8. free dyn buf
              av_free(buf);
              // 9. open codec
              m_codec = avcodec_find_encoder(m_codecContext->codec_id);
              if (avcodec_open(m_codecContext, m_codec) < 0)
                  throw "can't open codec";
              // 10. create frame
              m_frame = avcodec_alloc_frame();
              if (!m_frame)
                  throw "can't alloc frame";
              int size = avpicture_get_size(m_codecContext->pix_fmt, m_width, m_height);
              uint8_t* pictureBuffer = (uint8_t*)av_malloc(size);
              if (pictureBuffer == 0)
              {
                  av_free(m_frame);
                  throw "can't alloc picture buffer";
              }
              avpicture_fill((AVPicture*)m_frame, pictureBuffer, m_codecContext->pix_fmt, m_width, m_height);
          }

          virtual ~WebSockHolderEntry()
          {
          }

          int getState() { return 0; }

          // Push a frame to the stream.
          // we assume the frame is already in desired format?
          // only do the simplest push thing
          bool pushFrame(AVFrame* frame)
          {
              int outSize = avcodec_encode_video(m_codecContext, m_outputBuffer, m_outputBufferSize, frame);
              if (outSize > 0)
              {
                  AVPacket packet;
                  url_open_dyn_buf(&m_formatContext->pb);
                  av_init_packet(&packet);
                  if (m_codecContext->coded_frame->pts != AV_NOPTS_VALUE)
                      packet.pts= av_rescale_q(m_codecContext->coded_frame->pts, 
                      m_codecContext->time_base, 
                      m_videoStream->time_base);
                  if(m_codecContext->coded_frame->key_frame)
                      packet.flags |= PKT_FLAG_KEY;
                  packet.stream_index= m_videoStream->index;
                  packet.data= m_outputBuffer;
                  packet.size= outSize;
                  av_write_frame(m_formatContext, &packet); // we'll need to check return value?
                  uint8_t* buf = 0;
                  int len = url_close_dyn_buf(&m_formatContext->pb, &buf);
                  if (len > 0)
                  {
                      // send out the packet
                      m_sockHolder->send(buf, len);
                      av_free(buf);
                  }
                  av_free_packet(&packet);
              }
              return true;
          }

          bool pushFrame(IplImage* iplImage)
          {
              // convert the frame on the fly
              struct SwsContext* img_convert_ctx = 0;
              int linesize[4] = {0, 0, 0, 0};

              img_convert_ctx = sws_getContext(iplImage->width, iplImage->height,
                  PIX_FMT_BGR24,
                  m_width,
                  m_height,
                  m_codecContext->pix_fmt, SWS_BICUBIC, 0, 0, 0);
              if (img_convert_ctx != 0)
              {
                  linesize[0] = 3 * iplImage->width;
                  uint8_t* srcData = (uint8_t*)iplImage->imageData;
                  sws_scale(img_convert_ctx, 
                      &srcData, linesize, 0, iplImage->height,
                      m_frame->data, m_frame->linesize);
                  sws_freeContext(img_convert_ctx);

                  return pushFrame(m_frame);
              }
              return false;
          }

    public:
        WebSockHolder* m_sockHolder;
        AVCodecContext* m_codecContext;
        AVFormatContext* m_formatContext;
        AVOutputFormat* m_outputFormat;
        AVCodec* m_codec;
        AVStream* m_videoStream;
        AVStream m_stream;
        uint8_t* m_outputBuffer;
        uint32_t m_outputBufferSize;
        AVFrame* m_frame;

        int m_width;
        int m_height;
    };

    // Registers all FFmpeg formats and codecs once up front; the capture
    // thread itself is started lazily by addSockHolder().
    VideoThread::VideoThread():
    m_stopVideo(false)
    {
        av_register_all();
    }

    VideoThread::~VideoThread()
    {
        // FIX: the original destructor leaked any WebSockHolderEntry
        // objects still registered at teardown.
        // NOTE(review): we deliberately do not stop()/join() here -- the
        // thread may never have been started; confirm the owner calls
        // stop() before destruction. m_capture is likewise not released
        // because it may be uninitialized if run() never executed --
        // TODO initialize it to 0 in the ctor and cvReleaseCapture here.
        lock();
        while (!m_sockHolders.empty())
        {
            WebSockHolderEntry* entry = m_sockHolders.front();
            m_sockHolders.pop_front();
            delete entry;
        }
        unlock();
    }

    // Register a new client. The streaming entry is built (and the SWF
    // header sent) before the lock is taken, so the expensive encoder
    // setup does not block the capture loop.
    void VideoThread::addSockHolder(WebSockHolder* sockHolder)
    {
        WebSockHolderEntry* entry = new WebSockHolderEntry(sockHolder);
        lock();
        m_sockHolders.push_back(entry);
        // Spin up the capture thread lazily on the first subscriber.
        if (isStopped())
            start();
        unlock();
    }

    // Unregister a client: find the first entry bound to this socket
    // holder, drop it from the list and destroy it.
    void VideoThread::removeSockHolder(WebSockHolder* sockHolder)
    {
        lock();
        for (list<WebSockHolderEntry*>::iterator it = m_sockHolders.begin();
             it != m_sockHolders.end(); ++it)
        {
            if ((*it)->m_sockHolder == sockHolder)
            {
                WebSockHolderEntry* doomed = *it;
                m_sockHolders.erase(it); // erase first, then free
                delete doomed;
                break;
            }
        }
        unlock();
    }

    // Open the default camera (device index 0).
    // Throws const char* if no capture device is available.
    void VideoThread::openVideoInput()
    {
        if ((m_capture = cvCaptureFromCAM(0)) == 0)
            throw "Failed to open capture";
    }

    // Capture loop: grabs frames from the camera and fans each one out
    // to every registered socket entry until stop() sets m_stopVideo.
    void VideoThread::run()
    {
        openVideoInput();
        while (!m_stopVideo)
        {
            // Unlocked peek at the subscriber count; worst case we skip
            // or take one extra capture cycle -- benign.
            if (m_sockHolders.size() > 0)
            {
                // get frame (owned by the capture device -- must not be
                // released by us, hence no cvReleaseImage below)
                IplImage* image = captureImage();
                // FIX: captureImage() returns 0 when the grab fails; the
                // original passed that straight to pushFrame(IplImage*),
                // which dereferences it.
                if (image != 0)
                {
                    // send frame out
                    lock();
                    list<WebSockHolderEntry*>::iterator iter = m_sockHolders.begin();
                    while (iter != m_sockHolders.end())
                    {
                        WebSockHolderEntry* entry = *iter;
                        if (entry->pushFrame(image) == false)
                        {
                            // conversion failed: drop this subscriber
                            m_sockHolders.erase(iter);
                            delete entry;
                            break;
                        }
                        ++iter;
                    }

                    // TODO: control the thread life time more strictly...
                    //if (m_sockHolders.size() == 0)
                    //	m_stopVideo = true;
                    unlock();
                }
            }
            // TODO: delay some time
            cvWaitKey(10);
        }
    }

    // Ask the capture loop to exit and wait for the thread to finish.
    // Must not be called from the video thread itself (join() on self
    // would deadlock).
    // NOTE(review): m_stopVideo is a plain bool written here and read by
    // run() with no synchronization -- formally a data race; consider an
    // atomic/volatile flag.
    void VideoThread::stop()
    {
        m_stopVideo = true;
        join();
    }

    // Grab and decode one frame from the camera. The returned image is
    // owned by the capture device and must not be released by the
    // caller. Returns 0 when no frame could be grabbed.
    IplImage* VideoThread::captureImage()
    {
        return cvGrabFrame(m_capture) ? cvRetrieveFrame(m_capture) : 0;
    }

}
