#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:ts=4:et:

import pygst
import pygtk
import gobject

from optparse import OptionParser
from subprocess import call
import os
import string
import sys
import time

# Command-line interface. Every numeric option defaults to 0, which the
# encoder bins treat as "keep the source value / element default".
parser = OptionParser()
parser.add_option('--w', '--width', type="int", dest="width", default=0,
                  help="target video width in pixels (0 = keep source)")
parser.add_option('--h', '--height', type="int", dest="height", default=0,
                  help="target video height in pixels (0 = keep source)")
parser.add_option('--abr', '--audio-bitrate', type="int", dest="audiobitrate", default=0,
                  help="audio bitrate in kbit/s (0 = encoder default)")
# '--video-videobitrate' was a typo but is kept for backward compatibility;
# '--video-bitrate' is the corrected spelling.
parser.add_option('--vbr', '--video-videobitrate', '--video-bitrate', type="int",
                  dest="videobitrate", default=0,
                  help="video bitrate in kbit/s (0 = encoder default)")
parser.add_option('--asr', '--audio-samplerate', type="int", dest="audiosamplerate", default=0,
                  help="audio sample rate in Hz (0 = keep source)")
parser.add_option('--deblock', default=0, dest='deblock',
                  help="insert postproc h/v deblock filters before scaling")
(options, args) = parser.parse_args()

import gst

class AudioEncoder(gst.Bin):
    """GStreamer bin encoding raw audio to MP3 (audioconvert/audioresample ->
    lame -> mp3parse), exposed via ghost 'sink'/'src' pads.

    Keyword args:
        bitrate:    MP3 bitrate in kbit/s; 0 (or absent) keeps lame's default.
        samplerate: output sample rate in Hz; 0 (or absent) keeps the source rate.
    """
    def __init__(self, **args):
        gst.Bin.__init__(self)
        chain = gst.parse_launch('queue name="input" ! audioconvert ! \
                                  audioresample ! capsfilter name="caps" ! lame name="lame" ! \
                                  mp3parse ! queue name="output"')
        # Look the encoder up once instead of once per property.
        lame = chain.get_by_name('lame')
        # NOTE(review): mode=3 is a fixed LAME channel-mode value — confirm
        # the intended mode against the lame element's property docs.
        lame.set_property('mode', 3)
        if args.get('bitrate', 0) != 0:
            lame.set_property("bitrate", args['bitrate'])
        if args.get('samplerate', 0) != 0:
            # Force the resampler's output rate via the capsfilter.
            chain.get_by_name('caps').set_property(
                'caps',
                gst.caps_from_string('audio/x-raw-int,rate=' + str(args['samplerate'])))
        self.add(chain)
        # Expose the inner queues' pads as this bin's own pads.
        self.add_pad(gst.GhostPad('src', chain.get_by_name('output').get_pad('src')))
        self.add_pad(gst.GhostPad('sink', chain.get_by_name('input').get_pad('sink')))

class VideoEncoder(gst.Bin):
    """GStreamer bin encoding raw video to FLV video (ffenc_flv), with
    optional postproc deblocking and scaling, exposed via ghost pads.

    Keyword args:
        width, height: target frame size in pixels; 0 keeps the source size.
        bitrate:       video bitrate in kbit/s; 0 keeps the encoder default.
        deblock:       if the key is present, insert postproc h/v deblock
                       filters between colorspace conversion and scaling.
    """
    def __init__(self, **args):
        gst.Bin.__init__(self)
        in_queue = gst.element_factory_make('queue')
        tee = gst.element_factory_make('tee')
        colorspace = gst.element_factory_make('ffmpegcolorspace')
        videoscale = gst.element_factory_make('videoscale')
        capsfilter = gst.element_factory_make('capsfilter')  # renamed: 'filter' shadowed the builtin
        hdeblock = gst.element_factory_make('postproc_hdeblock')
        vdeblock = gst.element_factory_make('postproc_vdeblock')
        ffenc = gst.element_factory_make('ffenc_flv')
        out_queue = gst.element_factory_make('queue')
        self.add(in_queue, tee, colorspace, videoscale, capsfilter, ffenc,
                 out_queue, hdeblock, vdeblock)
        in_queue.link(tee)
        tee.link(colorspace)
        # has_key() is deprecated; the mere presence of the key enables deblocking.
        if 'deblock' in args:
            colorspace.link(hdeblock)
            hdeblock.link(vdeblock)
            vdeblock.link(videoscale)
        else:
            colorspace.link(videoscale)
        videoscale.link(capsfilter)
        # NOTE(review): method=1 is a fixed videoscale scaling method — confirm
        # which algorithm that selects in the 0.10 element docs.
        videoscale.set_property("method", 1)
        capsfilter.link(ffenc)
        ffenc.link(out_queue)
        ffenc.set_property("mb-decision", 1)
        # Build the caps restricting the scaler's output.
        basecaps = ['video/x-raw-yuv', 'pixel-aspect-ratio=1/1']
        if args.get('height', 0) != 0:
            basecaps.append('height=' + str(args['height']))
        if args.get('width', 0) != 0:
            basecaps.append('width=' + str(args['width']))
        capsfilter.set_property('caps', gst.caps_from_string(','.join(basecaps)))
        if args.get('bitrate', 0) != 0:
            # CLI takes kbit/s; ffenc_flv expects bits/s.
            ffenc.set_property('bitrate', args['bitrate'] * 1000)
            # NOTE(review): tolerance is in the same unit as bitrate; this may
            # have been meant as bitrate * 1000 * 0.05 — confirm before changing.
            ffenc.set_property('bitrate-tolerance', args['bitrate'] * 0.05)
        print("Video-Bitrate: " + str(ffenc.get_property('bitrate')))
        self.add_pad(gst.GhostPad('src', out_queue.get_pad('src')))
        self.add_pad(gst.GhostPad('sink', in_queue.get_pad('sink')))

mainloop = gobject.MainLoop()

# Top-level pipeline: filesrc -> decodebin -> (encoders, wired up later from
# the new-decoded-pad callback) -> ffmux_flv -> filesink.
bin = gst.element_factory_make("pipeline", "converter")

source = gst.element_factory_make("filesrc", "source")
source.set_property("location", args[0])
bin.add(source)

decoder = gst.element_factory_make("decodebin", "decoder")
bin.add(decoder)
source.link(decoder)

muxer = gst.element_factory_make("ffmux_flv", "muxer")

# Collect the video encoder settings once; 'deblock' is only passed when
# requested, since its mere presence switches the deblock path on.
encoder_args = {
    'height': options.height,
    'width': options.width,
    'bitrate': options.videobitrate,
}
if options.deblock:
    encoder_args['deblock'] = 1
video_encoder = VideoEncoder(**encoder_args)
audio_encoder = AudioEncoder(bitrate=options.audiobitrate,
                             samplerate=options.audiosamplerate)

sink = gst.element_factory_make('filesink', 'sink')
# Write to a temporary name; yamdi turns it into the final file afterwards.
sink.set_property("location", args[1] + ".pre_tool")
bin.add(muxer, sink)
muxer.link(sink)

# Flags so the decodebin callback hooks up at most one stream of each kind.
audio = 0
video = 0
def new_decoded_pad(element, pad, boolean):
    """decodebin 'new-decoded-pad' callback: attach the first audio and the
    first video stream to their encoder bins and start the encoders.

    Additional streams of either kind are reported but ignored.
    """
    # Only 'audio'/'video' are assigned here; the stale 'count' entry in the
    # original global statement referenced a name that never existed.
    global audio, video
    mime = pad.get_caps()[0].get_name()
    if mime.startswith("audio"):
        if not audio:
            audio = 1
            bin.add(audio_encoder)
            element.link(audio_encoder)
            audio_encoder.link(muxer)
            audio_encoder.set_state(gst.STATE_PLAYING)
        else:
            # Message was mojibake ("AudiostrÃ¶me"); encoding fixed.
            print("Achtung! Mehrere Audioströme - ungetestet!")
    elif mime.startswith("video"):
        if not video:
            video = 1
            bin.add(video_encoder)
            element.link(video_encoder)
            video_encoder.link(muxer)
            video_encoder.set_state(gst.STATE_PLAYING)
        else:
            print("Achtung! Mehrere Videoströme - ungetestet!")

# decodebin emits 'new-decoded-pad' once per elementary stream it decodes.
decoder.connect("new-decoded-pad", new_decoded_pad)

def bus_message_handler(bus, message):
    """Pipeline bus watch: log errors/warnings to stderr; quit the mainloop
    on error, end-of-stream or segment-done."""
    if message.type == gst.MESSAGE_ERROR:
        (gerror, debug) = message.parse_error()
        sys.stderr.write('error ' + str(debug) + "\n")
        mainloop.quit()
    elif message.type == gst.MESSAGE_WARNING:
        (gerror, debug) = message.parse_warning()
        # Was 'warning' with no separator; keep the format consistent with
        # the error branch above.
        sys.stderr.write('warning ' + str(debug) + "\n")
    elif message.type == gst.MESSAGE_SEGMENT_DONE or message.type == gst.MESSAGE_EOS:
        mainloop.quit()

# Watch the pipeline bus so bus_message_handler sees errors and EOS.
bus = bin.get_bus()
bus.add_signal_watch()
bus.connect("message", bus_message_handler)

class timeTrack(object):
    """Bounded FIFO of (logical time, real time) samples used to compute a
    floating average of the transcoding speed.

    Each sample pairs a stream position (seconds) with the wall-clock time it
    was observed at; speed() averages delta-logical / delta-real over
    consecutive samples.
    """

    def __init__(self, max_length=3):
        # data holds [ltime, rtime] pairs, oldest first.
        self.data = []
        self.max_length = max_length

    def add(self, ltime, rtime):
        """Append a sample, dropping the oldest beyond max_length."""
        self.data.append([ltime, rtime])
        while len(self.data) > self.max_length:
            self.data.pop(0)

    def speed(self):
        """Average speed over consecutive sample pairs; 0 with < 2 samples.

        NOTE(review): two samples with identical real time would raise
        ZeroDivisionError, as in the original.
        """
        if len(self.data) < 2:
            return 0
        # Pairwise deltas via zip instead of the py2-only xrange index loop.
        speeds = [float(a[0] - b[0]) / (a[1] - b[1])
                  for a, b in zip(self.data, self.data[1:])]
        return sum(speeds) / len(speeds)

# Floating average over the last 5 progress samples.
tracker = timeTrack(5)

def timeout():
    """Periodic (500 ms) progress callback: print percent done and speed.

    Returns 1 so gobject keeps the timeout installed. If the position or
    duration query fails (e.g. before preroll), the tick is skipped — the
    original left 'poss'/'dur' unbound and crashed with a NameError.
    """
    try:
        position, fmt = bin.query_position(gst.FORMAT_TIME)
        duration, fmt = bin.query_duration(gst.FORMAT_TIME)
    except Exception:
        # Query not answerable yet; try again on the next tick.
        return 1
    poss = position / gst.SECOND
    dur = duration / gst.SECOND
    if dur == 0:
        # Avoid ZeroDivisionError on streams with unknown/zero duration.
        return 1
    tracker.add(poss, time.time())
    sys.stdout.write("\r                      ")
    sys.stdout.write("\r%3.0f%% %2.2f-fach" % (float(poss) / dur * 100, tracker.speed()))
    sys.stdout.flush()
    return 1

print "Begin Transcoding"
tracker.add(0, time.time())
bin.set_state(gst.STATE_PLAYING)

gobject.timeout_add(500, timeout)

mainloop.run()

print
print "Done"
print "Adding Meta-Data to flv file"
call(['yamdi', '-i', args[1] + ".pre_tool", "-o", args[1]])

print "Removing temporary file"
os.unlink(args[1] + ".pre_tool")

