#!/usr/bin/python
# -*- coding: utf-8 -*-

from os import path
import logging
# Debug
import random

import clutter
import gst
import cluttergst

from twisted.internet import reactor

import jumble.label
import jumble.layout
import jumble.background
import jumble.group
import jumble.texture

ROOT_DIR = path.abspath(path.dirname(__file__))

log = logging.getLogger("AlicanteClutter")

class VideoChannel:
    """Clutter-based RTP TV-channel viewer.

    Builds one GStreamer pipeline per media type (H.263 video over RTP,
    AMR-NB audio over RTP), renders the decoded video into a Clutter
    texture laid out with the ``jumble`` helpers, and sends RTCP reports
    back to the video server.  Switching channel tears the current
    pipelines down and builds fresh ones (see setChannel / nextStream).
    """

    def __init__(self, stage, mapping, rtp_recv_port, videoServerIP):
        """
        stage         -- the clutter Stage the video is drawn on
        mapping       -- key-mapping object; its ``numericKey`` dict maps
                         clutter keyvals to channel numbers
        rtp_recv_port -- base UDP port; all other ports derive from it
        videoServerIP -- host that receives our outgoing RTCP reports
        """
        self.currentChannel = None
        self.stage = stage
        self.actionsMapping = mapping
        self.homebox = None

        # RTP/RTCP port layout, derived from the base port:
        #   video: RTP = base, RTCP recv = base+1, RTCP send = base+5
        #   audio: RTP = base+50, RTCP recv = base+51, RTCP send = base+55
        self.rtp_recv_port = rtp_recv_port
        self.rtcp_recv_port = rtp_recv_port + 1
        self.rtcp_send_port = rtp_recv_port + 5

        self.rtp_audio_recv_port = rtp_recv_port + 50
        self.rtcp_audio_recv_port = self.rtp_audio_recv_port + 1
        self.rtcp_audio_send_port = self.rtp_audio_recv_port + 5

        self.videoServerIP = videoServerIP

        self.stage.connect('key-press-event', self.on_key_press_event)

        # Will contain the video (or demo images), stretched over the stage.
        self.videoOverlay = jumble.group.Group(enabled=True)
        self.videoOverlay.set_size(*self.stage.get_size())
        self.videoOverlay.set_layout(jumble.layout.HLayout(name="EventLayout"))

        # Displays the selected channel number in the NE corner.
        self.channelSelection = jumble.group.Group(enabled=True)
        self.channelSelection.set_layout(jumble.layout.HLayout(name="EventLayout"))
        self.channelSelection.set_size(*self.stage.get_size())
        self.layoutChannelSelection = jumble.layout.VLayout(vpad=20, hpad=40)
        self.message = jumble.label.Label(name="channelselect", text="")
        self.layoutChannelSelection.add_actor(self.message, expand=True)
        self.channelSelection.layout.add_layout(self.layoutChannelSelection, align=jumble.layout.Layout.ALIGN_NE, expand=10)

        self.stage.add(self.videoOverlay)
        self.stage.add(self.channelSelection)

    def initStreams(self):
        """Create the initial (idle, STATE_NULL) video and audio pipelines."""
        self.currentPipeline = self.newStream()
        self.currentAudioPipeline = self.newAudioStream()

    def nextStream(self):
        """Replace both pipelines with freshly built ones.

        The caller is expected to have put the old pipelines in
        STATE_NULL first (setChannel does); here we only drop our
        references so the old pipelines can be garbage collected.
        """
        old = self.currentPipeline
        self.currentPipeline = self.newStream()
        del old
        old = self.currentAudioPipeline
        self.currentAudioPipeline = self.newAudioStream()
        del old

    def setHomeboxConnection(self, homeboxClient):
        """Attach the homebox client that gets notified on channel changes."""
        self.homebox = homeboxClient

    def removeHomeboxConnection(self):
        """Detach the homebox client; channel changes become local only."""
        self.homebox = None

    def demoBackground(self, channelID):
        """Show a static demo picture for *channelID* (images/<id>.jpg).

        NOTE(review): ``self.picture`` is never assigned in this class --
        presumably set up elsewhere; confirm before relying on this method.
        """
        try:
            self.picture.set_from_file( path.join(ROOT_DIR, 'images/%d.jpg' % channelID) )
        except Exception:
            # Was a bare ``except:``; narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            log.error("No picture")

    def on_error(self, bus, msg):
        """Bus 'message::error' callback: route to the module logger."""
        log.error("GStreamer error: %s", msg)

    def newStream(self):
        """Build and return an idle (STATE_NULL) H.263 RTP video pipeline.

        Topology:
            udpsrc(rtp) -> gstrtpbin -> rtph263pdepay -> ffdec_h263
                        -> ffmpegcolorspace -> cluttergst.VideoSink
        plus a udpsrc/udpsink pair for the RTCP receive/send legs.
        The decoded video lands in a jumble texture added to videoOverlay.
        """
        log.debug( "Video Stream-- Video server: %s   RTP port: %d" % (self.videoServerIP, self.rtp_recv_port) )

        # Texture the clutter-gst sink paints into, sized for 16:9.
        video_texture = jumble.texture.Texture()
        video_texture.set_width( self.stage.get_width() )
        video_texture.set_height( int(self.stage.get_width()*9/16) ) # 16/9 format

        pipeline = gst.Pipeline()

        # Listen for errors posted on the pipeline bus.
        bus = pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message::error', self.on_error)

        # UDP elements for the RTP data and the two RTCP legs.
        rtpsrc = gst.element_factory_make('udpsrc', 'rtpsrc')
        rtpsrc.set_property('port', self.rtp_recv_port)

        # udpsrc cannot guess the payload; declare the RTP caps explicitly.
        caps = gst.caps_from_string("application/x-rtp,media=(string)video,clock-rate=(int)90000,encoding-name=(string)H263-1998")
        rtpsrc.set_property('caps', caps)

        rtcpsrc = gst.element_factory_make('udpsrc', 'rtcpsrc')
        rtcpsrc.set_property('port', self.rtcp_recv_port)

        rtcpsink = gst.element_factory_make('udpsink', 'rtcpsink')
        rtcpsink.set_property('port', self.rtcp_send_port)
        rtcpsink.set_property('host', self.videoServerIP)

        # No need for synchronisation or preroll on the RTCP sink.
        rtcpsink.set_property('async', False)
        rtcpsink.set_property('sync', False)

        pipeline.add(rtpsrc, rtcpsrc, rtcpsink)

        # Depayload RTP into H.263, decode, convert colourspace, then
        # hand frames to the clutter texture sink.
        videodepay = gst.element_factory_make("rtph263pdepay", 'videodepay')
        videodec = gst.element_factory_make("ffdec_h263", 'videodec')
        colorspace = gst.element_factory_make("ffmpegcolorspace", "colorspace")
        sink = cluttergst.VideoSink(video_texture) # Attaches the output texture to gstreamer sink

        pipeline.add(videodepay, videodec, colorspace, sink)
        gst.element_link_many(videodepay, videodec, colorspace, sink)

        # gstrtpbin manages the RTP session (session 0 = video).
        rtpbin = gst.element_factory_make('gstrtpbin', 'rtpbin')
        pipeline.add(rtpbin)

        # RTP data into session 0.
        srcpad = gst.Element.get_static_pad(rtpsrc, 'src')
        sinkpad = gst.Element.get_request_pad(rtpbin, 'recv_rtp_sink_0')
        gst.Pad.link(srcpad, sinkpad)

        # Incoming RTCP for session 0.
        srcpad = gst.Element.get_static_pad(rtcpsrc, 'src')
        sinkpad = gst.Element.get_request_pad(rtpbin, 'recv_rtcp_sink_0')
        gst.Pad.link(srcpad, sinkpad)

        # Outgoing RTCP reports back to the sender.
        srcpad = gst.Element.get_request_pad(rtpbin, 'send_rtcp_src_0')
        sinkpad = gst.Element.get_static_pad(rtcpsink, 'sink')
        gst.Pad.link(srcpad, sinkpad)

        def pad_added_cb(rtpbin, new_pad):
            # rtpbin exposes its RTP src pad only once data arrives;
            # (re)connect it to the depayloader, dropping any stale link.
            qv_pad = videodepay.get_pad("sink")
            log.debug("Video pad added")

            if qv_pad.is_linked():
                peer = qv_pad.get_peer()
                peer.unlink( qv_pad )

            new_pad.link(qv_pad)

        def pad_removed_cb(rtpbin, pad):
            # Stream went away: take the texture off the overlay.
            self.videoOverlay.layout.remove_actor(video_texture)

        rtpbin.connect('pad-added', pad_added_cb)
        rtpbin.connect('pad-removed', pad_removed_cb)

        pipeline.set_state(gst.STATE_NULL)

        self.videoOverlay.layout.add_actor(video_texture, fill=jumble.layout.Layout.FILL_HORIZONTAL)

        return pipeline

    def newAudioStream(self):
        """Build and return an idle (STATE_NULL) AMR-NB RTP audio pipeline.

        Topology:
            udpsrc(rtp) -> gstrtpbin -> rtpamrdepay -> amrnbdec
                        -> audioconvert -> audioresample -> alsasink
        plus a udpsrc/udpsink pair for the RTCP receive/send legs
        (RTP session 1).
        """
        log.debug( "Audio Stream-- Video server: %s   RTP port: %d" % (self.videoServerIP, self.rtp_audio_recv_port) )

        pipeline = gst.Pipeline()

        # Listen for errors posted on the pipeline bus.
        bus = pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message::error', self.on_error)

        # UDP elements for the RTP data and the two RTCP legs.
        rtpsrc = gst.element_factory_make('udpsrc', 'rtpsrc')
        rtpsrc.set_property('port', self.rtp_audio_recv_port)

        # udpsrc cannot guess the payload; declare the RTP caps explicitly.
        caps = gst.caps_from_string("application/x-rtp,media=(string)audio,clock-rate=(int)8000,encoding-name=(string)AMR,encoding-params=(string)1,octet-align=(string)1")
        rtpsrc.set_property('caps', caps)

        rtcpsrc = gst.element_factory_make('udpsrc', 'rtcpsrc')
        rtcpsrc.set_property('port', self.rtcp_audio_recv_port)

        rtcpsink = gst.element_factory_make('udpsink', 'rtcpsink')
        rtcpsink.set_property('port', self.rtcp_audio_send_port)
        rtcpsink.set_property('host', self.videoServerIP)

        # No need for synchronisation or preroll on the RTCP sink.
        rtcpsink.set_property('async', False)
        rtcpsink.set_property('sync', False)

        pipeline.add(rtpsrc, rtcpsrc, rtcpsink)

        # Depayload RTP into AMR, decode, convert/resample, play via ALSA.
        audiodepay = gst.element_factory_make("rtpamrdepay", 'audiodepay')
        audiodec = gst.element_factory_make("amrnbdec", 'audiodec')
        audioconvert = gst.element_factory_make("audioconvert", "audioconvert")
        audioresample = gst.element_factory_make("audioresample", 'audioresample')
        sink = gst.element_factory_make("alsasink", 'alsasink')

        pipeline.add(audiodepay, audiodec, audioconvert, audioresample, sink)
        gst.element_link_many(audiodepay, audiodec, audioconvert, audioresample, sink)

        # gstrtpbin manages the RTP session (session 1 = audio).
        rtpbin = gst.element_factory_make('gstrtpbin', 'rtpbin')
        pipeline.add(rtpbin)

        # RTP data into session 1.
        srcpad = gst.Element.get_static_pad(rtpsrc, 'src')
        sinkpad = gst.Element.get_request_pad(rtpbin, 'recv_rtp_sink_1')
        gst.Pad.link(srcpad, sinkpad)

        # Incoming RTCP for session 1.
        srcpad = gst.Element.get_static_pad(rtcpsrc, 'src')
        sinkpad = gst.Element.get_request_pad(rtpbin, 'recv_rtcp_sink_1')
        gst.Pad.link(srcpad, sinkpad)

        # Outgoing RTCP reports back to the sender.
        srcpad = gst.Element.get_request_pad(rtpbin, 'send_rtcp_src_1')
        sinkpad = gst.Element.get_static_pad(rtcpsink, 'sink')
        gst.Pad.link(srcpad, sinkpad)

        def pad_added_cb(rtpbin, new_pad):
            # rtpbin exposes its RTP src pad only once data arrives;
            # (re)connect it to the depayloader, dropping any stale link.
            qv_pad = audiodepay.get_pad("sink")
            log.debug("Audio pad added")

            if qv_pad.is_linked():
                peer = qv_pad.get_peer()
                peer.unlink( qv_pad )

            new_pad.link(qv_pad)

        rtpbin.connect('pad-added', pad_added_cb)

        pipeline.set_state(gst.STATE_NULL)

        return pipeline

    def play(self):
        """Start both pipelines."""
        self.currentPipeline.set_state(gst.STATE_PLAYING)
        self.currentAudioPipeline.set_state(gst.STATE_PLAYING)

    def stop(self):
        """Stop both pipelines and release their resources (STATE_NULL)."""
        self.currentPipeline.set_state(gst.STATE_NULL)
        self.currentAudioPipeline.set_state(gst.STATE_NULL)

    # In the pause mode, the stream is put in a buffer and will be processed
    # when switched back to play mode => fast-played video for some time.
    def pause(self):
        """Pause both pipelines (incoming data keeps buffering)."""
        self.currentPipeline.set_state(gst.STATE_PAUSED)
        self.currentAudioPipeline.set_state(gst.STATE_PAUSED)

    # Everything is allocated but the stream is not opened.
    def ready(self):
        """Put both pipelines in STATE_READY (allocated, not streaming)."""
        self.currentPipeline.set_state(gst.STATE_READY)
        self.currentAudioPipeline.set_state(gst.STATE_READY)

    def setChannel(self, channelID):
        """Switch to *channelID*: rebuild the pipelines and notify homebox.

        No-op when the channel is already selected.  The homebox call is
        marshalled onto the reactor thread since this runs from a clutter
        callback.
        """
        log.debug("Set video stream to channel: %d" % channelID)
        if self.currentChannel == channelID:
            return

        if self.currentChannel is not None:
            # Tear the old pipelines down before swapping in new ones.
            self.currentPipeline.set_state(gst.STATE_NULL)
            self.currentAudioPipeline.set_state(gst.STATE_NULL)
            self.nextStream()
            self.currentPipeline.set_state(gst.STATE_READY)
            self.currentAudioPipeline.set_state(gst.STATE_READY)

        self.currentChannel = channelID

        if self.homebox:
            reactor.callFromThread(self.homebox.setChannel, channelID)

    def on_key_press_event(self, stage, event):
        """Keyboard handler: s=stop, b=ready, p=play, digits pick a channel."""
        if event.keyval == clutter.keysyms.s:
            self.stop()
        if event.keyval == clutter.keysyms.b:
            self.ready()
        if event.keyval == clutter.keysyms.p:
            self.play()

        # ``has_key`` is deprecated; membership test is the idiom.
        if event.keyval in self.actionsMapping.numericKey:
            channel = self.actionsMapping.numericKey[event.keyval]
            self.message.set_text("%d" % channel)
            self.setChannel(channel)