#!/usr/bin/python -O

import sys, os
import subprocess
import threading
import time, sched

from amqp import RoutingKey, Receiver
from protomsg import GpbMessage
import Queue

import const

# Query life-cycle status codes; const.status_text is indexed by the codes,
# so registering QUEUED=0 .. RUNNING=3 from the list keeps the two in sync.
const.status_text = ['QUEUED', 'COMPLETED', 'FAILED', 'RUNNING']
for _code, _name in enumerate(const.status_text):
    setattr(const, _name, _code)

# Make the project's py-lib directory importable; SP_BASE_SCRIPTS must be set.
path_scripts = os.environ.get('SP_BASE_SCRIPTS')
if not path_scripts:
    raise EnvironmentError('SP_BASE_SCRIPTS not set')

path_pylib = os.path.join(path_scripts, 'py-lib')
sys.path.append(path_pylib)

class DsThread(threading.Thread):
    # Base class for broker-consumer threads.  Subclasses must provide:
    #   * get_binding_keys -- property returning the AMQP binding patterns
    #   * process_msg(rKey, pMsg) -- handler invoked for each decoded message
    #   * publish() -- called periodically by the main scheduler loop
    def __init__(self, brokerIp, brokerPort, protoSrc):
        """ constructor, setting initial variables """

        # double-underscore attributes are name-mangled: private to this class
        self.__receiver = Receiver(brokerIp, brokerPort)
        self.__proto_src = protoSrc

        # cooperative shutdown flag; set by join()
        self._stopevent = threading.Event( )

        threading.Thread.__init__(self)

    def run(self):
        """ main control loop """
        if __debug__: print "%s starts" % (self.getName( ),)

        import qpid.exceptions

        try:
            # get_binding_keys is a property on the subclasses, so this
            # passes the tuple of binding patterns, not a bound method
            self.__receiver.setup(self.get_binding_keys)

            while not self._stopevent.isSet( ):
                try:
                    msg = self.__receiver.fetch(timeout=5)
                except Queue.Empty:
                    # fetch timed out with no message; loop again so the
                    # stop event is re-checked every 5 seconds
                    if __debug__: print 'No message, just waiting ...'
                    continue

                for header in msg.headers:
                    try:
                        routingKeyStr = header.routing_key
                    except AttributeError:
                        # header without a routing key -- nothing to dispatch on
                        continue

                    if __debug__:
                        print '{Message : %s}' % (routingKeyStr,)

                    rKey = RoutingKey(routingKeyStr)

                    # decode the protobuf payload named by the routing key
                    pMsg = GpbMessage(self.__proto_src,
                                      rKey.package, rKey.publication)
                    pMsg.parse_from_string(msg.body)

                    self.process_msg(rKey, pMsg)

        # any of these broker-side failures ends the thread quietly
        except qpid.exceptions.Closed:
            print 'Qpid connection closed, exiting...'

        except qpid.exceptions.Timeout:
            print 'Broker timedout, exit...'

        except qpid.exceptions.VersionError:
            print 'Incompatible broker version, exiting...'

        if __debug__: print "%s ends" % (self.getName( ),)

    def join(self, timeout=None):
        """ Stop the thread and wait for it to end. """
        self._stopevent.set( )
        threading.Thread.join(self, timeout)


class QueryThread(DsThread):
    """Consumes ndcs query start/end statistics and republishes each tracked
    query as a progress record plus one overall summary record.

    NOTE(review): tpa_publish is not imported anywhere in this file --
    presumably it is provided by a module under py-lib; confirm.
    """

    __key_summary = '.'.join(['performance_stat',
                              'dsinormalizedcounters',
                              'instance',
                              'public',
                              'jobquery_summary'])
    __key_progress = '.'.join(['performance_stat',
                               'seaquest',
                               'instance',
                               'public',
                               'query_progress'])

    def __init__(self, brokerIp, brokerPort, protoSrc, interval=5):
        """Set up shared state.

        interval -- sampling interval in seconds reported in the progress
        records; defaults to 5, matching the script's --sleep default.
        """
        # BUG FIX: was DsThread.__init, which is name-mangled to
        # _QueryThread__init and raises AttributeError at construction time.
        DsThread.__init__(self, brokerIp, brokerPort, protoSrc)

        self._interval = interval
        self._lock = threading.RLock()  # guards _query_stats (consumer vs. publisher thread)
        self._query_stats = {}          # query_id -> {'start': msg[, 'end': msg]}

    @property
    def get_binding_keys(self):
        """Binding patterns for the query start/end statistic streams."""
        return  ('#.ndcs.#.query_start_stats', '#.ndcs.#.query_end_stats')

    def block(self):
        """Acquire the stats lock."""
        self._lock.acquire()

    def unblock(self):
        """Release the stats lock."""
        self._lock.release()

    def process_msg(self, rKey, pMsg):
        """Record a start/end statistics message under its query id."""
        query_id = pMsg.get_field_value('query_id')
        message_name = rKey.message

        self.block()
        try:
            if message_name == 'ndcs.query_start_stats':
                self._query_stats[query_id] = {'start' : pMsg}
            elif query_id in self._query_stats:  # ndcs.query_end_stats
                self._query_stats[query_id]['end'] = pMsg
            # else: end_stats without a matching start -- orphan, ignore
        finally:
            self.unblock()

    def publish(self):
        """Publish one progress record per tracked query, then a summary.

        Completed and failed queries are removed from the cache afterwards;
        running ones stay for the next pass.
        """
        try:
            (n_queued, n_completed, n_failed, n_running) = (0, 0, 0, 0)

            self.block()
            # list() so finished entries can be popped while iterating
            for key, val in list(self._query_stats.items()):
                # BUG FIX: was publish_one_query(nsleep, val) -- nsleep is
                # undefined in this scope (NameError).  Use the configured
                # sampling interval instead.
                ret = self.publish_one_query(self._interval, val)

                if ret == const.QUEUED:
                    n_queued += 1
                elif ret == const.COMPLETED:
                    n_completed += 1
                    self._query_stats.pop(key)
                elif ret == const.FAILED:
                    n_failed += 1
                    self._query_stats.pop(key)
                else:  # RUNNING
                    n_running += 1

            tpa_publish( sys.argv[0],
                         self.__key_summary,
                         ['<header>',   #header
                          n_running,    #running_count
                          n_queued,     #queued_count
                          n_completed,  #completed_count
                          n_failed] )   #failed_count

        finally:
            self.unblock()

    def publish_one_query(self, interval, query_stat):
        """Publish one progress record and return its const.* status code.

        interval   -- sampling interval in seconds (reported as milliseconds)
        query_stat -- {'start': msg} or {'start': msg, 'end': msg}
        """
        # cache the handlers to get value from start_stat and end_stat
        get_start_val = query_stat['start'].get_field_value

        if 'end' in query_stat:
            get_end_val = query_stat['end'].get_field_value
        else:
            get_end_val = lambda _field: ''  # if no end_stat, get null

        # starting to get all values for proto
        ts_start = get_start_val('time_ts_utc')

        # NOTE(review): const.QUEUED is never actually returned -- every
        # query is classified COMPLETED/FAILED/RUNNING below, so the
        # summary's queued_count is always 0.  Confirm this is intended.
        if 'end' in query_stat: # the query has already finished
            ts_end = get_end_val('time_ts_utc')

            if get_end_val('error_code') >= 0:
                status_code = const.COMPLETED
            else:
                status_code = const.FAILED
        else:
            # using the current time is better than null; timestamps are
            # in microseconds (hence the * 1000000 and the / 1000 below)
            ts_end = time.time() * 1000000
            status_code = const.RUNNING

        msglist = ['<info_header>',                 #header
            interval * 1000,                 #requested_sampling_interval_ms
            interval * 1000,                 #actual_sampling_interval_ms
            get_start_val('user_name'),             #user_longname
            get_start_val('query_id'),              #jobquery_longname
            const.status_text[status_code],         #status_name
            get_start_val('time_ts_lct'),           #starttime_lct_ts
            ts_start,                               #starttime_utc_ts
            get_end_val('time_ts_lct'),             #endtime_lct_ts
            ts_end,                                 #endtime_utc_ts
            (ts_end - ts_start) / 1000,             #runtime_ms
            1 if status_code == const.RUNNING else 0, #running_bool
            (ts_end - ts_start) / 1000,             #cpu_ms
            get_end_val('total_memory'),            #memory_bytes
            get_end_val('disc_reads'),              #read_bytes
            get_end_val('msgs_bytes_to_disc'),      #written_bytes
            get_start_val('node_name'),             #node_longname
            get_start_val('session_id'),            #session_longname
            get_end_val('transaction_id'),          #transaction_id
            get_start_val('statement_type'),        #statement_type
            get_start_val('sql_text'),              #query_text32k
            get_start_val('client_id'),             #client_id
            get_start_val('application_id'),        #application_id
            get_start_val('ds_name'),               #ds_name
            get_start_val('statement_id'),          #statement_id
            get_end_val('error_code'),              #error_code_enum
            get_end_val('error_text'),              #error_text
            get_end_val('odbc_elapsed_time') ,      #odbc_elapsed_time_ms
            get_end_val('odbc_execution_time') ,    #odbc_execute_time_ms
            get_end_val('rows_returned'),           #rows_returned
            get_end_val('rows_retrieved'),          #rows_retrived
            get_end_val('num_sql_process'),         #num_sql_process
            get_start_val('wms_start_time_ts_lct'), #wms_start_time_lct_ts
            get_start_val('wms_start_time_ts_utc'), #wms_start_time_utc_ts
            get_start_val('cmp_start_time_ts_lct'), #cmp_start_time_utc_ts
            get_start_val('cmp_start_time_ts_utc'), #cmp_start_time_lct_ts
            get_start_val('cmp_end_time_ts_lct'),   #cmp_end_time_utc_ts
            get_start_val('cmp_end_time_ts_utc'),   #cmp_end_time_lct_ts
            get_end_val('wait_time'),               #wait_time
            get_end_val('hold_time'),               #hold_time
            'NULL',                                 #table_longname
        ]

        tpa_publish( sys.argv[0], self.__key_progress, msglist )

        return status_code

class EventThread(DsThread):
    """Consumes common text events and republishes each one via tpa_publish.

    NOTE(review): tpa_publish is not imported anywhere in this file --
    presumably it is provided by a module under py-lib; confirm.
    """

    __key_text_event = '.'.join(['event',
                                 'seaquest',
                                 'instance',
                                 'public',
                                 'text_event'])

    def __init__(self, brokerIp, brokerPort, protoSrc):
        """Set up the queue buffering events between receipt and publish."""
        # BUG FIX: was DsThread.__init, which is name-mangled to
        # _EventThread__init and raises AttributeError at construction time.
        DsThread.__init__(self, brokerIp, brokerPort, protoSrc)
        self._events = Queue.Queue()  # thread-safe hand-off: consumer -> publisher

    @property
    def get_binding_keys(self):
        """Binding pattern for the text-event stream."""
        return ('#.common.#.text_event',)

    def process_msg(self, rKey, pMsg):
        """Buffer one event message for the next publish() pass."""
        self._events.put((rKey, pMsg))

    def publish(self):
        """Drain the event queue, republishing every buffered event."""
        while not self._events.empty():
            (rKey, pMsg) = self._events.get()

            get_val = pMsg.get_field_value

            msglist = ['<info_header>',                #info_header
                get_val('header.event_id'),            #event_id
                get_val('header.event_severity'),      #event_severity
                get_val('text'),                       #text
                get_val('tokenized_event_repos_table') #tokenized_event_repos_table
            ]

            tpa_publish( sys.argv[0], self.__key_text_event, msglist )

def publish(sch, action_ths, nsleep):
    """Run one publish pass over every action thread, re-arming the
    scheduler first so the pass repeats every nsleep seconds."""
    sch.enter(nsleep, 0, publish, (sch, action_ths, nsleep))

    if __debug__:
        print('publishing...')

    for worker in action_ths:
        worker.publish()

# ---- main ----
from optparse import OptionParser

# Command-line interface, table-driven: (option string, add_option kwargs).
_cli_options = [
    ('--subscribe-ip',
     dict(dest = 'sub_ip', action = 'store', default = '127.0.0.1',
          help = 'ip address of the broker to subscribe, default [%default]')),
    ('--subscribe-port',
     dict(dest = 'sub_port', action = 'store', type = int, default = 5672,
          help = 'port of the broker to subscribe, default [%default]')),
    ('--proto-src',
     dict(dest = 'protoSrc', action = 'store',
          help = 'directory to store subscribe, default [%default]')),
    ('--type',
     dict(dest = 'types', action = 'append',
          help = 'one or more type of data to poll, available types are [event, query_stat]')),
    ('--sleep',
     dict(dest = 'sleep', action = 'store', type = int, default = 5,
          help = 'seconds of delay between query submits, default [%default]')),
]

parser = OptionParser()
for _optstr, _kwargs in _cli_options:
    parser.add_option(_optstr, **_kwargs)

(opts, args) = parser.parse_args()

# --proto-src and --type are mandatory; bail out with exit code 1 if missing.
if opts.protoSrc is None:
    print('--proto-src could not be empty')
    sys.exit(1)

# Maps each --type value to the thread class that handles it.
type_handlers = {'event' : EventThread,
                 'query_stat' : QueryThread}

if opts.types is None:
    print('--type could not be empty')
    sys.exit(1)

# Instantiate one consumer thread per requested type; unknown types are
# a usage error (exit code 2).
type_threads = []
for type_ in opts.types:
    if type_ not in type_handlers:
        print('invalid type %s' % type_)
        sys.exit(2)
    # BUG FIX: was type_handlers[action] -- 'action' is undefined here
    # (NameError); the loop variable is type_.
    klass = type_handlers[type_]
    type_threads.append( klass(opts.sub_ip, opts.sub_port, opts.protoSrc) )

import socket
try:
    for type_th in type_threads:
        type_th.start()

    schedule = sched.scheduler(time.time, time.sleep)
    try:
        # Kick off the periodic publish cycle; publish() re-arms itself,
        # so run() only returns via an exception (e.g. KeyboardInterrupt).
        schedule.enter(0, 0, publish, (schedule, type_threads, opts.sleep))
        schedule.run()

    except KeyboardInterrupt:
        print('Control-C pressed, exiting...')

except socket.error:
    print('Could not connect to server, exiting...')

finally:
    # BUG FIX: threads were joined only on KeyboardInterrupt; on a socket
    # error (or any other exit path) the non-daemon consumer threads kept
    # the process alive.  DsThread.join also sets the stop event.
    for type_th in type_threads:
        if type_th.ident is not None:  # only join threads that started
            type_th.join()
