#!/usr/bin/env python
import os, sys, time, string, operator
import socket, telnetlib
import Queue, threading
from multiprocessing import Process
from xml.etree import cElementTree

import eye
import addrutil
import trie

# Raw BGPmon XML messages, produced by the receiver thread and consumed
# by the parser threads: (msg_text, local_recv_time, update_id).
updatesQueue = Queue.Queue()
# Lag signals from parser threads back to the receiver thread (one entry
# per message observed to be >120s old); receiver may reconnect when full.
reconSignalQueue = Queue.Queue()
# Prefix-origin trie shared by all threads; also exposes the DB handle
# (pfxorigin.db) used for alarms and update capture.
pfxorigin = trie.Trie()
# Update ids continue monotonically from the largest id already persisted.
update_id_begin = pfxorigin.db.getMaxUpdateID() + 1

def receivingBGPmon():
  """Receiver thread: stream BGPmon XML messages into updatesQueue.

  Connects to one of BGPMON_HOSTS over TCP, accumulates the byte stream,
  splits it on MSG_END_FLAG, and enqueues every complete message as
  (msg, local_recv_time, update_id).  Rotates to another host when the
  feed errors or lags (signalled via reconSignalQueue), reconnects after
  7 days of uptime, and schedules a full process restart after 30 days.
  Runs forever; returns only after scheduling the restart script.
  """
  addrutil.log(int(time.time()), 0, "Y:receiving thread running!")
  # constants
  BGPMON_HOSTS = [
    # YY: first host bad
    'livebgp.netsec.colostate.edu',
    '129.82.138.6',
    '129.82.138.96',
    '128.223.51.93',
  ]
  UPT_PORT = 50001
  # YY: flag change
  #MSG_END_FLAG = '</BGP_MESSAGE>'
  MSG_END_FLAG = '</BGP_MONITOR_MESSAGE>'
  LEN_MSG_END_FLAG = len(MSG_END_FLAG)
  # var: BGPmon host rotation state
  I_HOST = 0
  HOST = BGPMON_HOSTS[I_HOST]
  uptmon = None # TCP socket to the current BGPmon host (None = disconnected)
  # var: timers (epoch seconds)
  start_time = int(time.time())
  time_recv_local = connect_time = check_signal_time = 0
  idle_time = 0 # accumulated seconds without a complete message
  sleeptime = 30 # error back-off; doubles (max 240s) after a full host cycle
  # var: counters
  nreceived_msgs = 0
  idx_update = update_id_begin
  # bytes received but not yet split into complete messages
  recv_msg = ''
  while True:
    try:
      # re-start every 30 days
      # re-connect every 7 days, or server is too slow
      if time_recv_local - start_time >= 2592000: # 30 * 24 * 3600
        addrutil.log(time_recv_local, 0, ("BGPmon-restart: %s." % HOST))
        if uptmon:
          uptmon.close()
        uptmon = None
        time.sleep(sleeptime)
        os.system('./restart_bgpmon.sh &')
        break
      elif time_recv_local - connect_time >= 604800: # 7 * 24 * 3600
        addrutil.log(time_recv_local, 0, ("BGPmon-reconnect: %s." % HOST))
        if uptmon:
          uptmon.close()
        uptmon = None
        I_HOST = 0
        HOST = BGPMON_HOSTS[I_HOST]
      elif time_recv_local - check_signal_time >= 300: # check BGPmon delay
        # BGPmon is "too slow" when more than half of the recently received
        # messages triggered a lag signal from the parser threads
        if (time_recv_local - connect_time >= 1800 and nreceived_msgs > 50 and
            reconSignalQueue.qsize() > 0.5 * nreceived_msgs):
          addrutil.log(time_recv_local, 0, ("BGPmon-reconnect-delay: HOST-%s, qsize-%d, recv-%d." % (
              HOST, reconSignalQueue.qsize(), nreceived_msgs)
            ))
          if uptmon:
            uptmon.close()
          uptmon = None
          # try another IP
          I_HOST = (I_HOST + 1) % len(BGPMON_HOSTS)
          HOST = BGPMON_HOSTS[I_HOST]
        # clear signals
        nreceived_msgs = 0
        check_signal_time = time_recv_local
        # FIX: drain with get_nowait()/Queue.Empty instead of a blocking
        # get() under a bare except, so this can never hang or hide errors
        try:
          while reconSignalQueue.qsize():
            reconSignalQueue.get_nowait()
        except Queue.Empty:
          pass

      # version-B: socket
      if uptmon is None:
        uptmon = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        uptmon.settimeout(60)
        uptmon.connect((HOST, UPT_PORT))
        connect_time = check_signal_time = int(time.time())
        nreceived_msgs = 0
        recv_msg = ''
      recv_len, i_upt = (len(recv_msg), -1)
      while i_upt == -1:
        try:
          recv_msg += uptmon.recv(1048576)
        except socket.timeout:
          break
        # BUG FIX: other socket errors used to be re-raised as a generic
        # `Exception`, losing the original type/traceback; now they simply
        # propagate to the outer handler below
        if recv_len == len(recv_msg): # nothing new (peer closed?)
          break
        # only search the newly received tail, overlapping by one flag length
        if recv_len <= LEN_MSG_END_FLAG:
          i_upt = recv_msg.find(MSG_END_FLAG)
        else:
          i_upt = recv_msg.find(MSG_END_FLAG, recv_len - LEN_MSG_END_FLAG)
        recv_len = len(recv_msg)
      addrutil.log(int(time.time()), 0, "recv msg: %s" % recv_msg)
      if i_upt == -1: # empty/incomplete msg
        idle_time += 60
        if idle_time >= 600:
          # BUG FIX: these lines were tab-indented (TabError under py3);
          # also use py3-compatible raise syntax
          tmp_time = idle_time
          idle_time = 0
          raise socket.timeout('idle_time = %d' % tmp_time)
        continue
      else:
        idle_time = 0
      time_recv_local = int(time.time())
      # split buffer into complete messages
      prev_i = 0
      while i_upt != -1:
        i_upt += LEN_MSG_END_FLAG
        upt = recv_msg[prev_i : i_upt]
        addrutil.log(int(time.time()), 0, "Y:receiving update: %s" % upt)
        updatesQueue.put((upt, time_recv_local, idx_update))
        nreceived_msgs += 1
        idx_update += 1
        prev_i = i_upt
        i_upt = recv_msg.find(MSG_END_FLAG, i_upt)
      recv_msg = recv_msg[prev_i : ] # keep the incomplete tail
      sleeptime = 30
    except Exception as e:
      addrutil.log(int(time.time()), 0, ("BGPmonError:{%s, %s.}" % (HOST, e)))
      if uptmon:
        uptmon.close()
      uptmon = None
      # when there is a BGPmon Exception
      #  1. restart if livetime longer than 30 day
      if int(time.time()) - start_time >= 2592000: # 30 * 24 * 3600
        addrutil.log(int(time.time()), 0,
          "BGPmonError-restart: %s, livetime = %dh." % (
          HOST, (int(time.time()) - start_time) / 3600))
        os.system('./restart_bgpmon.sh &')
        break
      #  2. or retry later
      time.sleep(sleeptime)
      if sleeptime < 240 and I_HOST + 1 == len(BGPMON_HOSTS):
        sleeptime *= 2
      # try another IP
      I_HOST = (I_HOST + 1) % len(BGPMON_HOSTS)
      HOST = BGPMON_HOSTS[I_HOST]

def parseASPath(updates, xmlns, AStag): # AStag: AS, AS4 # new AStag: ASN2, ASN4
  """Return the AS path of an UPDATE element as a list of ints ([] if absent).

  updates -- the {xfb}UPDATE element.
  xmlns   -- unused; kept for caller compatibility (the xfb namespace is
             hardcoded below).
  AStag   -- tag name of the ASN elements inside AS_SEQUENCE ('ASN2'/'ASN4').
  """
  b = '{urn:ietf:params:xml:ns:xfb}'
  aspath = updates.find(b + "AS_PATH")
  if aspath is None:
    return []
  asseq = aspath.findall(b + "AS_SEQUENCE")
  # findall() returns a (possibly empty) list, never None
  if not asseq:
    return []
  aspathXML = []
  for seq in asseq: # may be more than 1 AS_SEG
    # find out the right AS_SEG
    aspathXML = seq.findall(b + AStag)
    # BUG FIX: was `if aspathXML is not None`, which is always true and so
    # stopped at the first segment even when it held no AStag elements;
    # keep scanning until a non-empty segment is found
    if aspathXML:
      break
  if not aspathXML:
    return []
  # convert the ASN strings to ints
  return [addrutil.asn2int(asxml.text) for asxml in aspathXML]

parseQueue = Queue.Queue()

def parseUpdate():
  """Parser thread: turn raw BGPmon XML messages into BGPUpdateEntry lists.

  Loops forever pulling (msg, recv_time, update_id) from updatesQueue and
  putting (prefixes, parse_finish_time, update_id) onto parseQueue.  A
  malformed or uninteresting message yields an empty prefix list, so the
  downstream re-sequencer never stalls waiting for a skipped update id.
  Puts a signal on reconSignalQueue when a message is more than 120s old.
  """
  addrutil.log(int(time.time()), 0, "Y:parsing thread running!")
  # YY: flag change
  #MSG_BEGIN_FLAG = '<BGP_MESSAGE>'
  MSG_BEGIN_FLAG = '<BGP_MONITOR_MESSAGE>'
  LEN_MSG_BEGIN_FLAG = len(MSG_BEGIN_FLAG)
  while True:
    (upt, time_recv_local, idx_update) = updatesQueue.get(True)
    if upt.startswith('<xml>'): # begin with "<xml>"
      upt = upt[5:]
    addrutil.log(int(time.time()), 0, "Y:parse update: %s" % upt)
    # Strip the outer BGP_MONITOR_MESSAGE element and re-wrap the payload in
    # a ROOT element that declares every namespace, so the lookups below can
    # use fixed '{uri}tag' strings regardless of the message's own prefixes.
    upt = '<ROOT xmlns:xsi="http://www.w3.org/2001/XMLSchema" xmlns="urn:ietf:params:xml:ns:bgp_monitor" xmlns:bgp="urn:ietf:params:xml:ns:xfb" xmlns:ne="urn:ietf:params:xml:ns:network_elements">' + upt[upt.find('<', LEN_MSG_BEGIN_FLAG) : upt.rfind('<', 0, -LEN_MSG_BEGIN_FLAG)] + '</ROOT>'
    root = None
    try:
      root = cElementTree.fromstring(upt)
    except Exception as expe:
      addrutil.log(time_recv_local, 0,
          ("ERROR: can not parse msg. <<<%s>>>" % expe))
      addrutil.log(time_recv_local, 0, upt)
      parseQueue.put(([], 0, idx_update))
      continue

    # YY: xml format changed
    xmlns = '{urn:ietf:params:xml:ns:bgp_monitor}'
    bgp = '{urn:ietf:params:xml:ns:xfb}'

    # 1. timestamp
    tstamp = root.find(xmlns + 'OBSERVED_TIME')
    if tstamp is None:
      addrutil.log(time_recv_local, 0, "ERROR: no TIMESTAMP")
      parseQueue.put(([], 0, idx_update))
      continue
    tstampINT = int((tstamp.find(xmlns + 'TIMESTAMP')).text)
    if tstampINT == 0:
      parseQueue.put(([], 0, idx_update))
      continue
    if time_recv_local - tstampINT > 120:
      # feed is lagging: let the receiver thread consider reconnecting
      reconSignalQueue.put((True))
    #
    # 2. peering
    peering = root.find(xmlns + 'SOURCE')
    if peering is None:
      parseQueue.put(([], 0, idx_update))
      continue
    # 2.1. monitor address
    monip = peering.find(xmlns + 'ADDRESS')
    # BUG FIX: test for a missing ADDRESS element *before* touching it;
    # previously monip.get() ran first and raised AttributeError on None
    if (monip is None) or (not monip.text):
      parseQueue.put(([], 0, idx_update))
      continue
    monipAFI = int(monip.get('afi', '0')) # 1 = IPv4, 2 = IPv6 (see 3.2.3)
    if monipAFI == 0:
      parseQueue.put(([], 0, idx_update))
      continue
    monip_bin = addrutil.ip_str2bin(
        monip.text, monipAFI == 2)
    if not monip_bin:
      parseQueue.put(([], 0, idx_update))
      continue
    # 2.2. monitor AS
    monas = peering.find(xmlns + 'ASN2')
    if monas is None:
      addrutil.log(time_recv_local, tstampINT, "ERROR: no SOURCE_AS")
      parseQueue.put(([], 0, idx_update))
      continue
    monas_int = addrutil.asn2int(monas.text) # ASN4
    #
    # 3. update
    announce_prefixes = []
    updates = root.find(bgp + 'UPDATE')
    if updates is None:
      parseQueue.put(([], 0, idx_update))
      continue
    #
    # 3.1. withdrawn
    wpfx = updates.findall(bgp + 'WITHDRAW')
    for i in xrange(len(wpfx)):
      # BUG FIX: the AFI check read `apfx[i]` (a stale variable from a
      # previous message, or undefined -> thread-killing NameError)
      # instead of `wpfx[i]`
      pfx = addrutil.BGPUpdateEntry(
        wpfx[i].get('afi', '0') == '2', wpfx[i].text,
        monip.text, monip_bin, monas_int, 0,
        [], '', tstampINT, time_recv_local, len(wpfx), idx_update)
      if pfx.length_is_valid():
        announce_prefixes.append(pfx)
    #
    # 3.2. announce
    # 3.2.1. IPv6 withdrawn
    # YY: unclear -- NOTE(review): this path uses the bgp_monitor namespace;
    # confirm MP_UNREACH_NLRI is not under the xfb namespace instead
    v6wd = updates.find(xmlns + 'ATTRIBUTE/'
        + xmlns + 'MP_UNREACH_NLRI/' + xmlns + 'WITHDRAWN')
    if (v6wd is not None) and v6wd.get('count', '0') != '0':
      wpfx = v6wd.findall(xmlns + 'PREFIX/' + xmlns + 'ADDRESS')
      wpfxAFI = v6wd.findall(xmlns + 'PREFIX/' + xmlns + 'AFI')
      for i in xrange(len(wpfx)):
        pfx = addrutil.BGPUpdateEntry(
            wpfxAFI[i].text.upper() == 'IPV6', wpfx[i].text,
            monip.text, monip_bin, monas_int, 0,
            [], '', tstampINT, time_recv_local, len(wpfx), idx_update)
        if pfx.length_is_valid():
          announce_prefixes.append(pfx)
    #
    # 3.2.2. AS path
    aspath = parseASPath(updates, bgp, "ASN2")   # 2-byte ASN path
    pathstr = []
    # restore 4 bytes ASN
    # AS4_PATH may incomplete, can not directly use it
    if not aspath:
      aspath = parseASPath(updates, bgp, "ASN4") # 4 bytes ASN
      if not aspath:
        parseQueue.put((announce_prefixes, int(time.time()), idx_update))
        continue
    as4path = None
    for j in xrange(-len(aspath), 0):
      if aspath[j] == 23456: # AS_TRANS: substitute from AS4_PATH if possible
        if as4path is None:
          # consistency fix: siblings pass `bgp` here (was `xmlns`; the
          # argument is unused by parseASPath, so behavior is unchanged)
          as4path = parseASPath(updates, bgp, "ASN4") # 4 bytes ASN
        if as4path and j >= -len(as4path):
          aspath[j] = as4path[j]
      pathstr.append('%d' % aspath[j])
    pathstr = ' '.join(pathstr)
    # path clean
    # (1) set 23456/reserved ASN to 0 (unknown ASN)
    # (2) remove duplicated/private ASN
    i = 0
    for j in xrange(len(aspath)):
      if aspath[j] == 23456 or addrutil.isReservedASN(aspath[j]):
        aspath[j] = 0
      if (i == 0 or aspath[i - 1] != aspath[j]) and (
          not addrutil.isPrivateASN(aspath[j])):
        aspath[i] = aspath[j]
        i += 1
    del aspath[i:]
    if not aspath:
      parseQueue.put((announce_prefixes, int(time.time()), idx_update))
      continue
    homeas_int = aspath[-1]
    addrutil.log(int(time.time()), 0, "Y:ASPath:%s" % aspath)
    #
    # 3.2.3. NLRI
    # suppose 1 refer to IPV4, 2 refer to IPV6
    apfx = updates.findall(bgp + 'NLRI')
    for i in xrange(len(apfx)):
      pfx = addrutil.BGPUpdateEntry(
        apfx[i].get('afi', '0') == '2', apfx[i].text,
        monip.text, monip_bin, monas_int, homeas_int,
        aspath, pathstr, tstampINT, time_recv_local, len(apfx),
        idx_update)
      if pfx.length_is_valid():
        announce_prefixes.append(pfx)
    #
    # 3.2.4. IPv6 announce
    # YY: unclear -- NOTE(review): same namespace question as 3.2.1
    v6nlri = updates.find(xmlns + 'ATTRIBUTE/' + xmlns + 'MP_REACH_NLRI/'
        + xmlns + 'NLRI')
    if (v6nlri is not None) and v6nlri.get('count', '0') != '0':
      apfx = v6nlri.findall(xmlns + 'PREFIX/' + xmlns + 'ADDRESS')
      apfxAFI = v6nlri.findall(xmlns + 'PREFIX/' + xmlns + 'AFI')
      for i in xrange(len(apfx)):
        pfx = addrutil.BGPUpdateEntry(
            apfxAFI[i].text.upper() == 'IPV6', apfx[i].text,
            monip.text, monip_bin, monas_int, homeas_int,
            aspath, pathstr, tstampINT, time_recv_local, len(apfx),
            idx_update)
        if pfx.length_is_valid():
          announce_prefixes.append(pfx)
    # check longer prefix first
    announce_prefixes.sort(reverse=True,
        key=operator.attrgetter('length', 'ipbeg_bin'))
    parseQueue.put((announce_prefixes, int(time.time()), idx_update))


def avg_key_count(kcd):
  """Return the count-weighted average of the keys of kcd.

  kcd -- dict mapping a numeric value (e.g. a delay in seconds) to its
         occurrence count.  Returns 0.0 for an empty dict.
  """
  s = n = 0
  # FIX: items() instead of py2-only iteritems(); identical behavior on
  # Python 2 and also works on Python 3
  for k, c in kcd.items():
    s += k * c
    n += c
  if n == 0:
    n = 1 # avoid division by zero; empty histogram averages to 0.0
  return s * 1.0 / n

def runBGPmon(clear_expired=True):
  """Main loop: start receiver/parser threads and apply updates in order.

  clear_expired is forwarded to pfxorigin.load().  Parsed updates arrive
  on parseQueue out of order (parsing is multi-threaded); this loop
  re-sequences them by update id via update_pool, inserts each prefix into
  the trie, fires eye probes on alarms, and logs delay statistics.
  Runs forever.
  """
  addrutil.log(int(time.time()), 0, "Y:BGPmon running!")
  alarms = dict() # {alarm_prefix: (start-time, alarm_id)}
  pfxorigin.load(clear_expired)
  # log info.
  # delay histograms, {delay_seconds: count}, one per pipeline stage
  recvDelayDis = dict()
  parseDelayDis = dict()
  trieDelayDis = dict()
  n_update = n_prefix = 0
  keepAlive = 0
  # threads
  probeCheckThread = threading.Thread(target = eye.probeTargetRun)
  probeCheckThread.start()
  parseUpdateThread = []
  N_PARSER_THREAD = 32
  for i in xrange(N_PARSER_THREAD):
    #parseUpdateThread.append(Process(target = parseUpdate))
    parseUpdateThread.append(threading.Thread(target = parseUpdate))
    parseUpdateThread[i].start()
  receivingBGPmonThread = threading.Thread(target = receivingBGPmon)
  receivingBGPmonThread.start()
  addrutil.log(int(time.time()), 0, "Y:Thread creating finished!")
  # run
  next_update_idx = update_id_begin
  trie_over_time = 0
  # out-of-order parsed updates waiting their turn: {idx: (update, parse_time)}
  update_pool = {}
  while True:
    next_update = update_pool.get(next_update_idx, None)
    if next_update:
      del update_pool[next_update_idx]
      next_update_idx += 1
    else:
      update, parse_over_time, idx_update = parseQueue.get(True)
      if idx_update == next_update_idx:
        next_update = (update, parse_over_time)
        next_update_idx += 1
      else:
        outq_time = int(time.time())
        # put into pool
        if idx_update > next_update_idx:
          update_pool[idx_update] = (update, parse_over_time)
        else:
          # id already passed (should not normally happen): drop it
          addrutil.log(outq_time, 0,
            'DISCARD idx_update #%d (next_update_idx #%d)' % (
            idx_update, next_update_idx))
        # avoid starve
        # if the trie has been idle >=10s and many updates are pooled,
        # assume the awaited id was lost and skip past it
        if (outq_time - trie_over_time >= 10) and (
            len(update_pool) > N_PARSER_THREAD * 4):
          addrutil.log(outq_time, 0,
              'INCREASE next_update_idx #%d (prev/cur-tim: %d %d)' % (
              next_update_idx, trie_over_time, outq_time))
          next_update_idx += 1
    if next_update:
      prefixes, parse_over_time = next_update
      if prefixes:
        idx_announce = -1
        trie_over_time = int(time.time())
        pfx = None
        bad_path_re = ''
        bad_path = ''
        for i in xrange(len(prefixes)):
          pfx = prefixes[i]
          # entries with homeas == 0 are withdrawals; only announcements
          # advance idx_announce
          if pfx.homeas != 0:
            idx_announce += 1
          # add in trie; only the first announce recomputes bad-path info,
          # later prefixes of the same update reuse it
          pfxorigin.add(pfx, idx_announce == 0, bad_path_re, bad_path)
          if idx_announce == 0:
            bad_path_re = pfx.bad_path_re
            bad_path = pfx.bad_path
          if pfx.alarm_id != -1: # -1: no alarm,   >=0: alarm_id
            # eye probe
            addrutil.log(int(time.time()), 0, "Y:alarm appear: %d" % pfx.alarm_id)
            eye.probeTarget(pfx, pfxorigin.db)
            # capture UPDATE
            pfxorigin.db.dbQueue.put(("bgpmon", (pfx.alarm_id, pfx)))
            alarms[pfx.dec] = (pfx.bgptime, pfx.alarm_id)
          elif pfx.dec in alarms:
            # still within an active alarm window: keep capturing updates
            pfxinfo = alarms[pfx.dec]
            if pfx.bgptime - pfxinfo[0] > 240: # collect BGPmon 4 minutes
              del alarms[pfx.dec]
            else:
              pfxorigin.db.dbQueue.put(("bgpmon", (pfxinfo[1], pfx)))
          trie_over_time = int(time.time())
          # log info.
          n_prefix += 1
          # statistical of trie delay
          delta_t = trie_over_time - pfx.bgptime
          if delta_t not in trieDelayDis:
            trieDelayDis[delta_t] = 1
          else:
            trieDelayDis[delta_t] = trieDelayDis[delta_t] + 1
        n_update += 1
        # statistical of BGPmon delay
        delta_t = pfx.localtime - pfx.bgptime
        if delta_t not in recvDelayDis:
          recvDelayDis[delta_t] = 1
        else:
          recvDelayDis[delta_t] = recvDelayDis[delta_t] + 1
        # statistical of UPDATE parser delay
        delta_t = parse_over_time - pfx.bgptime
        if delta_t not in parseDelayDis:
          parseDelayDis[delta_t] = 1
        else:
          parseDelayDis[delta_t] = parseDelayDis[delta_t] + 1
        # print statistic log (keep-alive, at most every 10 minutes)
        if trie_over_time - keepAlive > 600:
          addrutil.log(trie_over_time, 0,
              ("ALIVE. #[uptQ/reconQ/parseQ/uPool/targetQ] %d %d %d %d %d " +
               "T[recv/parse/trie] %.2lf %.2lf %.2lf " +
               "#[upt/pfx] %d %d") % (
                updatesQueue.qsize(), reconSignalQueue.qsize(),
                parseQueue.qsize(), len(update_pool), eye.targetQueue.qsize(),
                avg_key_count(recvDelayDis), avg_key_count(parseDelayDis),
                avg_key_count(trieDelayDis), n_update, n_prefix))
          #addrutil.log(0, 0, recvDelayDis)
          #addrutil.log(0, 0, parseDelayDis)
          #addrutil.log(0, 0, trieDelayDis)
          recvDelayDis.clear()
          parseDelayDis.clear()
          trieDelayDis.clear()
          n_update = n_prefix = 0
          keepAlive = trie_over_time
    
if __name__ == '__main__':
  # A single 'DB_NOT_CLEAR' argument keeps expired entries in the database;
  # any other invocation clears them on load.
  runBGPmon(clear_expired=(sys.argv[1:] != ['DB_NOT_CLEAR']))

