#!/usr/bin/env python
#  Copyright (c) 2010
#  The Regents of the University of Michigan
#  All Rights Reserved

#  Permission is granted to use, copy, create derivative works, and
#  redistribute this software and such derivative works for any purpose,
#  so long as the name of the University of Michigan is not used in
#  any advertising or publicity pertaining to the use or distribution
#  of this software without specific, written prior authorization. If
#  the above copyright notice or any other identification of the
#  University of Michigan is included in any copy of any portion of
#  this software, then the disclaimer below must also be included.

#  This software is provided as is, without representation or warranty
#  of any kind either express or implied, including without limitation
#  the implied warranties of merchantability, fitness for a particular
#  purpose, or noninfringement.  The Regents of the University of
#  Michigan shall not be liable for any damages, including special,
#  indirect, incidental, or consequential damages, with respect to any
#  claim arising out of or in connection with the use of the software,
#  even if it has been or is hereafter advised of the possibility of
#  such damages.

import bisect
import collections
import socket
import sys
import threading

def node_observations_iter(report_filename):
  """Yield (ip, port, times) observations from a scan report.

  The report is a header terminated by a line starting with 'Times',
  followed by one observation per line of the form 'ip:port count'.
  Yields (ip, port, times) with ip and port as strings and times as
  an int.  Yields nothing if the 'Times' marker is absent.
  """
  with open(report_filename, 'r') as log:
    # Skip the header: everything up to and including the 'Times' line.
    for line in log:
      if line.startswith('Times'):
        break
    # Stream the remaining observation lines instead of slurping the
    # whole file into memory.  The `with` block also guarantees the
    # handle is closed (the original leaked it) and an empty file no
    # longer raises NameError on the loop index.
    for line in log:
      ip_port, times = line.split()
      ip, port = ip_port.split(':', 1)
      yield (ip, port, int(times))


def group_ips_by_num_ports(report_filename):
  """Return (num_ports, ip) pairs sorted ascending by port count.

  num_ports is the number of report lines observed for the ip --
  presumably one line per distinct port, per the report format.
  """
  ips = collections.defaultdict(int)
  for (ip, port, times) in node_observations_iter(report_filename):
    ips[ip] += 1  # one report line per (ip, port) observation

  # .items() instead of the Python-2-only .iteritems(): identical
  # results, and it runs on both Python 2 and 3.  sorted() over the
  # generator replaces the build-then-sort-in-place dance.
  return sorted((length, ip) for (ip, length) in ips.items())


def group_ips_by_degree(report_filename):
  """Return (total_times, ip) pairs sorted ascending by degree.

  total_times is the sum of the 'times' column over every report line
  for that ip.
  """
  ips = collections.defaultdict(int)
  for (ip, port, times) in node_observations_iter(report_filename):
    ips[ip] += times

  # .items() instead of the Python-2-only .iteritems(): identical
  # results on Python 2 and also valid on Python 3.
  return sorted((times, ip) for (ip, times) in ips.items())


def add_reverse_lookup_results(L, start, stop):
  """Annotate L[start:stop] in place with reverse-DNS names.

  Each L[idx] is a (count, ip) pair and is replaced with
  (count, ip, domain), where domain is the lowercased PTR name or ''
  when the lookup fails.  Intended to run from several threads, each
  owning a disjoint [start, stop) slice, so no locking is needed.
  """
  for idx in range(start, stop):
    (count, ip) = L[idx]
    try:
      domain = socket.gethostbyaddr(ip)[0]
      L[idx] = (count, ip, domain.lower())
    except (socket.herror, socket.gaierror):
      # gethostbyaddr raises gaierror (not just herror) on malformed
      # addresses; previously that escaped, killed the worker thread,
      # and left the rest of its slice un-annotated.
      L[idx] = (count, ip, '')
      

if __name__ == '__main__':
  # Usage: <script> REPORT_FILE (degree|ports)
  # Python 2 only: relies on `print >>` statement syntax, xrange, and
  # the comprehension-variable leak noted below.
  report = sys.argv[1]

  # Choose the grouping: 'degree' sums the times column per IP,
  # 'ports' counts report lines (one per port) per IP.
  if 'degree' in sys.argv:
    print >> sys.stderr, 'OK, group by degree'
    grouped_ips = group_ips_by_degree(report)
  elif 'ports' in sys.argv:
    print >> sys.stderr, 'OK, group by ports'
    grouped_ips = group_ips_by_num_ports(report)
  else:
    print >> sys.stderr, 'Please specify "degree" or "ports" for grouping!'
    sys.exit(1)

  # grouped_ips is sorted ascending by (count, ip).  '' sorts before
  # any real ip string, so `first` is the index of the first entry
  # whose count is >= 5.
  first = bisect.bisect(grouped_ips, (5, ''))
  # `last` = how many entries get a reverse-DNS lookup (those with
  # count >= 5).  NOTE(review): in 'degree' mode it is forced to 0,
  # i.e. no lookups at all -- presumably deliberate to bound runtime;
  # confirm before changing.
  last = len(grouped_ips) - first if 'degree' not in sys.argv else 0
  # Highest counts first; the `last` lookup candidates are now at the front.
  grouped_ips.reverse()

  print >> sys.stderr, 'Need to do %d DNS lookups, better go make a sandwich!' % last

  # Fan the lookups out over worker threads, each annotating a
  # disjoint slice of grouped_ips in place.
  CONCURRENT_LOOKUPS = 15
  chunk_size = last/CONCURRENT_LOOKUPS  # integer division on Python 2
  threads = [threading.Thread(
                 target=add_reverse_lookup_results,
                 args=(grouped_ips, x * chunk_size, (x+1) * chunk_size))
             for x in xrange(CONCURRENT_LOOKUPS-1)]
  # NOTE(review): `x` here relies on Python 2 leaking the list-
  # comprehension variable (x == CONCURRENT_LOOKUPS-2 after the
  # comprehension), so this final thread covers the remainder chunk
  # [(CONCURRENT_LOOKUPS-1)*chunk_size, last).  Under Python 3 this
  # would be a NameError.
  threads.append(threading.Thread(target=add_reverse_lookup_results,
                                  args=(grouped_ips, (x+1)*chunk_size, last)))
  for thread in threads:
    thread.start()
  for thread in threads:
    thread.join()

  print >> sys.stderr, 'Lookups done!'

  # Entries that got a lookup are (count, ip, domain) triples...
  for group in grouped_ips[:last]:
    print '%5.d %s %s' % group

  # ...the remainder are still (count, ip) pairs.
  for group in grouped_ips[last:]:
    print '%5.d %s' % group
