"""
WaterQualitySource is the feeder for ambient and recreational water quality
data provided by the Derwent Estuary Program. This feeder extracts the archive
specified by the path in the delegate configuration, iterates through each
valid file it contains, parses those files and dispatches all valid
samples.

@author Ben Howell <ben.howell@csiro.au>
"""

import copy
import os
import re
import string
import sys
import threading
import time
import xml
import xml.sax.handler
import zipfile

from decimal import Decimal
from threading import Thread, Event, InterruptedException
from types import UnicodeType

import xlrd

from java.lang import Boolean

from com.xhaus.jyson import JysonCodec as json

from au.csiro.ict.tasman.core import Message
from au.csiro.ict.tasman.util import QuickCache
from au.csiro.ict.tasman.datatype import Sample
from au.csiro.ict.tasman.datatype import Annotation
from au.csiro.ict.tasman.datatype import TagCollection

from sleeper import Sleeper
from core import Dispatcher
from simplexlsx import SimpleXlsxParser
from simplexlsx import SimpleXlsxDate as xl_date
from zlibextract import ZlibExtract as zlib_extract



def run():
  """
  Entry point invoked by au.csiro.ict.tasman.source.ScriptSource.

  Builds and returns the running script instance; on subsequent calls the
  Java side hands this same object back via the "instance" parameter — an
  almost, sort of visitor pattern if you like.
  @return the running script instance.
  """
  return WaterQualitySource()
  

def isRunning():
  """
  Invoked by au.csiro.ict.tasman.source.ScriptSource.
  When this reports false, SourceManager tears down the calling thread and
  attempts a restart on the Java side.
  @return true if running, else false.
  """
  alive = instance is not None and instance.is_running()
  return Boolean("True") if alive else Boolean("False")


def shutDown():
  """
  Invoked by au.csiro.ict.tasman.source.ScriptSource.
  Gives the script a chance to run its own cleanup routine before exiting.
  @return true after shutdown.
  """
  done = instance is not None and instance.shutdown()
  return Boolean("True") if done else Boolean("False")


class WaterQualitySource:
  """
  Main data source routine.

  Watches the configured data directory for new archives of water quality
  workbooks, extracts the workbooks into a temporary directory, parses
  each workbook (.xls via xlrd, .xlsx via the SAX-based SimpleXlsxParser)
  and enqueues the resulting samples with the dispatcher.  All work
  happens on a dedicated thread started by the constructor; shutdown()
  requests a cooperative stop.
  """

  def __init__(self):
    """
    Constructor.
    Loads the delegate configuration (JSON) from the injected `source`
    object, compiles the archive/workbook name patterns, prepares the
    processed-archive cache and the dispatcher, then starts the worker
    thread.
    """
    self.sleeper = Sleeper()
    self.thread_cancelled = False
    self.configuration = json.loads(source.getConfiguration().getConfiguration().toString())
    self.file_name_pattern = re.compile(self.configuration['filenamepattern'])
    self.workbook_pattern = re.compile(self.configuration['workbookpattern'])
    self.interval = self.configuration['interval']

    self.data_dir = source.getDataDirectory() + self.configuration['datapath']
    self.tmp_dir = self.data_dir + "tmp/"
    self.cache = QuickCache(self.data_dir + "cache.json")
    self.dispatcher = Dispatcher(source)

    self.thread = Thread(target=self.run)
    self.thread.start()
    source.getLogger().info("WaterQualitySource initialised")

  def run(self):
    """
    Main loop.
    Repeatedly scans the data directory for unseen archives matching the
    configured file name pattern, processes each one, persists the cache
    of processed archives and then sleeps for the configured interval.
    Exits when the thread is cancelled (via shutdown()) or interrupted.
    """
    if not os.path.exists(self.tmp_dir):
      os.makedirs(self.tmp_dir)

    sxlp = SimpleXlsxParser()
    rxh = WaterQualitySourceXlsxHandler()
    sxlp.set_content_handler(rxh)
    rxh.dispatcher = self.dispatcher

    while not self.thread_cancelled:
      try:
        for file_name in os.listdir(self.data_dir):
          match = self.file_name_pattern.match(file_name)
          if match and not self.cache.contains(file_name):
            # Remember the archive up front so a parse failure is not
            # retried forever on every pass.
            self.cache.add(file_name)
            self._process_archive(file_name, match.group(2), sxlp, rxh)

          # Clean up extracted files.  This intentionally runs for every
          # directory entry (not only processed archives), matching the
          # original behaviour.
          for tmp_file in os.listdir(self.tmp_dir):
            os.remove(self.tmp_dir + tmp_file)

        self.cache.save()

        # Sleep.
        source.getLogger().debug("sleeping...")
        self.sleeper.sleep(self.interval)
        source.getLogger().debug("waking...")
      except InterruptedException:
        self.thread_cancelled = True

  def _process_archive(self, file_name, sub_program, sxlp, rxh):
    """
    Extracts every workbook from one archive into tmp_dir and parses each.
    @param file_name the archive file name (relative to data_dir).
    @param sub_program the sub program token captured from the file name.
    @param sxlp the shared SimpleXlsxParser instance.
    @param rxh the shared xlsx content handler.
    """
    archive = os.path.join(self.data_dir, file_name)
    # Close the zip handle explicitly; the original leaked it.
    zf = zipfile.ZipFile(archive, 'r')
    try:
      filelist = self.get_workbook_names(zf)
    finally:
      zf.close()

    # Extract workbooks from archive.
    source.getLogger().info("extracting workbooks from archive: " + archive)
    for entry in filelist:
      worksheet = os.path.join(self.tmp_dir, os.path.basename(entry))
      zlib_extract().extract(entry, worksheet, archive)
      source.getLogger().debug("workbook: " + entry + " extracted!")

    # Parse each workbook.
    for file in os.listdir(self.tmp_dir):
      if file.lower().endswith("xls"):
        source.getLogger().info("parsing: " + file)
        match = self.workbook_pattern.match(file)
        if match is None:
          # Guard: the original dereferenced a failed match and crashed.
          source.getLogger().error("workbook: " + file + " does not match workbook pattern, skipping...")
          continue
        wb = xlrd.open_workbook(self.tmp_dir + file)
        # Prefer the (last) sheet whose name contains "edited",
        # otherwise fall back to the first sheet.
        worksheet = wb.sheet_by_index(0)
        for name in wb.sheet_names():
          if "edited" in name.lower():
            worksheet = wb.sheet_by_name(name)
        self.parse_xls_worksheet(worksheet, match.group(1).replace(" ", "_").lower(), sub_program)
        source.getLogger().info("worksheet parsed")
      elif file.lower().endswith("xlsx"):
        source.getLogger().info("parsing: " + file)
        match = self.workbook_pattern.match(file)
        if match is None:
          source.getLogger().error("workbook: " + file + " does not match workbook pattern, skipping...")
          continue
        rxh.set_site_name(match.group(1).replace(" ", "_").lower())
        rxh.set_sub_program(sub_program)
        sxlp.parse_workbook(self.tmp_dir + file)
        source.getLogger().info("worksheet parsed")
      else:
        source.getLogger().error("file: " + file + " is not a recognised format, skipping...")

  def is_running(self):
    """
    Returns true if main loop is running.
    @return true if the worker thread is alive.
    """
    return self.thread.isAlive()

  def shutdown(self):
    """
    Performs cleanup routine on shutdown: flags the worker loop to stop,
    wakes it if sleeping and blocks until the thread terminates.
    @return true after shutdown.
    """
    source.getLogger().debug("shutting down script...")
    self.thread_cancelled = True
    source.getLogger().debug("forcibly waking thread...")
    self.sleeper.waken()

    # Block while waiting for thread to terminate.
    while self.thread.isAlive():
      time.sleep(1)
    source.getLogger().debug("script terminated!")
    self.cache.save()
    return True

  def get_workbook_names(self, zf):
    """
    Returns the archive entries whose names match the workbook pattern.
    @param zf the open zipfile to inspect.
    @return list of matching entry names.
    """
    # workbook_pattern is already compiled in __init__; the original
    # redundantly re-compiled it here.
    return [name for name in zf.namelist() if self.workbook_pattern.match(name)]

  def parse_xls_worksheet(self, worksheet, site_name, sub_program):
    """
    Parses an xls format worksheet.

    Pass 1 walks every row: locates the site description row, then the
    data header row, and dispatches every subsequent row as sample data.
    Pass 2 re-reads the rows between the site header and the data header
    looking for the statistics header and the "sample number" row, then
    walks those rows bottom-up emitting statistic annotations.
    @param worksheet the xlrd worksheet to parse.
    @param site_name the site location associated with the worksheet.
    @param sub_program the sub program the worksheet belongs to.
    """
    source.getLogger().debug("parsing worksheet: " + worksheet.name)
    self.meta_data = SheetMetaData()
    self.meta_data.site_name = site_name
    self.meta_data.sub_program = sub_program
    self.row_items = dict()

    # Pass 1: site description, data header, then data rows.
    for nrow in range(0, worksheet.nrows):
      for ncol, cell in enumerate(worksheet.row_values(nrow)):
        self.row_items[ncol] = cell

      if self.meta_data.site_description is None:
        source.getLogger().debug("probing row " + str(nrow) + " for site information...")
        if not self.meta_data.set_site_description(nrow, self.row_items):
          source.getLogger().debug("  no site information found")

      elif self.meta_data.data_header_row is None:
        source.getLogger().debug("probing row " + str(nrow) + " for data header information...")
        if not self.meta_data.set_data_header_row(nrow, self.row_items):
          source.getLogger().debug("  no data header information found" )

      elif nrow > self.meta_data.data_header_row:
        self.dispatcher.enqueue(parse_data_row(self.meta_data, self.row_items))
      self.row_items.clear()

    # Pass 2: the stats header and sample-number rows live between the
    # site header row and the data header row.
    if self.meta_data.site_header_row is not None and self.meta_data.data_header_row is not None:
      sample_number_row_items = None
      for nrow in range(self.meta_data.site_header_row + 1, self.meta_data.data_header_row):
        # NOTE: row_items is deliberately NOT cleared between rows in
        # this loop — the original accumulated cells the same way.
        for ncol, cell in enumerate(worksheet.row_values(nrow)):
          self.row_items[ncol] = cell

        if self.meta_data.stats_header_row is None:
          source.getLogger().debug("probing row " + str(nrow) + " for stats header information...")
          if not self.meta_data.set_stats_header_row(nrow, self.row_items):
            source.getLogger().debug("  no stats header information found" )

        if self.meta_data.sample_number_row is None:
          source.getLogger().debug("probing row " + str(nrow) + " for sample number information...")
          if not self.meta_data.set_sample_number_row(nrow, self.row_items):
            source.getLogger().debug("  no sample number information found" )
          else:
            # Snapshot the row: the shared dict keeps being mutated below.
            sample_number_row_items = copy.deepcopy(self.row_items)

      if self.meta_data.sample_number_row is not None:
        # Walk the stat rows bottom-up and emit annotations per stat.
        for nrow in reversed(range(self.meta_data.site_header_row + 1, self.meta_data.sample_number_row)):
          for ncol, cell in enumerate(worksheet.row_values(nrow)):
            self.row_items[ncol] = cell

          if self.row_items:
            messages = create_stat_messages(self.row_items, self.meta_data, sample_number_row_items)
            self.dispatcher.enqueue(messages)
          self.row_items.clear()
        sample_number_row_items.clear()

    # Dispatch any remaining messages.
    self.dispatcher.flush()
    
    

class WaterQualitySourceXlsxHandler(xml.sax.handler.ContentHandler):
  """
  SAX content handler fed by SimpleXlsxParser.

  Accumulates cell values into a per-row dict, resolving shared-string
  cells through the string dictionary supplied by the parser, and runs
  each completed row through the SheetMetaData probes / data dispatch.
  """

  def __init__(self):
    """
    Constructor.  State is completed externally: the parser supplies the
    shared-string dictionary and the owner assigns the dispatcher.
    """
    xml.sax.handler.ContentHandler.__init__(self)
    self.string_dictionary = None  # shared-strings table, set by the parser
    self.meta_data = SheetMetaData()
    self.dispatcher = None
    self.row_items = dict()        # column index -> cell value for current row

    # Clear cursors and buffers.
    self.reset()

  def set_site_name(self, site_name):
    """Sets the site location name on the sheet metadata."""
    self.meta_data.site_name = site_name

  def set_sub_program(self, sub_program):
    """Sets the sub program name on the sheet metadata."""
    self.meta_data.sub_program = sub_program

  def reset(self):
    """
    Clears cursors, buffers and sheet specific data.
    """
    self.buffer = ''
    self.col_type = None
    self.row_items.clear()
    self.n_rows = 0
    self.n_cols = 0

  def startElement(self, name, attrs):
    """
    Called when a new element is encountered.
    Flags the current cell as a shared-string cell when it carries t="s".
    @param name the name of the element.
    @param attrs the element attributes (key/value pair).
    """
    # The original looped over every attribute and re-tested "t" each
    # iteration; a single lookup is equivalent.
    if name == "c" and attrs.get("t") == "s":
      self.col_type = "string"

  def characters(self, content):
    """
    Called for characters encountered inside each element.
    @param content a chunk of characters.
    """
    self.buffer += content

  def endElement(self, name):
    """
    Called when a closing element is encountered.
    "c" commits the buffered cell value into row_items, "row" runs the
    completed row through the metadata probes / data dispatch, and
    "sheetData" flushes the dispatcher and resets all state.
    @param name the name of the element.
    """
    if name == "c":
      if self.buffer != '':
        if self.col_type == "string":
          # Shared-string cell: the buffer holds an index into the
          # workbook's shared-string table.
          self.row_items[self.n_cols] = self.string_dictionary[int(self.buffer)]
        else:
          self.row_items[self.n_cols] = self.buffer
        self.buffer = ''
        self.col_type = None
      self.n_cols += 1

    if name == "row":
      if self.meta_data.site_description is None:
        source.getLogger().debug("probing row " + str(self.n_rows) + " for site information...")
        if not self.meta_data.set_site_description(self.n_rows, self.row_items):
          source.getLogger().debug("  no site information found")

      elif self.meta_data.data_header_row is None:
        source.getLogger().debug("probing row " + str(self.n_rows) + " for data header information...")
        if not self.meta_data.set_data_header_row(self.n_rows, self.row_items):
          source.getLogger().debug("  no data header information found" )

      elif self.n_rows > self.meta_data.data_header_row:
        self.dispatcher.enqueue(parse_data_row(self.meta_data, self.row_items))

      # Reset per-row state (the original cleared row_items twice here).
      self.row_items.clear()
      self.buffer = ''
      self.n_cols = 0
      self.n_rows += 1

    if name == "sheetData":
      # Dispatch any remaining messages and reset for the next sheet.
      self.dispatcher.flush()
      self.reset()



      
class SheetMetaData:
  """
  Holds the layout metadata discovered while probing a worksheet: where
  the site/stats/data header rows live, which columns are annotations and
  which are phenomena (sensor + unit of measure), plus the time span of
  the samples seen so far.
  """

  def __init__(self):
    self.sub_program = None    # sub program token from the archive name
    self.site_name = None      # normalised site location name
    self.date_time_idx = None  # column index of the date_time column
    self.depth_time_idx = None # column index of the depth_time column
    self.start_date = None     # timestamp of first dispatched sample
    self.end_date = None       # timestamp of last dispatched sample
    self.site_header_row = None
    self.stats_header_row = None
    self.data_header_row = None
    self.sample_number_row = None
    self.site_cell = None
    self.qualifier = None      # pending qualifier heading, consumed by add()
    self.site_description = None
    self.annotations = dict()  # column index -> annotation tag name
    self.phenomena = dict()    # column index -> {"sensor": ..., "uom": ...}

  def add_phenomenon(self, idx, sensor, uom):
    """Registers a phenomenon column with its sensor name and unit."""
    self.phenomena[idx] = {"sensor": sensor, "uom": uom}

  def is_annotation(self, idx):
    """Returns True when column idx holds annotation data."""
    return idx in self.annotations

  def set_site_description(self, row_idx, row_items):
    """
    Probes a row for the site description: the cell following the first
    cell containing "site".
    @return True when the site description row was found.
    """
    if row_items:
      for k in sorted(row_items.keys()):
        if row_items[k]:
          # NOTE(review): assumes the cell is text and that a cell exists
          # at k+1 — a numeric cell or a trailing "site" cell would raise
          # here, exactly as in the original.
          if "site" in row_items[k].lower():
            self.site_description = row_items[k + 1]
            self.site_header_row = row_idx
            source.getLogger().debug("** site information found: " + self.site_description)
            return True
    return False

  def set_data_header_row(self, row_idx, row_items):
    """
    Probes a row for the data header ("date_time" in column 0).  When
    found, every other text heading in the row is classified via add().
    @return True when this call located the data header row.
    """
    if 0 in row_items and row_items[0]:
      if type(row_items[0]) is UnicodeType and "date_time" in row_items[0].lower():
        self.date_time_idx = 0
        self.data_header_row = row_idx
        source.getLogger().debug("** data header row found")
    if self.data_header_row is not None:
      for k in sorted(row_items.keys()):
        if k > 0 and row_items[k] and type(row_items[k]) is UnicodeType:
          self.add(row_idx, k, row_items[k])
    return self.data_header_row is not None

  def set_stats_header_row(self, row_idx, row_items):
    """
    Probes a row for the statistics header: a text cell in a phenomenon
    column whose normalised text equals that phenomenon's sensor name.
    @return True only when the header is actually found.  (The original
    returned True after inspecting the first phenomenon regardless of a
    match, which only mis-steered the caller's debug logging.)
    """
    for k in self.phenomena:
      if k in row_items and row_items[k] and type(row_items[k]) is UnicodeType:
        stat_header = row_items[k].replace(" ", "_").replace(".", "_")
        stat_header = stat_header.strip().rstrip("_")
        if stat_header == self.phenomena[k]["sensor"]:
          source.getLogger().debug("** stats header row found")
          self.stats_header_row = row_idx
          source.getLogger().debug("** stat_header: " + stat_header)
          return True
    return False

  def set_sample_number_row(self, row_idx, row_items):
    """
    Probes a row for a "sample number" heading.
    @return True when the sample number row was found.
    """
    for k in sorted(row_items.keys()):
      if row_items[k] and type(row_items[k]) is UnicodeType and "sample number" in row_items[k].lower():
        source.getLogger().debug("** sample number row found")
        self.sample_number_row = row_idx
        return True
    return False

  def add(self, row_idx, col_idx, content):
    """
    Classifies one data-header cell: known annotation headings are
    recorded in annotations, qualifier headings arm self.qualifier, and
    anything else is treated as a sensor heading whose unit of measure is
    derived from the text.  A sensor is only registered while a qualifier
    is armed; the armed qualifier is consumed either way.
    """
    lowered = content.lower()
    if "taken_by" in lowered:
      self.annotations[col_idx] = "taken_by"
    elif "program_name" in lowered:
      self.annotations[col_idx] = "program_name"
    elif lowered == "depth":
      self.annotations[col_idx] = "depth"
    elif "depth_time" in lowered:
      self.depth_time_idx = col_idx
    elif "sample_comment" in lowered:
      self.annotations[col_idx] = "sample_comment"
    elif "comments" in lowered:
      self.annotations[col_idx] = "comments"
    elif "qualifier" in lowered:
      self.annotations[col_idx] = content.replace(" ", "_")
      self.qualifier = lowered
    else:
      sensor = content.replace(" ", "_").replace(".", "_")
      sensor = sensor.strip().rstrip("_")
      uom = self._uom_for(sensor, content)
      if uom is None:
        source.getLogger().warn("Unable to determine sensor and uom for sensor: " + content)
      elif self.qualifier is not None:
        self.add_phenomenon(col_idx, sensor, uom)
      self.qualifier = None

  def _uom_for(self, sensor, content):
    """
    Maps a normalised sensor heading to its unit of measure, or None when
    the unit cannot be determined.  Rule order matters and mirrors the
    original elif chain exactly.
    """
    s = sensor.lower()
    if "ug/l" in s:
      return "ug/L"
    if "degrees_c" in s:
      return "degrees C"
    if "sensor_tc" in s:
      return "sensor TC"
    if "mg/l" in s:
      return "mg/L"
    if "turbidity" in s and "ntu" in s:
      return "NTU"
    if "field_cond" in s and "us/cm" in s:
      return "uS/cm"
    if "conductivity" in s and "count" in s:
      return "counts"
    if "percent" in s and "saturation" in s:
      return "percent_saturation"
    if re.match("^.*\\s+(ph|pH)\\s+.*", content):
      return "pH"
    if "ppt" in s:
      return "ppt"
    if "secchi" in s and "metres" in s:
      return "secchi_metres"
    if "hazen" in s:
      return "hazen"
    if "cfu/100ml" in s:
      return "CFU/100ml"
    if "mv" in s:
      return "mV"
    if "100ml_count" in s:
      return "100ml_count"
    if "euphotic" in s:
      return "m"
    if "sulphate" in s and "g/l" in s:
      return "g/L"
    return None
    
      
      
def parse_data_row(meta_data, row_items):
  """
  Converts one worksheet data row into dispatchable sample messages.

  The depth-time column, when present and populated, takes precedence
  over the primary date-time column.  Also tracks the first/last
  timestamps seen on the sheet (start_date/end_date on meta_data).
  @param meta_data the sheet metadata describing the column layout.
  @param row_items dict of column index -> cell value for the row.
  @return list of messages (empty when the row yields nothing).
  """
  if not row_items:
    return []

  depth_idx = meta_data.depth_time_idx
  if depth_idx is not None and row_items.get(depth_idx):
    decimal_days = float(row_items[depth_idx])
  else:
    decimal_days = float(row_items[meta_data.date_time_idx])

  timestamp = xl_date().joda_iso8601_datetime_from_decimal_days(decimal_days)
  if timestamp is None:
    return []

  if meta_data.start_date is None:
    meta_data.start_date = timestamp
  meta_data.end_date = timestamp
  return create_sample_messages(row_items, meta_data, timestamp)
      
      
def create_sample_messages(row_items, meta_data, timestamp):
  """
  Builds one Message-wrapped Sample per populated phenomenon cell.
  Each sample carries the uom, site description, sub program and any
  annotation cells present on the same row as tags.
  @param row_items dict of column index -> cell value for the row.
  @param meta_data the sheet metadata describing the column layout.
  @param timestamp the ISO8601 timestamp for every sample in the row.
  @return list of messages, one per populated phenomenon column.
  """
  messages = list()
  for col_idx, phen in meta_data.phenomena.items():
    cell = row_items.get(col_idx)
    if cell is None or cell == '':
      continue

    sample = Sample()
    sample.time = timestamp
    sample.value = float(cell)
    sample.sensorId = "derwent_estuary_program." + meta_data.site_name + "." + phen["sensor"]
    sample.tags.put("uom", phen["uom"])

    # Add annotations.
    if meta_data.site_description is not None:
      sample.tags.put("site_description", meta_data.site_description)

    sample.tags.put("sub_program", meta_data.sub_program)

    for col, name in meta_data.annotations.items():
      if col in row_items and row_items[col]:
        sample.tags.put(str(name), str(row_items[col]))

    source.getLogger().debug("sample: " + sample.toString())
    messages.append(Message(sample.sensorId, sample))
  return messages
  
  
def create_stat_messages(row_items, meta_data, sample_number_row_items):
  """
  Builds one Message-wrapped Annotation per populated phenomenon cell on
  a statistics row.  The first text cell on the row names the statistic;
  without one, no messages are produced.
  @param row_items dict of column index -> cell value for the stat row.
  @param meta_data the sheet metadata describing the column layout.
  @param sample_number_row_items snapshot of the "sample number" row.
  @return list of messages, one per populated phenomenon column.
  """
  # Locate the statistic label: the first text-typed cell on the row.
  stat_label = None
  for idx in sorted(row_items.keys()):
    if type(row_items[idx]) is UnicodeType:
      stat_label = str(row_items[idx])
      break

  messages = list()
  if not stat_label:
    return messages

  for col_idx, phen in meta_data.phenomena.items():
    value = row_items.get(col_idx)
    if value is None or value == '':
      continue

    note = Annotation()
    note.sampleFrom = meta_data.start_date
    note.sampleTo = meta_data.end_date
    note.sensorId = "derwent_estuary_program." + meta_data.site_name + "." + phen["sensor"]
    note.tags.put("value", str(value))
    note.tags.put("uom", phen["uom"])
    note.tags.put("sample_number", str(sample_number_row_items[col_idx]))

    if meta_data.site_description is not None:
      note.tags.put("site_description", meta_data.site_description)

    note.tags.put("sub_program", meta_data.sub_program)

    for col, name in meta_data.annotations.items():
      if col in row_items and row_items[col] is not None and row_items[col] != '':
        note.tags.put(str(name), row_items[col])

    source.getLogger().debug("annotation: " + note.toString())
    messages.append(Message(note.sensorId, note))
  return messages
    
    
    
    
    
    
    
    
  