# -*- coding: utf-8 -*-
""" The LAS Object PointDataRecords module.

  :Author:
    - 2010-2012 Nicola Creati and Roberto Vidmar

  :Revision:  $Revision: 2 $
              $Date: 2012-10-22 15:55:02 +0200 (Mon, 22 Oct 2012) $

  :Copyright: 2010-2012
              Nicola Creati <ncreati@inogs.it>
              Roberto Vidmar <rvidmar@inogs.it>

  :License: MIT/X11 License (see :download:`LICENSE.txt
                             <../../LICENSE.txt>`)
"""

import numexpr as ne
import numpy as np
import mmap
# Local imports
from core import PointDataRecord
from lasExceptions import (FileCreationError, InvalidPointDataFormatID,
  MandatoryFieldMissing, UnimplementedException)


# Functions:
def sumLists(l1, l2):
  """ Return element by element sum of the two lists

    :param l1: first list to add
    :type  l1: list
    :param l2: second list to add
    :type  l2: list
    :returns: element by element sum of l1 and l2
    :rtype: list

    .. warning:: the returned list will have the same number of elements of the
       shortest one
  """
  # A list comprehension (not map) guarantees a real list is returned on
  # both Python 2 and Python 3; on Python 3 map() would return a lazy
  # iterator, breaking callers that index or reuse the result.
  return [a + b for a, b in zip(l1, l2)]

def checkForMandatoryFields(points):
  """ Check if points object has the right structure

    :param points: object
    :type  points: :class:`dict`, :class:`numpy.ndarray`, :class:`numpy.memmap`
    :raises: :class:`~ALASpy.lasExceptions.MandatoryFieldMissing`
  """
  # Collect the field names available in the container, whatever its kind
  if isinstance(points, dict):
    available = set(points.keys())
  else:
    available = set(points[0].dtype.names)

  missing = set("XYZ") - available
  if missing:
    raise MandatoryFieldMissing(
      "Mandatory field(s) %s missing in points." % str(tuple(missing)))

def hasGPSTime(points):
  """ Return True if points object has GPSTime field

    :param points: object
    :type  points: :class:`dict`, :class:`numpy.ndarray`, :class:`numpy.memmap`
    :returns: True if points object has GPSTime field
    :rtype: bool
  """
  # Bug fix: the dict branch used the key 'GPStime' (lowercase t) while
  # the array branch and the rest of the module (outputDtype, readTo)
  # consistently use 'GPSTime', so dicts never reported GPS time.
  if isinstance(points, dict):
    names = points.keys()
  else:
    names = points[0].dtype.names
  return 'GPSTime' in names

#===============================================================================
class Points(object):
  """ The Point Data Records object class.

      Gives read access (through a memory mapped view of the LAS file)
      and write access to the Point Data Records of the parent
      :class:`~ALASpy.las.LasObject`.
  """
  def __init__(self, parent):
    """ Create a new instance of the WHOLE dataset

      :param parent: LasObject
      :type parent: :class:`~ALASpy.las.LasObject`

      .. note:: No memory will be allocated yet
    """
    self._parent = parent
    self._header = parent.header()
    self._stopped = False       # Set to True to abort the readTo() loop
    self._mapFile = None        # Memory mapped array (or its file name)
    self._removedRecords = None # Number of records removed while reading

    self._pointDataRecord = PointDataRecord(self._header)

    if parent.isReadMode():
      # Experimental map file support: map the whole file read-only and
      # expose the point records as a zero-copy numpy view starting at
      # the point data offset declared in the header
      self._mapFile = mmap.mmap(self._header.fid().fileno(), 0,
        access=mmap.ACCESS_READ)
      inputDtype = self.pointDataRecordDtype('all')
      self._data = np.frombuffer(self._mapFile, inputDtype,
        offset=self._header.offsetToPointData())
    else:
      self._data = None

  def data(self):
    """ Return mapped file view of las points records.

      :returns: memory mapped view of the point records (None in write mode)
      :rtype: :class:`numpy.ndarray` or None
    """
    return self._data

  def pointDataRecordDtype(self, *args, **kargs):
    """ Return Point Data Record Dtype

      :returns: Point Data Record Dtype
      :rtype: :class:`numpy.dtype`
    """
    return self._pointDataRecord.makeDtype(*args, **kargs)

  def pointDataRecordSize(self):
    """ Return Point Data Record size in bytes

      :returns: Point Data Record size in bytes
      :rtype: int
    """
    return self._pointDataRecord.sizeBytes()

  def __repr__(self):
    if self._parent.isReadMode():
      s = "Point Data Record\n"
      s += " Mode: read\n"
      s += " PointDataRecord version: %d\n" % self._header.pointDataFormatID()
      s += " N.points: %d" % self._header.numPointRecords()
    else:
      s = "Point Data Record\n"
      s += " Mode: write\n"
    return s

  def __getitem__(self, index):
    """ Return PointDataRecord at index

      :param index: index of point to retrieve
      :type index: int
      :returns: PointDataRecord instance at index
      :rtype: :class:`~ALASpy.header.PointDataRecord`
      :raises: :class:`IOError` if the file is not opened for reading
    """
    if not self._header.isReadMode():
      raise IOError("%s not opened for reading" %
        self._header.__class__.__name__)

    return PointDataRecord(self._header, self._data[index], index)

  def _makeDtypes(self, fields, asFloat, withGPS):
    """ Return dtype both for input and output

      :param fields: List or tuple of field names (strings) from
                     header pointDataRecordDtype() method
                     or: the string 'all'
                     or: the string 'short', shortcut for
                       (X(f4),Y(f4),Z(f4),Intensity, Classification)
                       In this case only (X, Y, Z, Intensity, Classification)
                       will be returned. If offset parameter is not None
                       X, Y, Z values returned will be float32
      :type  fields: tuple or string
      :param asFloat: generate dtype float instead of double for X, Y, Z
      :type  asFloat: bool
      :param withGPS: generate dtype with GPSTime field
      :type  withGPS: bool
      :returns: inputDtype, outputDtype
      :rtype: tuple of numpy.dtype
    """
    # 'short' still reads the full record: decimation of the fields
    # happens only on output
    if fields in ('all', 'short'):
      inputDtype = self.pointDataRecordDtype('all')
    else:
      inputDtype = self.pointDataRecordDtype(fields)

    floatFormat = '<f4' if asFloat else '<f8'

    if fields == 'short':
      # Short output, we don't care about all fields...
      outputDtype = [('X', floatFormat), ('Y', floatFormat), ('Z', floatFormat),
        ('Intensity', '<u2'), ('Classification', '|u1')]
      if withGPS:
        outputDtype.append(('GPSTime', '<f8'))
      outputDtype = np.dtype(outputDtype)
    else:
      # Same fields as input, but X, Y, Z promoted to floating point
      descr = inputDtype.descr
      for i, f in enumerate(descr):
        if f[0] in set("XYZ"):
          descr[i] = (f[0], floatFormat)
      outputDtype = np.dtype(descr)
    return inputDtype, outputDtype

  def _rescaleXYZ(self, data, offsets, asFloat):
    """ Rescale x, y, z fields in data to their original value,
        *subtracting* optionally offset tuple from x and y.

      :param data: dataset to rescale
      :type  data: numpy.ndarray
      :param offsets: Offsets tuple to apply to data.
                      These value will be *subtracted*
                      New x = x - offsets[0], New y = y - offsets[1]
                      (default is to apply offsets from Header)
      :type  offsets: tuple
      :param asFloat: generate dtype float instead of double for X, Y, Z
      :type  asFloat: bool
      :returns: xyz tuple
      :rtype: tuple
    """
    # Get scales from Header
    scales = self._header.scaleFactors()
    # Do not move zero
    zeros = (0., 0., 0.)
    if offsets is None:
      # Get offsets from Header
      offsets = self._header.offsets()
      if asFloat:
        # Shift X, Y near zero to limit the float32 precision loss
        zeros = (self._header.xmin(), self._header.ymin(), 0)

    # numexpr evaluates (c * scale + offset) - zero in a single C pass
    xyz = tuple()
    for i, scale, offset, zero in zip("XYZ", scales, offsets, zeros):
      xyz += (ne.evaluate('((c * scale) + offset) - min', local_dict=dict(
        c=data[i], scale=scale, offset=offset, min=zero)), )
    return xyz

  def _data2points(self, data, totalRecsRead, xyz, points, asFloat,
    outputDtype):
    """ Move data to final destination

      :param data: dataset to rescale
      :type  data: numpy.ndarray
      :param totalRecsRead: total number of records read
      :type  totalRecsRead: int
      :param xyz: xyz tuple
      :type  xyz: tuple
      :param points: points object
      :type  points: dict, numpy.array or numpy.memmap
      :param asFloat: store X, Y, Z as float32 in dict output
      :type  asFloat: bool
      :param outputDtype: dtype describing the output fields
      :type  outputDtype: numpy.dtype
      :returns: points object
      :rtype: unchanged: dict, numpy.array or numpy.memmap
    """
    #-----------------------------------------------------------------------
    def getData(n):
      """ Return the array for field name `n` of the data object
          or a zero filled array of the same size if data has no field `n`
      """
      if n in data.dtype.names:
        return data[n]
      else:
        nDtype = [f[1] for f in points.dtype.descr if f[0] == n][0]
        return np.zeros(shape=data['X'].shape, dtype=nDtype)
    #-----------------------------------------------------------------------
    recsValid = data.shape[0]
    xyzSet = set("XYZ")
    if isinstance(points, dict):
      if 'X' not in points:
        # dict is empty: the first chunk initializes every field
        for c, i in zip(xyz, "XYZ"):
          points[i] = c.astype('f4') if asFloat else c
        # Move remaining fields
        for n in set(outputDtype.names).difference(xyzSet):
          if n == 'Classification':
            # Keep only the low 5 classification bits
            points[n] = getData(n) & 0b11111
          elif (n == 'GPSTime') and (self._header.versionMinor() == 2):
            if self._header.globalEncoding() == 1:
              # Adjusted Standard GPS Time: remove the 1e9 offset
              points[n] = getData(n) - 1E9
            else:
              # Bug fix: the field used to be silently dropped when the
              # global encoding was not 1; copy it through unchanged
              points[n] = getData(n)
          else:
            points[n] = getData(n)
      else:
        # dict already initialized: append this chunk to each field
        for c, i in zip(xyz, "XYZ"):
          points[i] = np.append(points[i],
            c.astype(data.dtype.fields[i][0].type))
        # Move remaining fields
        for n in set(points.keys()).difference(xyzSet):
          points[n] = np.append(points[n], getData(n))
    else:
      # ndarray or memory mapped array: write the chunk in place
      for c, i in zip(xyz, "XYZ"):
        points[i][totalRecsRead: totalRecsRead + recsValid] = c
      # Copy remaining fields
      for n in set(outputDtype.names).difference(xyzSet):
        points[n][totalRecsRead: totalRecsRead + recsValid] = getData(n)

    return points

#-------------------------------------------------------------------------------
# Public methods
#-------------------------------------------------------------------------------
  def readTo(self, recStart=0, recEnd=-1, skip=None, recsPerChunk=None,
    offsets=None, fields='all', unique=False, withGPS=False, pointsDtype=dict,
    asFloat=False):
    """ Read all points and return a Python container according to
        `pointsDtype`

      :param recStart: Record number to start from (default 0)
      :type  recStart: int
      :param recEnd: Last record to read,  default is -1, the number of
                     records from the header
      :type  recEnd: int
      :param skip: Integer number of records to skip while reading
      :type  skip: int
      :param recsPerChunk: Read file in recsPerChunk chunks
      :type  recsPerChunk: int
      :param offsets: Offsets tuple to apply to data.
                      These value will be *subtracted*
                      New x = x - offsets[0], New y = y - offsets[1]
                      (default is to apply offsets from Header)
      :type  offsets: tuple
      :param fields: * List or tuple of field names (strings) from
                       header pointDataRecordDtype() method or:
                     * the string 'all' or:
                     * the string 'short', shortcut for
                       (X(f4), Y(f4), Z(f4), Intensity, Classification)
                       In this case only (X, Y, Z, Intensity, Classification)
                       will be returned. If offset parameter is not None
                       X, Y, Z values returned will be float32
      :type  fields: tuple or string
      :param unique: If True remove duplicate data points (only for numpy
                     objects)
      :type  unique: bool
      :param withGPS: If True adds GPS data to short output
      :type  withGPS: bool
      :param pointsDtype: Points will be saved according to argument type:

       * :class:`dict`        -> Python dictionary
       * :class:`numpy.array` -> numpy ndarray
       * :class:`string`      -> numpy memory mapped array
         file name
      :type  pointsDtype: :class:`dict`, :class:`numpy.array` or :class:`string`
      :param asFloat: if True output format of X, Y, Z fields will be float (f4)
                      instead of double (f8).
                      In this case an offset of None means that offset will be
                      computed from Header xmin, ymin, zmin
      :type  asFloat: bool
      :raises: :class:`~ALASpy.lasExceptions.FileCreationError`,
               :class:`~ALASpy.lasExceptions.UnimplementedException`,
               :class:`IOError`
    """
    if not self._header.isReadMode():
      raise IOError("%s not opened for reading" %
        self._header.__class__.__name__)

    # GPS time is present only in point data formats 1 and 3
    if withGPS and (self._header.pointDataFormatID() not in (1, 3)):
      withGPS = False
    # Create data types for reading and saving points
    inputDtype, outputDtype = self._makeDtypes(fields, asFloat, withGPS)

    numPointRecords = self._header.numPointRecords()
    if recEnd == -1:
      # Read up to the last record in the file
      recsToRead = numPointRecords - recStart
    else:
      recsToRead = recEnd - recStart

    # Integer (floor) division: on Python 3 `/` would yield a float
    output_record_count = recsToRead if not skip else (
      1 + recsToRead // skip)

    if pointsDtype.__class__ != str:
      # Allocate memory to hold all the points according to data type
      if ((pointsDtype.__name__ == np.ndarray.__name__) or
        isinstance(pointsDtype, np.ndarray)):
        # Create return empty ndarray
        points = np.empty(shape=(output_record_count, ), dtype=outputDtype)
      elif ((pointsDtype.__name__ == dict.__name__) or
        isinstance(pointsDtype, dict)):
        # Create return empty dictionary
        points = {}
    else:
      # pointsDtype is a file name: create an empty memory mapped ndarray
      try:
        points = np.memmap(pointsDtype, dtype=outputDtype,
          mode='w+', shape=output_record_count)
      except IOError as e:
        msg = ("Cannot create numpy memory mapped file: %s" % e)
        raise FileCreationError(msg)
      else:
        self._mapFile = pointsDtype

    # Compute number of record to read at each iteration
    if recsPerChunk:
      # We want to read chunkwise:
      recsPerChunk = min(recsToRead, int(recsPerChunk))
    else:
      # Read "Hole in one"
      recsPerChunk = recsToRead

    info = ('Reading', self._parent.filename())
    self._parent.progress(0, info)

    # File reading loop, executed once in case recsPerChunk is large enough
    remainingRecords = recsToRead
    numChunks = 0 # Number of data chunks read
    totalRecsRead = 0
    while remainingRecords and not self._stopped:
      # Bug fix: honour recStart -- the slice previously always began at
      # record 0 regardless of the requested starting record
      first = recStart + totalRecsRead
      data = self._data[first: first + recsPerChunk]

      recsReadThisChunk = data.shape[0]
      numChunks += 1

      # Apply decimation if set
      # NOTE(review): decimation is applied per chunk, so when
      # recsPerChunk is not a multiple of skip the sampling phase may
      # drift between chunks -- confirm against the expected resampling
      data = data[::skip]
      # Rescale X, Y, Z
      xyz = self._rescaleXYZ(data, offsets, asFloat)

      # Move data chunk into points
      # NOTE(review): with skip set, totalRecsRead counts raw (not
      # decimated) records while _data2points uses it as an output
      # index -- verify the combination of skip and recsPerChunk
      points = self._data2points(data, totalRecsRead, xyz, points, asFloat,
        outputDtype)

      # Save number of records actually read
      totalRecsRead += recsReadThisChunk
      remainingRecords -= recsReadThisChunk

      # Adjust recsPerChunk for the last (possibly partial) chunk
      recsPerChunk = min(recsPerChunk, remainingRecords)

      progress = 1. * totalRecsRead / numPointRecords
      self._parent.progress(progress, info)

    self._removedRecords = 0
    if unique:
      # Bug fix: the old test `type(pointsDtype) is type(dict)` compared
      # against `type` itself and was therefore True for *any* class,
      # wrongly rejecting ndarray requests too; only dict output is
      # unsupported here
      if pointsDtype is dict or isinstance(pointsDtype, dict):
        raise UnimplementedException(
          "Cannot remove duplicate records from dict objects.")
      else:
        # Remove duplicates
        points = np.unique(points)
        self._removedRecords = totalRecsRead - points['X'].shape[0]
    return points

  def removedRecords(self):
    """ Return the number of removed records while reading in case of
        duplicate records

      :returns: number of removed records while reading
      :rtype: int or None
    """
    return self._removedRecords

#-------------------------------------------------------------------------------
  def write(self, points, recStart=0, recEnd=-1, skip=None, recsPerChunk=None,
    offsets=None):
    """ Write LAS Header, Variable Length Records **AND** binary record(s)
        to file and leave file **CLOSED**

      :param points: either
                     Recarray, Memory Mapped Array or dict object containing
                     LASER points
                     or
                     list or tuple of the same objects.
      :type  points: numpy.ndarray, numpy.memmap or dict
      :param recStart: Record number to start from (default 0)
      :type  recStart: integer
      :param recEnd: Last record to write,  (default is -1, the number of
                     records in the `points` object)
      :type  recEnd: integer
      :param skip: Integer number of records to skip while writing.
                   Resample output so that the number of points written
                   is equal to (num of points) / skip (default None)
      :type  skip: integer
      :param recsPerChunk: Write file in recsPerChunk chunks (default None)
                          (Useful with large datasets)
      :type  recsPerChunk: integer
      :param offsets: offsets tuple to apply to x, y, z data (default None)
      :type  offsets: tuple
      :raises: :class:`~ALASpy.lasExceptions.InvalidPointDataFormatID`,
               :class:`ValueError`

      .. warning:: Leaves file **CLOSED**
    """
    # Check for mandatory fields
    checkForMandatoryFields(points)

    if recEnd == -1:
      # End is last point of dataset
      recEnd = points['X'].size

    # Sanity check
    if recEnd < recStart:
      raise ValueError("start (%d) must be less than recend (%d)" %
        (recStart, recEnd))

    # Compute number of output records
    recsToWrite = points['X'][recStart: recEnd: skip].shape[0]

    # Compute number of record to write at each iteration
    if recsPerChunk:
      # We want to write chunkwise:
      recsPerChunk = int(min(recsToWrite, recsPerChunk))
    else:
      # Write "Hole in one"
      recsPerChunk = recsToWrite

    # Formats 0 and 2 have no GPS time slot in the record
    if hasGPSTime(points) and (
      self._header.pointDataFormatID() in (0, 2)):
      raise InvalidPointDataFormatID(
        "PointDataFormatID is %d but dataset *HAS* GPS Time info." %
        self._header.pointDataFormatID())

    # Create output structure
    lasDtype = self.pointDataRecordDtype()
    nonMandatoryFields = set(lasDtype.names).difference(set('XYZ'))
    output = np.zeros(shape=recsToWrite, dtype=lasDtype)

    min_xyz = [points[i].min() for i in "XYZ"]
    max_xyz = [points[i].max() for i in "XYZ"]

    # Set offsets in Header (default: dataset minima)
    if offsets is None:
      offsets = min_xyz
    self._header.setOffsets(*offsets)

    # Quantize X, Y, Z to integer counts of the header scale factors
    scaleFactors = self._header.scaleFactors()
    for i, f in enumerate("XYZ"):
      output[f] = (np.round((
        points[f][recStart: recEnd: skip] - offsets[i]) /
        scaleFactors[i])).flatten()

    # NON MANDATORY FIELDS: copy over the ones present in `points`
    availableNames = (points.keys() if isinstance(points, dict)
      else points.dtype.names)
    for f in nonMandatoryFields:
      if f in availableNames:
        output[f] = points[f][recStart: recEnd: skip]

    # Update Header
    # NOTE(review): ranges are stored as min/max *plus* offsets -- with
    # the default offsets (the minima) xmin becomes 2 * min; verify
    # against Header.setRanges semantics
    self._header.setRanges(
      sumLists(min_xyz, offsets), sumLists(max_xyz, offsets))
    self._header.setNumPointRecords(output.shape[0])

    # NOTE(review): `output` already holds the resampled records, so
    # slicing it again with recStart/recEnd/skip looks like a double
    # resampling -- confirm intended behaviour
    if self._header.pointDataFormatID() < 6:
      byteData = output['AByte'][recStart: recEnd: skip]
    else:
      byteData = output['2Bytes'][recStart: recEnd: skip]
    # Count points by return number
    returnNumbers = PointDataRecord.getReturnNumber(byteData)
    numPoints = [(returnNumbers == i + 1).sum()
      for i, dummy in enumerate(self._header.numPointsByReturn())]
    # Update header
    self._header.setNumPointsByReturn(numPoints)

    #######################################
    # RIEN NE VA PLUS! Write Header and VLR
    #######################################
    info = ('Writing', self._parent.filename())
    self._parent.progress(0, info)
    self._header.write()

    # File writing loop, executed once in case recsPerChunk is large enough
    remainingRecords = recsToWrite
    recsWritten = 0
    while remainingRecords:
      # Adjust recsPerChunk for the last (possibly partial) chunk
      recsPerChunk = min(recsPerChunk, remainingRecords)
      start = recsToWrite - remainingRecords
      end = start + recsPerChunk
      output[start: end].tofile(self._parent.fid())
      # Save number of records actually written
      recsWritten += recsPerChunk
      remainingRecords -= recsPerChunk
      progress = 1. * recsWritten / recsToWrite
      self._parent.progress(progress, info)
    self._parent.fid().close()

#-------------------------------------------------------------------------------
  def mapFile(self):
    """ Return name of Memory Mapped file or None

      :return: Name of Memory Mapped file
      :rtype: String or None

      .. note:: in read mode this attribute holds the mmap object itself,
         not a file name; it is a string only after readTo() created a
         memory mapped output file
    """
    return self._mapFile

  