#!/usr/bin/python2.4
#
# Copyright 2007 Dobromir Montauk. All Rights Reserved.

"""This module implements the functions for changing data into various
text-friendly states (like CSV, HTML tables...)
"""

__author__ = """dmontauk@gmail.com (Dobromir Montauk)"""

import csv
import StringIO
from dashomatic import BadDataError

def InsertOrGrowList(mutable_list,position,item,padding):
  """Place "item" at "position" in mutable_list, growing the list if needed.

  If the list is longer than necessary, we just change mutable_list[position]
  to contain this item.

  Args:
    mutable_list: the list to change (modified in place).
    position: the non-negative position to place "item" in.
    item: some item to place in "position"
    padding: what to put between the last list size, and "position".
             Note the same padding object is reused for every filler slot.
  Returns:
    mutable_list with length position+1 (at least) and item in "position"
  Throws:
    AssertionError if position is negative. (Raised explicitly rather than
    via a bare assert, so the validation survives python -O.)
  """
  if position < 0:
    raise AssertionError("You must pass in a positive position.")
  shortfall = position + 1 - len(mutable_list)
  if shortfall > 0:
    # Repeating the same object matches the original append-in-a-loop
    # behavior: every filler slot references the identical padding object.
    mutable_list.extend([padding] * shortfall)
  mutable_list[position] = item
  return mutable_list

def ListOfDictsToCsv( data_list,headers=None,strict=False,empty="",
                      dialect="excel"):
  """Render a list of dicts as a CSV string.

  Convenience wrapper: converts the dicts to rows with
  ListOfDictsToListOfLists, then serializes them with ListOfListsToCsv.

  Args:
    data_list: a list of dicts.
    headers: optional list of headers to look for (passed through).
    strict: bool, passed to ListOfListsToCsv.
    empty: padding value for missing cells.
    dialect: csv dialect name, e.g. "excel".
  Returns:
    A CSV-formatted string.
  """
  rows = ListOfDictsToListOfLists(data_list, headers, empty)
  return ListOfListsToCsv(rows, strict, empty, dialect)

def ListOfDictsToHtml(data_list,headers=None,strict=False,empty="",
                      table_name=""):
  """Render a list of dicts as an HTML table. Not yet implemented.

  TODO(dobromirv): implement, likely via an HTML csv.Dialect:
    return ListOfDictsToCsv(data_list, headers, strict, empty, HtmlDialect)

  Raises:
    NotImplementedError: always (was AssertionError; NotImplementedError is
    the idiomatic exception for an unimplemented stub).
  """
  raise NotImplementedError("Not implemented")
  
def ListOfListsToCsv(data_list,strict=False,empty="",dialect="excel"):
  """Serialize a list of lists to a CSV string using the given dialect.

  Rows are first normalized (padded to equal length, or validated when
  strict=True) via NormalizeListOfLists.

  Args:
    data_list: a list of lists.
    strict: bool, passed to NormalizeListOfLists.
    empty: padding value for short rows.
    dialect: csv dialect name, e.g. "excel".
  Returns:
    A CSV-formatted string.
  """
  normalized = NormalizeListOfLists(data_list, strict, empty)
  buffer = StringIO.StringIO()
  csv.writer(buffer, dialect).writerows(normalized)
  try:
    return buffer.getvalue()
  finally:
    buffer.close()
  
def ListOfListsToHtml(data_list,strict=False,empty="",table_name=""):
  """Render a list of lists as an HTML table. Not yet implemented.

  TODO(dobromirv): implement, likely via an HTML csv.Dialect:
    return ListOfListsToCsv(data_list, strict, empty, HtmlDialect)

  Raises:
    NotImplementedError: always (was AssertionError; NotImplementedError is
    the idiomatic exception for an unimplemented stub).
  """
  raise NotImplementedError("Not implemented")
  
def ListOfDictsToListOfLists(data_list,headers=None,empty=""):
  """Take a list of dicts and turn it into a list of lists.

  We process the headers of the dicts as we move through the list (unless they
  get passed in, which is faster). New headers basically "add" columns.

  Args:
    data_list: a list of dicts.
    headers: a list of the headers we should look for. If omitted, headers are
             discovered from the dicts' keys as we go.
    empty: padding value for cells with no data (rows are NOT normalized).
  Returns:
    A list of lists (NOT normalized!), with the headers as the first row.
  Throws:
    BadDataError if one of the dicts doesn't have a header in "headers"
  """
  ordered_headers = {}  # header name -> column position
  header_position = 0
  output = []
  if headers:
    for header_position, header in enumerate(headers):
      ordered_headers[header] = header_position

  for d in data_list:
    new_row = []
    loop_headers = headers or d.keys()  # If we have headers, use those.
    for header in loop_headers:
      # "in" instead of the deprecated dict.has_key().
      if header not in d:
        raise BadDataError("Header %s missing in dict %s" % (header,d))
      position = ordered_headers.get(header)
      if position is None:
        # First time we see this header: assign it the next free column.
        ordered_headers[header] = header_position
        position = header_position
        header_position += 1
      InsertOrGrowList(new_row,position,d[header],empty)
    output.append(new_row)

  # We want to append the headers as our first row. If they were discovered
  # on the fly, rebuild them in column order from ordered_headers.
  if not headers:
    headers = []
    for header, position in ordered_headers.items():
      InsertOrGrowList(headers, position, header, None)
  output.insert(0,headers)
  return output
  
def NormalizeListOfLists(data_list,strict=False,empty=""):
  """Take a list of lists and make sure each sublist has the same length.

  Sublists are padded in place; a new outer list is returned (matching the
  behavior of the original map()-based implementation).

  Args:
    data_list: a list of lists.
    strict: bool. If true, throw an error if one of the lengths is different.
    empty: string. What to pad at the end of the lists if strict=False.
  Returns:
    A list of lists. An empty input is returned unchanged.
  Throws:
    BadDataError if strict=True and the lists have the wrong size.
  """
  if not data_list:
    return data_list
  # Compute the lengths once instead of two map(len, ...) passes.
  lengths = [len(row) for row in data_list]
  max_length = max(lengths)
  if strict:
    if max_length != min(lengths):
      raise BadDataError("Your list of lists has different length sublists.")
    return data_list
  for row in data_list:
    # The same "empty" object is reused for every filler slot, matching the
    # original PadList append loop.
    row.extend([empty] * (max_length - len(row)))
  return list(data_list)
    
def NormalizeDataRange(data,max_value=100.0,min_value=0):
  """Takes a list of numbers and scales them to 0..max_value-min_value as
  1-decimal floats.

  None entries are treated as 0. Unlike the original implementation, the
  caller's list is no longer modified in place (the old code replaced None
  with 0 directly in "data", which its own comment questioned as a bug).

  TODO(dobromirv): Do we support negative numbers?

  Args:
    data: data to be normalized.
    max_value: upper bound of the target range.
    min_value: lower bound of the target range.
  Returns:
    A list of scaled values. An empty input is returned unchanged.
  Throws:
    BadDataError if any value is negative.
  """
  if not data:
    return data
  # Treat None as 0 in a single pass (the old while/count/index loop was
  # O(n^2) and mutated the input).
  values = [0 if d is None else d for d in data]
  if min(values) < 0:
    raise BadDataError("We don't currently support data < 0. Data: %s" % values)
  peak = max(values)
  if peak <= 0:
    # There's a bug in pygooglechart which doesn't like *floats* for zeros.
    # So we return ints.
    return [0 for d in values]
  scaling_factor = (max_value-min_value)/peak
  # NOTE(review): the result spans [0, max_value-min_value]; min_value is not
  # added back. Preserved as-is -- confirm this is intended.
  return [round(v * scaling_factor,1) for v in values]




def RollUpData(data,group_by,roll_up,filter_by):
  """Takes data, the "group by" column, and the "roll up" columns, and returns
  aggregate data. See the unit tests for examples.

  Args:
    data: a list of lists, [ [], [], ]
    group_by: int, the column to group by.
    roll_up: list of ints, the columns to sum up.
    filter_by: dict like {0:"3",1:12}. Which columns to filter by, and what
               values. If you pass in a list, each item in the list is deemed
               acceptable.
  Returns:
    A list of lists: one row per group, [group_value, sum, sum, ...].
    An empty input is returned unchanged.
  Throws:
    BadDataError if something is wrong with the list.
  """
  if len(data) == 0:
    return data

  if group_by == '':
    # NOTE(review): preserved from the original, which fell off the end of
    # the function in this case -- confirm callers expect None here.
    return None

  grouped = {}  # group_by value -> {roll_up column -> running sum}
  for row in data:
    # A row passes when every filter column matches its required value
    # (or one of the values, when a list/tuple is given).
    passes_filters = True
    for column, wanted in filter_by.items():
      if isinstance(wanted, (list, tuple)):
        if row[column] not in wanted:
          passes_filters = False
      elif row[column] != wanted:
        passes_filters = False
    if not passes_filters:
      continue

    for column in roll_up:
      # Create the group lazily inside the roll_up loop so that, as in the
      # original, an empty roll_up list produces no output rows at all.
      sums = grouped.setdefault(row[group_by], {})
      if column in sums:
        sums[column] += row[column]
      else:
        sums[column] = row[column]

  # Need to return a list of lists: scrape the dictionary. Row order follows
  # dict iteration order (arbitrary on old Pythons, insertion order on 3.7+).
  result = []
  for group_value, sums in grouped.items():
    out_row = [group_value]
    out_row.extend(sums.values())
    result.append(out_row)
  return result
