#!/usr/bin/python2.4
#
# Copyright 2007 Dobromir Montauk. All Rights Reserved.

"""This module implements the functions for changing data into various
text-friendly states (like CSV, HTML tables...)
"""

__author__ = """dmontauk@gmail.com (Dobromir Montauk)"""

import csv
import StringIO
from dashomatic import BadDataError

def InsertOrGrowList(mutable_list, position, item, padding):
  """Place "item" at mutable_list[position], growing the list if needed.

  If the list is already longer than necessary, we just change
  mutable_list[position] to contain this item.

  Args:
    mutable_list: the list to change (mutated in place).
    position: non-negative index at which to place "item".
    item: some item to place in "position"
    padding: what to put between the last list size, and "position"
  Returns:
    mutable_list with length position+1 (at least) and item in "position"
  Throws:
    AssertionError if position is negative.
  """
  assert position >= 0, "You must pass in a positive position."
  if len(mutable_list) <= position:
    # Grow in one step instead of appending padding one element at a time.
    mutable_list.extend([padding] * (position + 1 - len(mutable_list)))
  mutable_list[position] = item
  return mutable_list

def ListOfDictsToCsv(data_list, headers=None, strict=False, empty="",
                     dialect="excel"):
  """Convert a list of dicts straight to CSV text.

  Convenience wrapper: first flattens the dicts into rows via
  ListOfDictsToListOfLists, then serializes them with ListOfListsToCsv.

  Args:
    data_list: a list of dicts.
    headers: optional list of headers fixing the column order.
    strict: bool, passed through to ListOfListsToCsv.
    empty: padding value for missing cells.
    dialect: csv dialect name passed to the csv module.
  Returns:
    A CSV-formatted string.
  """
  rows = ListOfDictsToListOfLists(data_list, headers, empty)
  return ListOfListsToCsv(rows, strict, empty, dialect)

def ListOfDictsToHtml(data_list, headers=None, strict=False, empty="",
                      table_name=""):
  """Render a list of dicts as an HTML table. Not implemented yet.

  Throws:
    NotImplementedError always.
  """
  # NotImplementedError is the conventional exception for a stub;
  # AssertionError would suggest a failed internal invariant instead.
  raise NotImplementedError("Not implemented")
  
def ListOfListsToCsv(data_list, strict=False, empty="", dialect="excel"):
  """Serialize a list of lists into a CSV string.

  The rows are first normalized to a common length, then written out with
  the csv module using the requested dialect.

  Args:
    data_list: a list of lists.
    strict: bool, passed through to NormalizeListOfLists.
    empty: padding value for short rows.
    dialect: csv dialect name passed to the csv module.
  Returns:
    A CSV-formatted string.
  """
  rows = NormalizeListOfLists(data_list, strict, empty)
  buffer = StringIO.StringIO()
  csv.writer(buffer, dialect).writerows(rows)
  csv_text = buffer.getvalue()
  buffer.close()
  return csv_text
  
def ListOfListsToHtml(data_list, strict=False, empty="", table_name=""):
  """Render a list of lists as an HTML table. Not implemented yet.

  Throws:
    NotImplementedError always.
  """
  # NotImplementedError is the conventional exception for a stub;
  # AssertionError would suggest a failed internal invariant instead.
  raise NotImplementedError("Not implemented")
  
def ListOfDictsToListOfLists(data_list, headers=None, empty=""):
  """Take a list of dicts and turn it into a list of lists.

  We process the headers of the dicts as we move through the list (unless they
  get passed in, which is faster). New headers basically "add" columns: each
  header is assigned a column position the first time it is seen, and rows are
  padded with "empty" where an earlier-seen header is absent.

  Args:
    data_list: a list of dicts
    headers: a list of the headers we should look for, in output column order.
    empty: padding value for positions a row does not fill.
  Returns:
    A list of lists (NOT normalized!); the first row is the header row.
  Throws:
    BadDataError if one of the dicts doesn't have a header in "headers"
  """
  ordered_headers = {}  # header -> column position
  header_position = 0
  output = []
  if headers:
    for header_position, header in enumerate(headers):
      ordered_headers[header] = header_position

  for d in data_list:
    new_row = []
    loop_headers = headers or d.keys()  # If we have headers, use those.
    for header in loop_headers:
      # "in" instead of the deprecated dict.has_key().
      if header not in d:
        raise BadDataError("Header %s missing in dict %s" % (header, d))
      position = ordered_headers.get(header)
      if position is None:
        # First time we've seen this header: give it the next free column.
        ordered_headers[header] = header_position
        position = header_position
        header_position += 1
      InsertOrGrowList(new_row, position, d[header], empty)
    output.append(new_row)

  # We want to append the headers as our first row; synthesize the header
  # row when the caller didn't supply one.
  if not headers:
    headers = []
    for header in ordered_headers:
      InsertOrGrowList(headers, ordered_headers[header], header, None)
  output.insert(0, headers)
  return output
  
def NormalizeListOfLists(data_list, strict=False, empty=""):
  """Take a list of lists and make sure each sublist has the same length.

  Short sublists are padded in place with "empty" up to the longest
  sublist's length (unless strict).

  Args:
    data_list: a list of lists.
    strict: bool. If true, throw an error if one of the lengths is different.
    empty: string. What to pad at the end of the lists if strict=False.
  Returns:
    A list of lists. An empty data_list is returned unchanged.
  Throws:
    BadDataError if strict=True and the lists have the wrong size.
  """
  if data_list == []:
    return data_list
  # Single pass over the lengths instead of two map() passes.
  lengths = [len(sublist) for sublist in data_list]
  max_length = max(lengths)
  if strict:
    if max_length != min(lengths):
      raise BadDataError("Your list of lists has different length sublists.")
    return data_list
  # Pad each short sublist in place; return a fresh outer list over the same
  # (mutated) sublists, mirroring what map() returned in the original.
  for sublist in data_list:
    sublist.extend([empty] * (max_length - len(sublist)))
  return list(data_list)
    
def NormalizedDataRange(data, max_value=100, min_value=0):
  """Takes a list of numbers and rescales them to [min_value, max_value]
  (1 decimal floats).

  None entries are treated as 0 (the input list is mutated in place).
  Nested lists of numbers are delegated to NormalizeNestedListOfData.

  TODO(dobromirv): Do we support negative numbers?

  Args:
    data: data to be normalized. Can be a nested list of numbers as well.
    max_value: top of the output range (default 100).
    min_value: bottom of the output range (default 0).
  Returns:
    A list of values between min_value & max_value.
  Throws:
    BadDataError if any value is negative.
  """
  # Check emptiness BEFORE touching data[0]; the old ordering raised
  # IndexError on an empty list.
  if len(data) == 0:
    return data
  if isinstance(data[0], list):
    return NormalizeNestedListOfData(data, max_value, min_value)

  # Replace all the "None"s with "0". Feature or bug?
  while data.count(None):
    data[data.index(None)] = 0
  if min(data) < 0:
    raise BadDataError("We don't currently support data < 0. Data: %s" % data)
  if max(data) <= 0:
    # There's a bug in pygooglechart which doesn't like *floats* for zeros.
    # So we return ints.
    return [0 for d in data]
  # float() guards against Python 2 integer division truncating the factor
  # (the nested-list sibling already does this).
  scaling_factor = float(max_value - min_value) / max(data)
  return [round(d * scaling_factor, 1) for d in data]

def NormalizeNestedListOfData(nested_list, max_value=100, min_value=0):
  """Takes a nested list of numbers and rescales every value into
  [min_value, max_value] (2 decimal floats).

  Sublists are cleaned in place (None -> 0), then every value is scaled by a
  single factor derived from the global maximum. If every value is zero, each
  sublist is replaced by a list of 1s instead.

  Args:
    nested_list: a list of lists of numbers (may contain None, treated as 0).
    max_value: top of the output range (default 100).
    min_value: bottom of the output range (default 0).
  Returns:
    nested_list, with each sublist replaced by scaled values.
  Throws:
    BadDataError if any value is negative.
  """
  if len(nested_list) == 0:
    return nested_list
  for data in nested_list:
    # Treat missing values as zero (mutates the sublist in place).
    while data.count(None):
      data[data.index(None)] = 0
    if min(data) < 0:
      raise BadDataError("We don't support data < 0. Data: %s"%data)
    # NOTE: when min(data) >= 0 and max(data) <= 0 every entry is already 0,
    # so the old "for d in data: d = 0" loop was a no-op and is dropped.

  # Find the true global maximum. max(nested_list) would compare the
  # sublists lexicographically and could pick a list that does not contain
  # the largest value (e.g. [[1, 500], [2, 3]]).
  max_value_in_nested_list = round(max(max(data) for data in nested_list), 2)

  if not max_value_in_nested_list:
    # All values are zero: replace each row with 1s. Indexing via enumerate
    # instead of nested_list.index(...), which could hit the wrong slot when
    # sublists compare equal.
    for i, data in enumerate(nested_list):
      nested_list[i] = [1 for item in data]
    return nested_list

  scaling_factor = float(max_value - min_value) / max_value_in_nested_list
  for i, data in enumerate(nested_list):
    nested_list[i] = [round(d * scaling_factor, 2) for d in data]
  return nested_list

def RollUpData(data, group_by, roll_up, filter_by):
  """Takes data, the "group by" column, and the "roll up" columns, and returns
  aggregate data. See the unit tests for examples.

  Args:
    data: a list of lists, [ [], [], ]
    group_by: int, the column to group by, or None to only filter the rows.
    roll_up: list of ints, the columns to sum up (ignored when group_by is
             None; with an empty roll_up no aggregate rows are produced).
    filter_by: dict like {0:"3",1:12}. Which columns to filter by, and what
               values. If you pass in a list, each item in the list is deemed
               acceptable.
  Returns:
    A list of lists. With group_by set, each output row is
    [group_value, summed roll_up values...]; the summed columns appear in
    the internal dict's value order, which is not guaranteed to match
    roll_up order on older Pythons.
  """
  def _PassesFilters(row):
    # True when every filtered column matches its required value(s).
    for column, wanted in filter_by.items():
      if isinstance(wanted, (list, tuple)):
        if row[column] not in wanted:
          return False
      elif row[column] != wanted:
        return False
    return True

  if not len(data):
    return data

  if group_by is None:
    # No grouping requested: just drop the rows that fail the filters.
    return [row for row in data if _PassesFilters(row)]

  group_sums = {}  # group value -> {roll_up column -> running sum}
  for row in data:
    if not _PassesFilters(row):
      continue
    for column in roll_up:
      # setdefault inside the loop so a row with an empty roll_up list
      # creates no group entry (matching the original behavior).
      sums = group_sums.setdefault(row[group_by], {})
      if column in sums:
        sums[column] += row[column]
      else:
        sums[column] = row[column]

  return_var = []  # the final list being returned
  for group_value, sums in group_sums.items():
    # Append preserves group discovery order.
    aggregate_row = [group_value]
    aggregate_row.extend(sums.values())
    return_var.append(aggregate_row)
  return return_var
