"""
Occam's knife (rapid recursive spectral learning)
(c) 2012, Tim Menzies, tim@menzies.us, LGPL v3.0
Cropping complexity, culling confusion, and cutting the crap since 2012
   
           .----------------._________________
           |=()=========---  \-----------     `\ 
      jgs  \_________________/^^^^^^^^^^^^^^^^^^

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

import random
from pprint import pprint
from lib    import *
from dict   import *
from thing  import *
from about  import *
from table  import *
from divide import *
  
def eg1(file='data/weather.csv'):
  t= Table().slurp(file)
  print t.meta
  for x in t.meta:
    pprint(vars(x))

def eg2(file='data/autompg100K.csv'):
  "Testing loading 100,000 instances. Takes a few secs." 
  # Result deliberately discarded: this example only exercises load speed.
  Table().slurp(file)

def eg3(file='data/sizes.csv'):
  "Splitting data into a tree of tables: on very simple data."
  random.seed(1)  # fixed seed so the split is reproducible
  tree = idea(Divide(Table().slurp(file)))
  tree.hprint()

def eg4():
  "Splitting data into a tree of tables: on more complex data."
  # Same demo as eg3, just pointed at a larger data set.
  eg3('data/nasa93.csv')

def eg5(file='data/albrecht.csv'):
  """
  Comparing boring old nearest neighbor vs NN or the leaf cluster
  nearer the test isntance via a LOO study. Boring wins, I suspect
  since this data set is so small that spectral learning cannot find
  interesting structures. Next step: all the other reductions!
  """
  random.seed(1)
  out1=[]
  out2=[]
  for (test,actual,train) in leaveOneOut(file):
    d= Divide(train)
    i = idea(d)
    if not out1:
      i.hprint()
    predictor1 = d.nearest(test).table
    predictor2 = d.table
    nearest1   = predictor1.nearest(test)
    nearest2   = predictor2.nearest(test)
    predicted1 = nearest1[-1]
    predicted2 = nearest2[-1]
    mar1       = abs(predicted1 - actual)/actual
    mar2       = abs(predicted2 - actual)/actual
    out1.append(mar1)
    out2.append(mar2)
  report = lambda x: percentiles(x,10,
                                 [1,3,5,7,9],
                                 filter=lambda y: int(y*100))
  print "us  ",report(out1)
  print "them",report(out2)

if __name__ == '__main__':    
  # Command-line driver: with args, evaluate them as Python (e.g. "eg3()");
  # with no args, run every demo in order.
  # NOTE(review): eval() on raw command-line text is a code-injection risk
  # if this script is ever driven by untrusted input — confirm intended use.
  if hasArgs():
    eval(runArgs()) 
  else:
    egs(eg1,eg2,eg3,eg4,eg5)
