from Table import *
from pprint import pprint
import random
from divide import *

def csv(x):
  "Map a bare dataset name to its csv file path under ../data/."
  prefix = '../data/'
  suffix = '.csv'
  return prefix + x + suffix

def eg1(file=csv('weather')):
  t= Table().slurp(file)
  print t.meta
  for x in t.meta:
    pprint(vars(x))

def eg2(file=csv('weathern')):
  t= Table().slurp(file)
  print t.meta
  for x in t.meta:
    pprint(vars(x))

def eg3(file=csv('autompg100K')):
  "Testing loading 100,000 instances. Takes a few secs." 
  print 'Slurping ' + file + '...'
  t = howlong(lambda: Table().slurp(file))
  print  'Load time: ' + str(round(t,2)) + ' seconds.'

def eg4(file=csv('sizes')):
  "Splitting data into a tree of tables: on very simple data."
  random.seed(1)  # fixed seed so the split is reproducible
  tbl = Table().slurp(file)
  tree = idea(Divide(tbl))
  tree.hprint()

def eg5():
  "Splitting data into a tree of tables: on more complex data."
  # BUG FIX: this example previously called eg3 (the load-timing test),
  # which contradicts the docstring. The tree-splitting demo is eg4.
  eg4(csv('nasa93'))

def eg6(file=csv('albrecht')):
  """
  Comparing boring old nearest neighbor vs NN or the leaf cluster
  nearer the test isntance via a LOO study. Boring wins, I suspect
  since this data set is so small that spectral learning cannot find
  interesting structures. Next step: all the other reductions!
  """
  random.seed(1)
  out1=[]
  out2=[]
  for (test,actual,train) in leaveOneOut(file):
    d= Divide(train)
    i = idea(d)
    if not out1:
      i.hprint()
    predictor1 = d.nearest(test).table
    predictor2 = d.table
    nearest1   = predictor1.nearest(test)
    nearest2   = predictor2.nearest(test)
    predicted1 = nearest1[-1]
    predicted2 = nearest2[-1]
    mar1       = abs(predicted1 - actual)/actual
    mar2       = abs(predicted2 - actual)/actual
    out1.append(mar1)
    out2.append(mar2)
  report = lambda x: percentiles(x,10,
                                 [1,3,5,7,9],
                                 filter=lambda y: int(y*100))
  print "us  ",report(out1)
  print "them",report(out2)


