import csv, pickle

"""
Load data from outlays.csv into a nested dictionary
Save data structure to data.pkl in a pickle format (python serialization fromat)
so that later scripts can use this data without  reparsing the csv.
"""

MaxRows = 1000000   # Load at most this many data rows; set smaller to load only the first n records


def load_outlays(csv_path, max_rows=MaxRows, verbose=True):
    """Parse the outlays CSV into a nested dictionary.

    Comma-separated source: Federal Budget 1962-2009,
    http://www.whitehouse.gov/omb/budget/fy2009/db.html

    Args:
        csv_path: path to the outlays CSV file; first row is the header.
        max_rows: stop before this 1-based row number (header is row 0),
            matching the original ``rowNum < MaxRows`` cutoff.
        verbose: echo each row's key fields while loading, as the
            original script did.

    Returns:
        (data, field_names) where data is nested as
        Agency name -> Bureau name -> Account name -> Subfunction title
        -> {year (int, 1962..2009): dollar value (raw CSV string)},
        and field_names is the list of column headers.
    """
    # newline='' is the documented way to hand a text file to csv in Python 3.
    with open(csv_path, newline='') as f:
        reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        rows = iter(reader)
        field_names = next(rows, [])  # first row contains column names
        if not field_names:
            return {}, []  # empty file: nothing to load

        # Hoist the O(n) list.index lookups out of the per-row loop; the
        # original recomputed each of these for every row.
        agency_col = field_names.index("Agency name")
        bureau_col = field_names.index("Bureau name")
        account_col = field_names.index("Account name")
        subfn_col = field_names.index("Subfunction title")
        col_2007 = field_names.index("2007")
        year_cols = [(year, field_names.index(str(year)))
                     for year in range(1962, 2010)]

        data = {}
        for row_num, row in enumerate(rows, start=1):
            if row_num >= max_rows:
                break  # original skipped these rows; breaking avoids reading the rest

            if verbose:
                print(row_num)
                print("Agency: ", row[agency_col])
                print("Bureau: ", row[bureau_col])
                print("Account: ", row[account_col])
                print("Subfunction: ", row[subfn_col])
                print("2007: ", row[col_2007])

            # Create the four nesting levels on demand, then fill in the
            # dollar value for every year.
            leaf = (data.setdefault(row[agency_col], {})
                        .setdefault(row[bureau_col], {})
                        .setdefault(row[account_col], {})
                        .setdefault(row[subfn_col], {}))
            for year, col in year_cols:
                leaf[year] = row[col]

        return data, field_names


def save_pickle(data, field_names, pkl_path='outlays.pkl'):
    """Serialize data, then field_names, into pkl_path.

    Two sequential pickle.dump calls in this order, so downstream scripts
    can pickle.load them back in the same order.
    """
    with open(pkl_path, 'wb') as output:
        pickle.dump(data, output)
        pickle.dump(field_names, output)


def print_tree(data):
    """Demonstrate how the nested dictionary can be looped through.

    The first three levels are printed sorted; subfunctions are printed
    in dictionary order, matching the original output. Year values are
    omitted to keep the output uncluttered.
    """
    for agency in sorted(data):
        print(" > ", agency)
        for bureau in sorted(data[agency]):
            print("    >> ", bureau)
            for account in sorted(data[agency][bureau]):
                print("         >>> ", account)
                for subfunction in data[agency][bureau][account]:
                    print("           >>>> ", subfunction)


if __name__ == "__main__":
    data, field_names = load_outlays("outlays.csv")
    save_pickle(data, field_names)
    print_tree(data)
