rm(list=ls()) # NOTE(review): clearing the workspace inside a script is discouraged; kept for compatibility
library(sp)
library(maptools)
#PUMA <- '01400' #Knoxville PUMA CODE
# I am no longer using Knoxville, I am using Davidson
# Enter the FIPS codes for the five PUMAs covering Davidson County
PUMA <- c("02201", "02202", "02203", "02204", "02205")


# For each PUMA, we want to 1) Extract the SF3 data and 2) Extract the necessary midrodata

# Section 1. Extracting SF3 data
# I have a little SAS program... Davidson_run.sas, which reads through the .uf3 and .geo files and extracts all the census tract fields for Davidson County, and saves in a SAS export format as Davidson.xpt.  The file is reproduced below.  Notes afterward


###################################################
###################################################
###################################################

#dm "out;clear;log;clear;";#options xwait;#options mprint mlogic;#/* These are for the computer at the office#libname census 'X:\www2.census.gov\census_2000\datasets\Summary_File_3/Tennessee/SAS';##%INCLUDE 'X:\www2.census.gov\census_2000\datasets\Summary_File_3/SF3SAS/census_SF3.sas';#*/##/* These are for the laptop */#libname census 'Y:\Data\www2.census.gov\census_2000\datasets\Summary_File_3/Tennessee/SAS';#%INCLUDE 'Y:\Data\www2.census.gov\census_2000\datasets\Summary_File_3/SF3SAS/census_SF3.sas';###%census_sf3(#tn,#0 1 6 15 56 57 58 59 60 61,#Y:\Data\www2.census.gov\census_2000\datasets\Summary_File_3\SF3SAS,#Y:\Data\www2.census.gov\census_2000\datasets\Summary_File_3\Tennessee,#C:\Program Files\7-Zip\#)###DATA census.tract(where=(SUMLEV="140" and COUNTY="037"));#MERGE census.geo #	census.SF301 #	census.SF306#	census.SF315#	census.SF356#	census.SF357#	census.SF358#	census.SF359#	census.SF360#	census.SF361#/*	census.SF309*/#;#by LOGRECNO;#RUN;###PROC DATASETS library=census;#save tract;#RUN;##libname  Davidson xport 'Y:\Data\www2.census.gov\census_2000\datasets\Summary_File_3/Tennessee/SAS/Davidson.xpt';#PROC COPY in=census out=Davidson memtype=data;#select Tract;#RUN;


###################################################
###################################################
###################################################

# SAS NOTES: 
#	1) I hate SAS! 
#	2) there is nifty code on the census server for extracting from .uf3 files... I modified it as census_SF3.sas
#	3) I hate SAS!

# At the end of that SAS script, I have a file called Davidson.csv with the SF3 data.  We can now read this into R.
# NOTE(review): the SAS program above exports Davidson.xpt; presumably an
# additional step converts it to Davidson.csv -- confirm the pipeline.
SF3 <- read.csv('~/Data/www2.census.gov/census_2000/datasets/Summary_File_3/Tennessee/SAS/Davidson.csv')
#SF3 has thousands of columns, column names are as in the SF3 Tech Doc.
# The rows of SF3 are census tracts.

#The SF3 dataframe does not have the code for the PUMA which contains the tract. I will download a shapefile with the necessary PUMAs, and then overlay the lat/lon coords of the tracts on the PUMA SHAPEFILE.  In this way, I can determine which PUMA contains each census tract, and save the PUMA as a new data variable in the tract-level summary file

# Turn SF3 into a Spatial Points DataFrame
# First, we need to construct the coordinate vector.  There are centroids in SF3, but they are characters, have a plus or minus, and the decimal is only implied. 
# Build the numeric Lon/Lat coordinates from the SF3 centroid fields.
# INTPTLON/INTPTLAT are character strings with an implied decimal point;
# the decimal is re-inserted after the 3rd character of INTPTLON and after
# the 2nd character of INTPTLAT, matching the original row-wise logic.
# (NOTE(review): this assumes the latitude field carries no leading sign
# character in this extract -- confirm against the raw data.)
# substr(), paste0() and as.double() are all vectorized, so the original
# per-row for loop is unnecessary.
coord <- data.frame(
	Lon = as.double(paste0(substr(SF3$INTPTLON, 1, 3), '.', substr(SF3$INTPTLON, 4, 9))),
	Lat = as.double(paste0(substr(SF3$INTPTLAT, 1, 2), '.', substr(SF3$INTPTLAT, 3, 8)))
)

# Create a spatial points dataframe for each tract... the point location is the centroid we just identified.
# NOTE(review): the CRS is declared NAD27 to match the PUMA shapefile read
# below -- confirm that the SF3 internal-point coordinates really are NAD27.
spSF3 <- SpatialPointsDataFrame(coord,data=SF3,proj4string=CRS("+proj=longlat +datum=NAD27"))

# Now I have a tract-level spatial dataframe, and am ready to overlay it on the PUMA boundaries.  Ooops, we need the PUMA boundaries....

# Read in the PUMA shapefile as unhelpfully titled variable xx
# NOTE(review): readShapeSpatial() is from maptools, which has since been
# retired; rgdal/sf readers are the modern replacements -- migrate when
# convenient.
xx <- readShapeSpatial('~/Data/www2.census.gov/geo/cob/bdy/pu/p500shp/p547_d00_shp/p547_d00.shp',   proj4string=CRS("+proj=longlat +datum=NAD27"))
# I read it in from my hard drive, but it is available at http://www.census.gov/geo/www/cob/pu5_2000.html
# A direct ftp address is http://www2.census.gov/geo/tiger/TIGER2009/47_TENNESSEE/


#Create new vector PUMA5 that contains the 5 digit PUMA code
# NOTE(review): overlay() is the deprecated predecessor of sp::over(); for
# each point in spSF3 it is used here to get the index of the polygon in xx
# that contains the point (NA if none), which then indexes xx$PUMA5.
spSF3$PUMA5<-xx$PUMA5[overlay(spSF3,xx)]
# plot(spSF3) will show the centroid locations

# At this point, spSF3 is a spatial data frame that has all the tract level data AND a new variable called PUMA5 that has the PUMA containing it.

#############################################
# Section 2: Extracting microdata

# The microdata for the 2000 census is in a real nasty format.  As a brief introduction.... Some variables, such as household income, or tenure, are associated with the house.  Other variables, such as race, are associated with individuals.  Thus, there are two type of records, Household Records, and Person records.  The file structure is fixed width, but the household records have a different format than person records.  All of the household and person records are in the SAME data file.  Suppose the first household has 3 people, the 2nd house is vacant, and the third household has one person, the file would look like this
#H1 data
#P11 data
#P12 data
#P13 data
#H2 data
#H3 data
#P31 data


# The file PUMS5_47.TXT was downloaded from census, and contains the PUMS5 data for the ENTIRE STATE of TN.
# Open a connection to the fixed-width PUMS file; it is read with readLines() below.
fid <- file('~/Data/www2.census.gov/census_2000/datasets/PUMS/Tennessee/PUMS5_47.TXT')
# The original file was at www2.census.gov/census_2000/datasets/PUMS/Tennessee/PUMS5_47.TXT
# On my laptop....
#fid <- file('/Volumes/Macintosh\ HD\ 2/Data/www2.census.gov/census_2000/datasets/PUMS/Tennessee/PUMS5_47.TXT')

# We will need to process this file to get back just those records for our PUMA, and to get the right columns and have them labeled....


# These are all the column names I will be using
# All of the output column names, in the order they will appear in the
# household-level PUMS data frame built below.
colNames <- c(
  "STATE",
  "PUMA5",
  "HWEIGHT",    # Housing unit weight
  "PERSONS",
  "UNITTYPE",
  "VACSTAT",
  "TENURE",
  "BLDGSZ",
  "ROOMS",
  "RENT",
  "VALUE",
  "SVAL",
  "SRNT",
  "HINC",
  "PAOC",
  "HH_PWEIGHT", # Person weight of the householder
  "HH_SEX",
  "PERSONS_PW", # Total of person weights for the entire household
  "WHITE",      # White alone
  "BLACK",      # Black alone
  "HISPAN",
  "WHITE_PW",   # Total of person weights for White persons in the household
  "BLACK_PW",   # Total of person weights for Black persons in the household
  "HISPAN_PW",
  "HH_WHITE",   # Householder is White alone
  "HH_BLACK",   # Householder is Black alone
  "HH_HISPAN"
)

# Most of the variables are in the household records, but the race ones are in the person records.
# The datastructure I want at the end of the day has one row per household.  This is a 1-1 match for household records, but not for person records.  Suppose I have a dataframe as
# ID	Tenure  Black 	White 	HH_BLACK	HH_WHITE
# HH1	Own		0		1		1			0
# HH2	Rent	1		0		1			0
# HH3	Own		2		1		0			1

#NOTE: Tenure is a household variable, and is easy to get out of the raw data, but Black is the number of people in the household who are black, and needs to come from all of the person records in that household, and HH_BLACK is whether the householder is black, and needs to come from the 1st household in the file (the householder is always the 1st person listed after the house record.)

# Read the whole fixed-width PUMS file into a character vector, one element
# per raw record (household and person records interleaved).
char.vec <- readLines(fid,n=-1)
# Release the connection object now that everything is in memory.
close(fid)

# Char.vec is the unprocessed data

# Unfortunately, the main loop is a while loop, so I need to know how many records for our PUMS there will be...
# Find the number of records
Nlines <- length(char.vec)

# Determine the number of housing units in our PUMA.
# A raw record is a household record if its first character is 'H'; the
# PUMA code occupies columns 14-18 of a household record.
# substr() is vectorized, so the whole file can be scanned without an
# explicit loop; %in% is equivalent to the original !is.na(match(...)).
numHouses <- sum(substr(char.vec, 1, 1) == 'H' &
                 substr(char.vec, 14, 18) %in% PUMA)

#numHouses now tells me how many houses are in the PUMA

# Now I initialize the output dataframe: one row per household in our PUMAs.
# (Renamed the column-count variable so it does not shadow base::ncol().)
n_col <- length(colNames) # The number of columns we need
PUMS <- data.frame(matrix(0,nrow=numHouses,ncol=n_col)) # an empty dataframe
names(PUMS) <- colNames # Assign column names

#############################
# Now comes the hard part: a single pass over the raw records.
# Each iteration lands i on a household ('H') record.  If that household is
# in one of our PUMAs, we pull the household variables from fixed column
# positions (per the PUMS Tech Doc), then walk the following person ('P')
# records to accumulate race/ethnicity counts and person weights.  Either
# way, i is then advanced past the household's person records to the next
# household record.
# On my MacBook Pro, the following while loop takes over an hour to run!
i <- 1 #row iterator for raw data file
row <- 1 #row iterator for output data file (each row is a new household)
while(i<=Nlines) {
	if(!is.na(match(substr(char.vec[i],14,18),PUMA))) { #if this household is in our PUMA, then
		#Collect variables from the house record
	PUMS$STATE[row] <- substr(char.vec[i],10,11)
	PUMS$PUMA5[row] <- substr(char.vec[i],14,18)
	PUMS$HWEIGHT[row] <- as.double(substr(char.vec[i],102,105))
	PUMS$PERSONS[row] <- as.double(substr(char.vec[i],106,107))
	PUMS$UNITTYPE[row] <- substr(char.vec[i],108,108)
	PUMS$VACSTAT[row] <- substr(char.vec[i],111,111)
	PUMS$TENURE[row] <- substr(char.vec[i],113,113)
	PUMS$ROOMS[row] <- substr(char.vec[i],122,122)
	PUMS$RENT[row] <- as.double(substr(char.vec[i],162,165))
	PUMS$VALUE[row] <- substr(char.vec[i],202,203)
	PUMS$BLDGSZ[row] <- substr(char.vec[i],115,116)
	PUMS$PAOC[row] <- substr(char.vec[i],225,225)
	PUMS$SVAL[row] <- substr(char.vec[i],227,227)
	PUMS$SRNT[row] <- substr(char.vec[i],236,236)
	PUMS$HINC[row] <- as.double(substr(char.vec[i],251,258))
	#Now collect data from the person records
	# Create temporary counters for the number of Blacks, Whites, and Hispanics in the house...
	BLACK_TEMP <- 0
	HISP_TEMP <- 0
	WHITE_TEMP <- 0
	PUMS$WHITE_PW[row] <- 0
	PUMS$BLACK_PW[row] <- 0
	PUMS$HISPAN_PW[row] <- 0
	PUMS$PERSONS_PW[row] <- 0
	if(PUMS$PERSONS[row]>0){ #PUMS$PERSONS[row] is how many persons are in the house
		for(j in 1:PUMS$PERSONS[row]){ #Loop over each person j in the house
			NUMRACE <- substr(char.vec[i+j],31,31) # The number of races for person j
			PWEIGHT <- as.double(substr(char.vec[i+j],13,16)) # Person weight for j
			PUMS$PERSONS_PW[row] <- PUMS$PERSONS_PW[row]+PWEIGHT #Add person weight of j to the house total
			if(j==1) { # The first person record after the house record is the householder; save extra data
				PUMS$HH_PWEIGHT[row] <- PWEIGHT
				PUMS$HH_SEX[row] <- substr(char.vec[i+j],23,23)
				PUMS$HH_HISPAN[row] <- (substr(char.vec[i+j],28,29)!='01')
				PUMS$HH_BLACK[row] <- (substr(char.vec[i+j],33,33)=='1' & NUMRACE=='1')
				PUMS$HH_WHITE[row] <- (substr(char.vec[i+j],32,32)=='1' & NUMRACE=='1')
				}
			if(substr(char.vec[i+j],28,29)!='01') { #If person j is hispanic
				HISP_TEMP <- HISP_TEMP+1 #Increment the hispanic counter
				# BUG FIX: this previously accumulated into PUMS$HISP_PW, a
				# column that does not exist (the column initialized above is
				# HISPAN_PW), producing a zero-length replacement error on the
				# first Hispanic person encountered.
				PUMS$HISPAN_PW[row] <- PUMS$HISPAN_PW[row]+PWEIGHT #Increment the hispanic person weight
				}
			if(NUMRACE=='1'){ #If the person identifies just one race, then they might be Black Alone or White Alone
				BLACK_TEMP <- BLACK_TEMP+as.double(substr(char.vec[i+j],33,33)) #Increment Black counter (might be +0)
				WHITE_TEMP <- WHITE_TEMP+as.double(substr(char.vec[i+j],32,32)) #Increment White counter
				PUMS$WHITE_PW[row] <- PUMS$WHITE_PW[row]+
					PWEIGHT*as.double(substr(char.vec[i+j],32,32)) #Increment White person weight
				PUMS$BLACK_PW[row] <- PUMS$BLACK_PW[row]+
					PWEIGHT*as.double(substr(char.vec[i+j],33,33)) #Increment Black person weight
				}
		}
	} else PUMS$HH_PWEIGHT[row] <- NA # else, there are no people in the house (vacant), so the householder weight is undefined.
	PUMS$HISPAN[row] <- HISP_TEMP
	PUMS$BLACK[row] <- BLACK_TEMP
	PUMS$WHITE[row] <- WHITE_TEMP
	i <- i+PUMS$PERSONS[row]+1 #Advance to the next house record (we have to skip over the person records)
	row <- row+1 #Increment row counter in the output data frame
	}
	else { # This else is if the household is not in our PUMA, we must advance to the next household: columns 106-107 hold the number of persons in this house, and we need to skip them to find the next household record.
		i <- i+ as.double(substr(char.vec[i],106,107))+1
		}
	}
# end of while loop
####################
	
# At this point, the PUMS dataframe holds one row per household in our
# PUMAs with all the raw data, but the codes still need labeling.
# See the Tech Doc for what each variable is.
PUMS$UNITTYPE <- factor(
	PUMS$UNITTYPE,
	levels = c("0", "1", "2"),
	labels = c(
		"Housing unit",
		"Institutional group quarters",
		"Noninstitutional group quarters"
	)
)
# Recode vacancy status (raw codes 0-6) into a labeled factor.
# (Changed `=` to `<-` for assignment, per R convention.)
PUMS$VACSTAT <- factor(PUMS$VACSTAT,
		levels = c('0','1','2','3','4','5','6'),
		labels = c(	'Not in universe (occupied or GQ)',
				'For rent',
				'For sale only',
				'Rented or sold, not occupied',
				'For seasonal, recreational or occasional use',
				'For migrant workers',
				'Other vacant'))
# Recode tenure (raw codes 0-4) into a labeled factor.
# (Changed `=` to `<-` for assignment, per R convention.)
PUMS$TENURE <- factor(PUMS$TENURE,
		levels = c('0','1','2','3','4'),
		labels = c(	'Not in universe (vacant or GQ)',
				'Owned - mortgage',
				'Owned - free and clear',
				'Rented for cash rent',
				'Occupied without payment of cash rent'))
# Recode building size into a labeled factor.
# NOTE(review): unlike the variables above, the levels here are taken from
# the observed data (levels(factor(...))), so the 11 labels assume all 11
# BLDGSZ codes actually occur in these PUMAs -- factor() errors if the
# lengths differ.  Consider hard-coding the level codes as done elsewhere.
# (Changed `=` to `<-` for assignment, per R convention.)
PUMS$BLDGSZ <- factor(PUMS$BLDGSZ,
		levels = levels(factor(PUMS$BLDGSZ)),
		labels = c(	'Not in universe (GQ)',
				'mobile home',
				'one-family - detached',
				'one-family - attached',
				'2 apartments',
				'3 or 4 apartments',
				'5 to 9 apartments',		
				'10 to 19 apartments',
				'20 to 49 apartments',
				'50 or more apartments',
				'Boat, RV, van, etc.'))
# Recode property value brackets into a labeled factor.
# NOTE(review): as with BLDGSZ, the levels come from the observed data, so
# the 25 labels assume all 25 VALUE codes occur in these PUMAs.  Also, the
# jump from '$400,000 to $499,999' to '$700,000 to $749,999' looks like a
# transcription slip for the Tech Doc's '$500,000 to $749,999' bracket --
# verify against the 2000 PUMS Tech Doc before relying on these labels.
PUMS$VALUE <- factor(PUMS$VALUE,
				levels = levels(factor(PUMS$VALUE)),
				labels = c('Not in universe (GQ, TENURE = 3-4, or VACSTAT= 1, 3-6)',
						'Less than $10,000',
						'$10,000 to $14,999',
					'$15,000 to $19,999', # typo fix: was '$15,000 to $19,99'
					'$20,000 to $24,999',
					'$25,000 to $29,999',
					'$30,000 to $34,999',
					'$35,000 to $39,999',
					'$40,000 to $49,999',
					'$50,000 to $59,999',
					'$60,000 to $69,999',
					'$70,000 to $79,999',
					'$80,000 to $89,999',
					'$90,000 to $99,999',
					'$100,000 to $124,999',
					'$125,000 to $149,999',
					'$150,000 to $174,999',
					'$175,000 to $199,999',
					'$200,000 to $249,999',
					'$250,000 to $299,999',
					'$300,000 to $399,999',
					'$400,000 to $499,999',
					'$700,000 to $749,999',
					'$750,000 to $999,999',
					'$1,000,000 or more'))
# Recode presence-and-age-of-own-children (raw codes 0-4) into a labeled factor.
PUMS$PAOC <- factor(
	PUMS$PAOC,
	levels = c("0", "1", "2", "3", "4"),
	labels = c(
		"Not in universe (vacant or GQ)",
		"With own children under 6 years only",
		"With own children 6 to 17 years only",
		"With own children under 6 years and 6 to 17 years",
		"No own children under 18 years"
	)
)
	
# Save workspace, since the previous part is the most time intensive
save.image('~/Work/WRSA2010/Data/Davidson_Temp.Rdata')

# It is possible to start right here if Davidson_Temp.Rdata already exists.
# (On a fresh run this load immediately follows the save and is a no-op; it
# exists so the script can be restarted from this checkpoint.)
load('~/Work/WRSA2010/Data/Davidson_Temp.Rdata')

#############################################################
#############################################################
#############################################################
#############################################################
#############################################################
# Fit the model
#############################################################
#############################################################
#############################################################
# We now have all the data we need.  The problem is separable by PUMA, so for each PUMA,
# we will create constraining data matrices from the PUMS and SF3, and save them as 
# Matlab .mat files.
#
#	The relevant tables in SF3 are
#	P006: Total Population # This 1-way table is redundant given the 2-way tables
#	H007: Tenure # This 1-way table is redundant given the 2-way tables
#	P052: Income 
#	P151B: Black Income
#	H11: Tenure x Race
#	H15: Pop x Tenure
#	HCT11: Tenure x Income

# For each PUMA: build the tract-level (Y) and household-level (X) constraint
# matrices from SF3 and PUMS, collapse duplicate households and aggregate
# their weights, reconcile household vs. housing-unit counts, and write the
# result out as a Matlab .mat file.
for (pum_i in 1:length(PUMA)){
	library(sp) # (redundant: sp is already attached at the top of the script)
	PUMA_id <- which(spSF3$PUMA5==PUMA[pum_i]) #Find row numbers for the tracts in this PUMA
	PUMS_id <- which(PUMS$PUMA5==PUMA[pum_i]) #Find row numbers for the households in this PUMA
	##################################
	# Population Block: Total, Black alone, White alone
	##################################
	# (Assigning $ components below silently promotes NULL to a list.)
	TR_POP <- NULL
	HH_POP <- NULL
	# Column 1
	TR_POP$TOT <- (spSF3@data)[PUMA_id,'P006001'] #Total Population
	HH_POP$TOT <- PUMS$PERSONS[PUMS_id]

	# Column 2 #White Alone Population  # This won't be used
	TR_POP$WHITE <- (spSF3@data)[PUMA_id,'P006002'] #White Alone Population
	HH_POP$WHITE <- PUMS$WHITE[PUMS_id]

	# Column 3 #Black Alone Population
	TR_POP$BLACK <- (spSF3@data)[PUMA_id,'P006003']  #Black Alone Population
	HH_POP$BLACK <- PUMS$BLACK[PUMS_id]
	
	# Paste Non Black and Black into two columns
	TR_POP <- cbind(TR_POP$TOT-TR_POP$BLACK,TR_POP$BLACK)
	HH_POP <- cbind(HH_POP$TOT-HH_POP$BLACK,HH_POP$BLACK)

	#############################################
	# Housing Unit Block: Owner, Renter, Vacant  #
	#############################################
	TR_HU <- NULL
	HH_HU <- NULL
	# unclass() on the TENURE factor gives the 1-based level index; indices
	# 2 and 3 are the two 'Owned' levels, 4 and 5 the two renter levels.
	TR_HU$OWN <- (spSF3@data)[PUMA_id,'H007002'] #Owner Occupied HU
	HH_HU$OWN <- as.double(unclass(PUMS$TENURE[PUMS_id])==2 | unclass(PUMS$TENURE[PUMS_id])==3)

	TR_HU$RENT <- (spSF3@data)[PUMA_id,'H007003'] #Renter Occupied HU
	HH_HU$RENT <- as.double(unclass(PUMS$TENURE[PUMS_id])==4 | unclass(PUMS$TENURE[PUMS_id])==5)

	# VACSTAT level index 1 is 'Not in universe (occupied or GQ)', so != 1 means vacant.
	TR_HU$VAC <- (spSF3@data)[PUMA_id,'H008001'] #Vacant HU
	HH_HU$VAC <- as.double(unclass(PUMS$VACSTAT[PUMS_id])!=1)

	TR_HU <- cbind(TR_HU$OWN,TR_HU$RENT,TR_HU$VAC)
	# HH_HU is deliberately left as a list; its components are used individually below.
#	HH_HU <- cbind(HH_HU$OWN,HH_HU$RENT,HH_HU$VAC)

# In order to construct the 2-way table of income by race, we will collect the Household income data, and then HH income for those households headed by someone Black alone, and then collect the residual of Total - Black Alone

	####################################
	# HH Income Block    TOTAL         #
	####################################
	# Income less than 25000
	TR_INC <- NULL
	HH_INC <- NULL
	# This is an aggregate category, we want to sum P052002---P052005
	col_range <- c(which(names((spSF3@data))=='P052002'),which(names((spSF3@data))=='P052005'))
	# col_range is a range of columns containing P052002, P052003, P052004 and P052005
	TR_INC$LOW <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum) #Row (Tract) sum across these columns
	HH_INC$LOW <- as.double(PUMS$HINC[PUMS_id] < 25000 & unclass(PUMS$TENURE[PUMS_id])!=1) #Turn HHincome into a binary variable for low income

	# Income in [25000,50000)
	# This is an aggregate category, we want to sum P052006---P052010
	col_range <- c(which(names((spSF3@data))=='P052006'),which(names((spSF3@data))=='P052010'))
	TR_INC$MED <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum) # Number of people in each tract for income bracket
	HH_INC$MED <- as.double(PUMS$HINC[PUMS_id]>=25000 & PUMS$HINC[PUMS_id] < 50000) # Binary variable for medium income bracket
	
	# Income greater than or equal to 50000
	# This is an aggregate category, we want to sum P052011---P052017
	col_range <- c(which(names((spSF3@data))=='P052011'),which(names((spSF3@data))=='P052017'))
	TR_INC$HI <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum)
	HH_INC$HI <- as.double(PUMS$HINC[PUMS_id]>=50000)

	####################################
	# HH Income Block    BLACK         #
	####################################
	TR_BINC <- NULL
	HH_BINC <- NULL
	# Income less than 25000
	col_range <- c(which(names((spSF3@data))=='P151B002'),which(names((spSF3@data))=='P151B005'))
	TR_BINC$LOW <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum)
	HH_BINC$LOW <- as.double(!is.na(PUMS$HH_BLACK[PUMS_id]) & PUMS$HINC[PUMS_id] < 25000 & PUMS$HH_BLACK[PUMS_id]==1)

	# Income in [25000,50000)
	col_range <- c(which(names((spSF3@data))=='P151B006'),which(names((spSF3@data))=='P151B010'))
	TR_BINC$MED <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum)
	HH_BINC$MED <- as.double(PUMS$HINC[PUMS_id]>=25000 & PUMS$HINC[PUMS_id] < 50000 & PUMS$HH_BLACK[PUMS_id]==1)

	# Income greater than or equal to 50000
	col_range <- c(which(names((spSF3@data))=='P151B011'),which(names((spSF3@data))=='P151B017'))
	TR_BINC$HI <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum)
	HH_BINC$HI <- as.double(PUMS$HINC[PUMS_id]>=50000 & PUMS$HH_BLACK[PUMS_id]==1)

	# Here are 6 more constraints...
	# "White" Income is actually total minus Black Alone
	TR_INC_WH <- cbind(TR_INC$LOW-TR_BINC$LOW,TR_INC$MED-TR_BINC$MED,TR_INC$HI-TR_BINC$HI)
	HH_INC_WH <- cbind(HH_INC$LOW-HH_BINC$LOW,HH_INC$MED-HH_BINC$MED,HH_INC$HI-HH_BINC$HI)
	
	# "Black alone" HH income.
	TR_INC_BL <- cbind(TR_BINC$LOW,TR_BINC$MED,TR_BINC$HI)
	HH_INC_BL <- cbind(HH_BINC$LOW,HH_BINC$MED,HH_BINC$HI)
	
	####################################
	# HH Tenure x Income Block         #
	####################################
	TR_INC_TEN <- NULL
	# Income less than 25000 and own
	# This is an aggregate category, we want to sum HCT011003---HCT011007
	col_range <- c(which(names((spSF3@data))=='HCT011003'),which(names((spSF3@data))=='HCT011007'))
	TR_INC_TEN$LOW_O <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum)
	
	# Income in [25000,50000) and own
	# This is an aggregate category, we want to sum HCT011008---HCT011009
	col_range <- c(which(names((spSF3@data))=='HCT011008'),which(names((spSF3@data))=='HCT011009'))
	TR_INC_TEN$MED_O <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum)
	
	# Income greater than or equal to 50000 and own
	# This is an aggregate category, we want to sum HCT011010---HCT011013
	col_range <- c(which(names((spSF3@data))=='HCT011010'),which(names((spSF3@data))=='HCT011013'))
	TR_INC_TEN$HI_O <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum)
	
	# Income less than 25000 and rent
	# This is an aggregate category, we want to sum HCT011015---HCT011019
	col_range <- c(which(names((spSF3@data))=='HCT011015'),which(names((spSF3@data))=='HCT011019'))
	TR_INC_TEN$LOW_R <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum)
	
	# Income in [25000,50000) and rent
	# This is an aggregate category, we want to sum HCT011020---HCT011021
	col_range <- c(which(names((spSF3@data))=='HCT011020'),which(names((spSF3@data))=='HCT011021'))
	TR_INC_TEN$MED_R <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum)
	
	# Income greater than or equal to 50000 and rent
	# This is an aggregate category, we want to sum HCT011022---HCT011025
	col_range <- c(which(names((spSF3@data))=='HCT011022'),which(names((spSF3@data))=='HCT011025'))
	TR_INC_TEN$HI_R <- apply((spSF3@data)[PUMA_id,col_range[1]:col_range[2]],1,sum)
	
	####################################
	# Tenure x Race Block         #
	####################################
	TR_TEN <- NULL
	TR_TEN$Tot_O <- (spSF3@data)[PUMA_id,'H011002'] # Total Owner occupied
	TR_TEN$Bl_O <- (spSF3@data)[PUMA_id,'H011004'] # Black Owner occupied
	TR_TEN$Tot_R <- (spSF3@data)[PUMA_id,'H011010'] # Total Renter
	TR_TEN$Bl_R <- (spSF3@data)[PUMA_id,'H011012'] # Black Renter

	
	###############################################
	###############################################
	# Construct X and Y matrices
	#	Y has tracts on rows and attributes in columns
	#	X has households on rows and attributes in columns
	# 	The constraints are w'X=Y
	#	where w is a weight matrix, with households on rows and tracts on columns.
	###############################################
	###############################################
	# Construct the Y matrix for Race x Tenure
	Y_RACE_TEN <- data.frame(cbind(WH_OWN=TR_TEN$Tot_O-TR_TEN$Bl_O,
			   	 				   BL_OWN=TR_TEN$Bl_O,
							  	   WH_RENT=TR_TEN$Tot_R-TR_TEN$Bl_R,
							  	   BL_RENT=TR_TEN$Bl_R
	))
	
	# Construct the Y matrix for Income x Tenure
	Y_INC_TEN <- data.frame(cbind(  LOW_OWN=TR_INC_TEN$LOW_O,
									MED_OWN=TR_INC_TEN$MED_O,
									HI_OWN=TR_INC_TEN$HI_O,
									LOW_RENT=TR_INC_TEN$LOW_R,
									MED_RENT=TR_INC_TEN$MED_R,
									HI_RENT=TR_INC_TEN$HI_R
	))
	
	# Construct the Y matrix for Race x Income
	Y_RACE_INC <- data.frame(cbind( WH_LOW=TR_INC$LOW-TR_BINC$LOW,
									WH_MED=TR_INC$MED-TR_BINC$MED,
									WH_HI=TR_INC$HI-TR_BINC$HI,
									BL_LOW=TR_BINC$LOW,
									BL_MED=TR_BINC$MED,
									BL_HI=TR_BINC$HI
	))
	
	# Construct the X matrix for Race x Tenure
	X_RACE_TEN <- data.frame(cbind(WH_OWN = (1-PUMS$HH_BLACK[PUMS_id])*HH_HU$OWN, # Each of these is a 0-1 variable
			   	 				   BL_OWN = PUMS$HH_BLACK[PUMS_id]*HH_HU$OWN,
							  	   WH_RENT = (1-PUMS$HH_BLACK[PUMS_id])*HH_HU$RENT,
							  	   BL_RENT = PUMS$HH_BLACK[PUMS_id]*HH_HU$RENT
	))
	
	#Construct the X matrix for Income x Tenure
	X_INC_TEN <- data.frame(cbind(  LOW_OWN = HH_INC$LOW*HH_HU$OWN,
									MED_OWN = HH_INC$MED*HH_HU$OWN,
									HI_OWN = HH_INC$HI*HH_HU$OWN,
									LOW_RENT = HH_INC$LOW*HH_HU$RENT,
									MED_RENT = HH_INC$MED*HH_HU$RENT,
									HI_RENT = HH_INC$HI*HH_HU$RENT
	))
	
	#Construct the X matrix for Race x Income
	X_RACE_INC <- data.frame(cbind( WH_LOW = (1-PUMS$HH_BLACK[PUMS_id])*HH_INC$LOW,
									WH_MED = (1-PUMS$HH_BLACK[PUMS_id])*HH_INC$MED,
									WH_HI = (1-PUMS$HH_BLACK[PUMS_id])*HH_INC$HI,
									BL_LOW = PUMS$HH_BLACK[PUMS_id]*HH_INC$LOW,
									BL_MED = PUMS$HH_BLACK[PUMS_id]*HH_INC$MED,
									BL_HI = PUMS$HH_BLACK[PUMS_id]*HH_INC$HI
	))
	
	
	
	# Fix improper coding of vacant as White low income!!
	## This was a problem at one time, I can't remember if I fixed it or not.  NNN 8/12/10.  (I think I did fix it)
	## I didn't want to delete the comment just yet in case it really is still a problem.


	# Now create initial weight.  I will use head of household weights when available, 
	# and Housing Unit weights if not.
	# The logic, if I recall correctly, is that people in Group Quarters don't have household weights or vice versa. (I can't remember right now. NNN 08/12/10)
	WT <- PUMS$HH_PWEIGHT[PUMS_id]
	WT[is.na(WT)] <- (PUMS$HWEIGHT[PUMS_id])[is.na(WT)]
	
	# Now, we assemble the data into single matrices...
	matout_HH <- cbind(X_RACE_TEN,X_INC_TEN,X_RACE_INC) # Matlab Out HouseHold
	matout_TR <- cbind(Y_RACE_TEN,Y_INC_TEN,Y_RACE_INC) # Matlab Out Tract
	matout_WT <- cbind(PUMS$HWEIGHT[PUMS_id],PUMS$HH_PWEIGHT[PUMS_id]) # Matlab Out Weights (NOTE(review): built but never written below; writeMat uses X_Wt)




	# Assemble X and Y matrices
	X <- matout_HH
	Y <- matout_TR
	XHH <- X # Copy the complete X matrix as XHH, I do this because the next step writes over X
	
	# Remove duplicates in X
	X <- unique(XHH)
	# Remove any 0 rows (for example, Group Quarters if we are constraining only on households)
	#X<- X[-which(apply(X,1,sum)==0),] # This is no longer necessary, I changed the code so this problem doesn't exist.

	########################################################################################
	# Now we need to calculate the weights for the unique observations.  Since we collapsed the households that were identical, we need to aggregate their sample weights.
	#
	# This is a major pita... I don't know how to match entire rows of a dataframes, so I 
	# will add the columns in such a way that each unique row has its own column sum.
	# The column sum will be unique for different combinations of the attributes.
	#   Essentially, I create a hybrid base10, base 2, base3, etc system
	# Since each unique row will have its own unique column sum, I can match based on this
	#	unique column sum
	######################################################################
	
	# I think you'll really have to step through this to see what is happening.
	# I might think of other-more transparent-ways of doing this now, that didn't occur to me then.
	cum_max <- cumprod(1+apply(X,2,max)) #This is my hybrid baseX system
	# Now sum the columns, multiplying each by its base
	# X is the unique-'ified' matrix
	# XHH is the original household level matrix
	col_sumX <- X[,1]
	for (j in 2:ncol(X)) col_sumX <- col_sumX+(cum_max[j-1]*X[,j])
	
	col_sumHH <- XHH[,1]
	for (j in 2:ncol(X)) col_sumHH <- col_sumHH+(cum_max[j-1]*XHH[,j])

	
	# Assemble weights
	# Loop through the unique observations and aggregate the individual weights
	X_Wt = numeric(nrow(X))
	for (j in 1:nrow(X)) X_Wt[j] <- sum(WT[col_sumHH==col_sumX[j]])
	# I had a zero weight in the data set. I wonder why?
	# I set that weight to 1.
	if(min(X_Wt)==0) X_Wt[which(X_Wt==0)]<- 1
	######################################################################
	
	##########################################################################################################
	# Now fix the housing unit/household problem: In summary file 3, the estimated number of occupied housing units
	# and the estimated number of households are not equal (Randy says this might be due to differences in housing
	# unit weights and household weights).  The number of occupied housing units (i.e. the tenure variables) are estimated 
	# using the housing unit weights.  The household Income x Race variable is estimated using the person weights of the
	# householder.  The number of householders and number of occupied housing units should be equal, but they are not because
	# of different ways the Census Bureau estimates housing unit weights and person weights.  Nonetheless, this is a problem 
	# for the constraints.  It is an infeasible problem if not fixed.  I readjust the household count to equal the occupied housing unit count.
	
	adjustment <- apply(Y[,1:6],1,sum)/apply(Y[,7:12],1,sum) # sum of 1:6 is number of households, sum of 7:12 is number of 
	# occupied housing units.  (Sum of columns 13:16 is also the number of occupied housing units)
	adjustment <- matrix(adjustment,nrow=nrow(Y),ncol=6,byrow=FALSE)
	
	Y[,1:6] <- Y[,1:6]/adjustment
	# Now, the sum of columns 1:6, columns 7:12 and 13:16 will all be consistent.
	######################################################################
	
	# Spit these out to Matlab
	library(R.matlab)
	
	fileOut <- paste('/Users/nicholasnagle/Work/WRSA2010/data/R2matlab_Davidson',PUMA[pum_i],'.mat',sep='')
	# Create the output file name
	writeMat(fileOut,	X = X, Y=Y, WT=X_Wt,PUMA=PUMA[pum_i])	# Write the matlab file.

	# Run matlab (I can't run this today 8/12/10, my Matlab license expired and I haven't yet renewed it) 
#	system('matlab < /Users/nicholasnagle/Work/WRSA2010/data/DavidsonET3.m > /dev/null')
#	matIn <- readMat('/Users/nicholasnagle/Work/WRSA2010/data/matlab2R_Davidson.mat')
#	# input variables are $EXITFLAG: how fmincon ended, and $p2: the weights
#	
#	# Estimate low income Black homeowners
#	this_row <- which(X[,3]==1 & X[,9]==1)
#	spSF3$BL_LO_Home[PUMA_id] <- apply(matIn$p2[this_row,],2,sum)
#	# Estimate low income Black renters
#	this_row <- which(X[,4]==1 & X[,9]==1)
#	spSF3$BL_LO_Rent[PUMA_id] <- apply(matIn$p2[this_row,],2,sum)
#	# Estimate low income White homeowners
#	this_row <- which(X[,3]==1 & X[,6]==1)
#	spSF3$WH_LO_Home[PUMA_id] <- apply(matIn$p2[this_row,],2,sum)
#	# Estimate low income White renters
#	this_row <- which(X[,4]==1 & X[,6]==1)
#	spSF3$WH_LO_Rent[PUMA_id] <- apply(matIn$p2[this_row,],2,sum)
#
} # Close the for each PUMA loop

#spSF3$WhRatio <- spSF3$WH_LO_Home/(spSF3$WH_LO_Home+spSF3$WH_LO_Rent)
#spSF3$BlRatio <- spSF3$BL_LO_Home/(spSF3$BL_LO_Home+spSF3$BL_LO_Rent)
#
## Now, get a tract-level polygon file for Davidson County.
#load('~/Work/WRSA2010/davidson.tract.Rdata')
#davidson.tracts$WhRatio <- 0
#davidson.tracts$BlRatio <- 0
#davidson.tracts$WhRatio[overlay(spSF3,davidson.tracts)]<- spSF3$WhRatio
#davidson.tracts$BlRatio[overlay(spSF3,davidson.tracts)]<- spSF3$BlRatio
#
#col.regions=brewer.pal(5,'Oranges')
#pdf('~/Work/WRSA2010/Davidson.pdf',height=3,width=6)
#spplot(davidson.tracts,c('WhRatio','BlRatio'),col.regions=col.regions[seq(5,1,by=-1)],at=c(0,.2,.4,.6,.8,1))
#dev.off()