
#Words Vote.
#Copyright (c) 2009 Todd Fine
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.


# The statistics code is governed by the license.txt file in the NLTK installation.
# Natural Language Toolkit: Language Models
#
# Copyright (C) 2001-2009 NLTK Project

import datetime
import pickle #for dictionary storage
import pygtk
pygtk.require('2.0') #requires the PyGTK 2.0 API (not Python 2.0)
import gtk, gobject 
import random, urllib
from nltk import defaultdict
from nltk.probability import *
from nltk.classify.api import *
import nltk
import capitolwords #no api needed
from xml.etree import ElementTree as ET


#pickled dictionary, make sure this is in the same directory as wordsvote.py
# govtobio maps govtrack numeric ids -> bioguide ids (see getids for how it is built).
with open('idsdict.pkl', 'rb') as pkl_file: #context manager guarantees the file is closed even on error
	govtobio = pickle.load(pkl_file)


class Results:  #result window
	"""Pop-up window that shows classifier output in a scrollable text view."""

	def __init__(self, x):
		self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
		self.window.set_title("Most Informative Features")
		self.window.set_geometry_hints(min_width=400, min_height=500)
		self.window.set_border_width(20)
		text_view = gtk.TextView()
		text_buffer = text_view.get_buffer()
		scroller = gtk.ScrolledWindow()
		scroller.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
		scroller.add(text_view)
		# Insert the report text at the start of the (initially empty) buffer.
		text_buffer.insert(text_buffer.get_iter_at_offset(0), x)
		self.box1 = gtk.VBox()
		self.window.add(self.box1)
		self.box1.pack_start(scroller)
		scroller.show_all()
		self.box1.show()
		self.window.show()

class CalendarExample:
	"""Main application window for Words Vote.

	Shows a scrollable list of govtrack roll-call votes, two calendars
	bounding the speech date range, sliders for the training-set size and
	per-speaker word minimum, and a button that runs the analysis.
	"""

	def callback(self, widget):
		# "Analyze" button handler: read the selected vote, the calendar
		# range, and the slider settings, run select(), and show the report
		# (or an error message) in a Results window.
		# NOTE(review): assumes a row is selected; get_selected() yields a
		# None iter otherwise and self.model.get would fail - confirm the
		# selection made by changed() guarantees this.
		one = self.calendar.get_date()
		two = self.calendar2.get_date()
		x = self.tree_view.get_selection().get_selected()
		_dateone = list(one) #turn into list
		_datetwo = list(two)
		_dateone[1] = _dateone[1] + 1 #add one to month (gtk.Calendar months are 0-based)
		_datetwo[1] = _datetwo[1] + 1
		percentinclude = self.adjustment.get_value() / 100.0
		minwords = int(self.adjustment2.get_value())
		#print percentinclude -- for testing
		#print minwords
		result = select(self.model.get(x[1],1)[0],_dateone,_datetwo, percentinclude, minwords)
		if not result: #function gives false if problem
			Results("Insufficient data offered to process properly.")
			return
		Results(result)
		

	def changed(self, tree_view):
		# Selection handler: parse the date out of the vote's display text,
		# point both calendars at that date, and show the vote's result and
		# roll-call number in the info label.
		months = {"Jan" : "1", "Feb" : "2", "Mar" : "3", "Apr" : "4", "May" : "5", "Jun" : "6", "Jul" : "7", "Aug" : "8", "Sep" : "9", "Oct" : "10", "Nov" : "11", "Dec" : "12"} #process string text for conversion back to date
		x = self.tree_view.get_selection().get_selected()
		y = self.model.get(x[1],0)[0]
		monthfind = str(y[:3]) #display text starts with a 3-letter month abbreviation
		month = int(months[monthfind])
		month = month - 1 #adjust for data standard coming from calendar (0-based months)
		#month = int(months[str(y[:2])])
		day = y[4:6]
		if day[1] == ',': #single-digit day: second char is the comma
			day = day[0]
		day = int(day)
		# NOTE(review): the year is located by searching for the substring
		# "200", so this only works for dates in 2000-2009.
		yeartemp = y.find("200")
		yearfind = int(yeartemp)
		yearfind2 = yearfind + 4
		year = int(y[yearfind:yearfind2])
		self.calendar.select_month(month,year) #set calendar to bill dates automatically
		self.calendar2.select_month(month,year)	
		self.calendar.select_day(day)
		self.calendar2.select_day(day)
		# Column 4 holds the chamber code from govtrack ("h" = House).
		if self.model.get(x[1],4)[0] == "h":
			body = "House "
		else:
			body = "Senate "

		string = "Result: " + self.model.get(x[1],2)[0] + "\n" + body + "Roll Call No.: " + self.model.get(x[1],3)[0] + "\n"
		self.label.set_text(string)
	
	def __init__(self):
		# Build the whole window: vote list (left), calendars (right),
		# info label, parameter sliders and the analyze button.
		self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
		self.window.set_title("Words Vote.")
		self.window.set_geometry_hints(min_width=800,min_height=650)
		#self.window.set_border_width(156)
		self.window.connect("destroy", lambda x: gtk.main_quit())
		#self.window.set_resizable(True)
		table = gtk.Table(6, 2, True)
		self.window.add(table)
		self.scrolled_window = gtk.ScrolledWindow()
		self.scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
		self.model = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING) #six columns one for link
		self.tree_view = gtk.TreeView(self.model)
		self.select = self.tree_view.get_selection()
		self.scrolled_window.add_with_viewport (self.tree_view)
		self.select.connect('changed', self.changed)
		self.tree_view.show()

		# Add bill titles and date information (downloads all vote indexes;
		# this blocks the UI while the data loads).
		all = getallvotes()
		for i in all:
			msg = i[2] + " : " + i[0]
			link = i[1]
			result = i[3]
			roll = i[4]
			chamber = i[5]
			iter = self.model.append()
			self.model.set(iter, 0, msg)
			self.model.set(iter, 1, link)
			self.model.set(iter, 2, result)
			self.model.set(iter, 3, roll)
			self.model.set(iter, 4, chamber)

		cell = gtk.CellRendererText()
		column = gtk.TreeViewColumn("Rollcall Votes", cell, text=0)
		self.tree_view.append_column(column)

		table.attach(self.scrolled_window, 0, 1, 0, 4)
		self.scrolled_window.show()
		
		# Two calendars bound the date range of speeches to analyze.
		self.calendar = gtk.Calendar()
		table.attach(self.calendar, 1, 2, 0, 2)	
		self.calendar.show()
		
		self.calendar2 = gtk.Calendar()
		table.attach(self.calendar2, 1, 2, 2, 4)
		self.calendar2.show()

		self.label = gtk.Label("Information on Votes")
		#self.box1.pack_start(self.button)
		table.attach(self.label, 0, 1, 4, 5)
		self.label.show()

		# Sliders controlling the classifier's training parameters.
		box1 = gtk.VBox(False, 0)
		self.adjustment = gtk.Adjustment(value=30, lower=5, upper=100, step_incr=1, page_incr=1, page_size=0)
		self.hscale = gtk.HScale(self.adjustment)
		self.label2 = gtk.Label("Percentage of Speakers to Include in Training Set")
		box1.pack_start(self.label2, True, True, 0)
		box1.pack_start(self.hscale, True, True, 0)

		self.adjustment2 = gtk.Adjustment(value=50, lower=10, upper=200, step_incr=5, page_incr=5, page_size=0)
		self.hscale2 = gtk.HScale(self.adjustment2)
		self.label3 = gtk.Label("Minimum Number of Words for Inclusion per Speaker")
		
		
		box1.pack_start(self.label3, True, True, 0)
		box1.pack_start(self.hscale2, True, True, 0)
	
		table.attach(box1, 0, 1, 5, 6, xoptions=gtk.EXPAND)
		#table.attach(self.hscale, 0, 1, 5, 6, xoptions=gtk.EXPAND)
		box1.show()
		self.hscale.show()
		self.label2.show()
		self.label3.show()
		self.hscale2.show()
		
		self.button = gtk.Button("Analyze Language in Calendar Range")
		self.button.connect("clicked", self.callback)
		#self.box1.pack_start(self.button)
		table.attach(self.button, 1, 2, 4, 6)
	  
		self.button.show()
 
		#self.box1.show()
		table.show()
		#self.box0.show()
		self.window.show()

class NaiveBayesClassifier(ClassifierI):
	"""
	A Naive Bayes classifier (adapted from NLTK's implementation; see the
	license note at the top of this file).  Naive Bayes classifiers are
	parameterized by two probability distributions:

	  - P(label) gives the probability that an input will receive each
		label, given no information about the input's features.
	   
	  - P(fname=fval|label) gives the probability that a given feature
		(fname) will receive a given value (fval), given the label.

	If the classifier encounters an input with a feature that has
	never been seen with any label, then rather than assigning a
	probability of 0 to all labels, it will ignore that feature.

	The feature value 'None' is reserved for unseen feature values;
	you generally should not use 'None' as a feature value for one of
	your own features.

	Local modification: show_most_informative_features() returns a
	formatted report string (for display in a Results window) instead of
	printing to stdout.
	"""
	def __init__(self, label_probdist, feature_probdist):
		"""
		@param label_probdist: P(label), the probability distribution
			over labels.  It is expressed as a L{ProbDistI} whose
			samples are labels.  I.e., P(label) =
			C{label_probdist.prob(label)}.
	   
		@param feature_probdist: P(fname=fval|label), the probability
			distribution for feature values, given labels.  It is
			expressed as a dictionary whose keys are C{(label,fname)}
			pairs and whose values are L{ProbDistI}s over feature
			values.  I.e., P(fname=fval|label) =
			C{feature_probdist[label,fname].prob(fval)}.  If a given
			C{(label,fname)} is not a key in C{feature_probdist}, then
			it is assumed that the corresponding P(fname=fval|label)
			is 0 for all values of C{fval}.
		"""
		self._label_probdist = label_probdist
		self._feature_probdist = feature_probdist
		self._labels = label_probdist.samples()

	def labels(self):
		"""Return the list of labels this classifier can assign."""
		return self._labels

	def classify(self, featureset):
		"""Return the single most probable label for the featureset."""
		return self.prob_classify(featureset).max()
	   
	def prob_classify(self, featureset):
		"""Return a DictionaryProbDist of P(label|featureset)."""
		# Discard any feature names that we've never seen before.
		# Otherwise, we'll just assign a probability of 0 to
		# everything.
		featureset = featureset.copy()
		for fname in featureset.keys():
			for label in self._labels:
				if (label, fname) in self._feature_probdist:
					break
			else:
				#print 'Ignoring unseen feature %s' % fname
				del featureset[fname]

		# Find the log probabilty of each label, given the features.
		# Start with the log probability of the label itself.
		logprob = {}
		for label in self._labels:
			logprob[label] = self._label_probdist.logprob(label)
		   
		# Then add in the log probability of features given labels.
		for label in self._labels:
			for (fname, fval) in featureset.items():
				if (label, fname) in self._feature_probdist:
					feature_probs = self._feature_probdist[label,fname]
					logprob[label] += feature_probs.logprob(fval)
				else:
					# nb: This case will never come up if the
					# classifier was created by
					# NaiveBayesClassifier.train().
					logprob[label] += sum_logs([]) # = -INF.
				   
		return DictionaryProbDist(logprob, normalize=True, log=True)

	def show_most_informative_features(self, n=10):
		"""Return a report string describing the n most informative
		features, one per line, as '<word> (Yea|Nay): <ratio> : 1.0'.
		Labels are the vote marks '+' (yea) and '-' (nay)."""
		x = ""
	# Determine the most relevant features, and display them.
		cpdist = self._feature_probdist
		for (fname, fval) in self.most_informative_features(n):
			def labelprob(l):
				return cpdist[l,fname].prob(fval)
			labels = sorted([l for l in self._labels
							 if fval in cpdist[l,fname].samples()],
							key=labelprob)
			if len(labels) == 1: continue
			l0 = labels[0]
			l1 = labels[-1]
			if cpdist[l0,fname].prob(fval) == 0:
				ratio = 'INF'
			else:
				ratio = '%8.1f' % (cpdist[l1,fname].prob(fval) /
								  cpdist[l0,fname].prob(fval))
			#print ('%24s = %-16r %5s : %-5s = %s : 1.0' %  
				  # (fname, fval, l1[:5], l0[:5], ratio))
			#x.append([fname[9:-1], l1[:10], str(ratio[5:])])
			# preserve for terminal use			
			if str(l1[:8]) == "+":
				bias = "(Yea):"
			else:
				bias = "(Nay):"
			# fname[9:-1] strips the 'contains(...)' wrapper; ratio[5:] strips
			# the %8.1f padding.  NOTE(review): when ratio == 'INF', the [5:]
			# slice yields '' and the ratio disappears from the line - confirm
			# whether 'INF' should be emitted verbatim instead.
			x = x + "".join([str(fname[9:-1]), " ", bias, "   ", str(ratio[5:]), " : 1.0\n"]) #CREATE OUTPUT 	
		#print x		
		return x

	def most_informative_features(self, n=100):
		"""
		Return a list of the 'most informative' features used by this
		classifier.  For the purpose of this function, the
		informativeness of a feature C{(fname,fval)} is equal to the
		highest value of P(fname=fval|label), for any label, divided by
		the lowest value of P(fname=fval|label), for any label::

		  max[ P(fname=fval|label1) / P(fname=fval|label2) ]
		"""
		# The set of (fname, fval) pairs used by this classifier.
		features = set()
		# The max & min probability associated w/ each (fname, fval)
		# pair.  Maps (fname,fval) -> float.
		maxprob = defaultdict(lambda: 0.0)
		minprob = defaultdict(lambda: 1.0)

		for (label, fname), probdist in self._feature_probdist.items():
			for fval in probdist.samples():
				feature = (fname, fval)
				features.add( feature )
				p = probdist.prob(fval)
				maxprob[feature] = max(p, maxprob[feature])
				minprob[feature] = min(p, minprob[feature])
				if minprob[feature] == 0:
					features.discard(feature)

		# Convert features to a list, & sort it by how informative
		# features are.
		features = sorted(features,
			key=lambda feature: minprob[feature]/maxprob[feature])
		return features[:n]

	@staticmethod
	def train(labeled_featuresets, estimator=ELEProbDist):
		"""
		Build a classifier from training data.

		@param labeled_featuresets: A list of classified featuresets,
			i.e., a list of tuples C{(featureset, label)}.
		@param estimator: factory turning a FreqDist into a ProbDist
			(defaults to expected-likelihood estimation).
		"""
		label_freqdist = FreqDist()
		feature_freqdist = defaultdict(FreqDist)
		feature_values = defaultdict(set)
		fnames = set()

		# Count up how many times each feature value occured, given
		# the label and featurename.
		for featureset, label in labeled_featuresets:
			label_freqdist.inc(label)
			for fname, fval in featureset.items():
				# Increment freq(fval|label, fname)
				feature_freqdist[label, fname].inc(fval)
				# Record that fname can take the value fval.
				feature_values[fname].add(fval)
				# Keep a list of all feature names.
				fnames.add(fname)

		# If a feature didn't have a value given for an instance, then
		# we assume that it gets the implicit value 'None.'  This loop
		# counts up the number of 'missing' feature values for each
		# (label,fname) pair, and increments the count of the fval
		# 'None' by that amount.
		for label in label_freqdist:
			num_samples = label_freqdist[label]
			for fname in fnames:
				count = feature_freqdist[label, fname].N()
				feature_freqdist[label, fname].inc(None, num_samples-count)
				feature_values[fname].add(None)

		# Create the P(label) distribution
		label_probdist = estimator(label_freqdist)

		# Create the P(fval|label, fname) distribution
		feature_probdist = {}
		for ((label, fname), freqdist) in feature_freqdist.items():
			probdist = estimator(freqdist, bins=len(feature_values[fname]))
			feature_probdist[label,fname] = probdist

		return NaiveBayesClassifier(label_probdist, feature_probdist)

# Govtrack roll-call index URLs for the years covered by Capitol Words,
# newest first (2009 down to 2001).  Generated instead of hand-copied.
govtrackvoteurls = [
	"http://www.govtrack.us/congress/votes_download_xml.xpd?year=%d" % year
	for year in range(2009, 2000, -1)
]


def allwords(numwords, date1, date2):
	"""Return the unique words spoken in Congress between date1 and date2.

	Fetches up to numwords entries from the Capitol Words word-of-day API;
	dates are (year, month, day) sequences.  Order of the result is not
	significant.
	"""
	wordresult = capitolwords.wordofday(date1[0], date1[1], date1[2], date2[0], date2[1], date2[2], maxrows=numwords)
	# set() removes duplicates directly, replacing the old dict(zip(...)) trick.
	return list(set(x.word for x in wordresult))

def wordlist(congressperson, date1, date2, _maxrows=1000):
	"""Return the words spoken by one congressperson between date1 and
	date2, via the Sunlight Capitol Words API.  Dates are (y, m, d)
	sequences; congressperson is a bioguide id."""
	entries = capitolwords.lawmaker(congressperson, date1[0], date1[1], date1[2],
									date2[0], date2[1], date2[2], maxrows=_maxrows)
	return [entry.word for entry in entries]

def list_features(allwords, wordlist):
	"""Return a feature dict {'contains(word)': bool} for every word in allwords.

	Membership is tested against wordlist; the list is converted to a set
	once so each lookup is O(1) instead of scanning the whole list.
	"""
	present = set(wordlist)
	features = {}
	for word in allwords:
		features['contains(%s)' % word] = (word in present)
	return features

def getids(url="http://www.govtrack.us/data/us/111/repstats/people.xml", dict=None): 
	#useful xml function for linking govtrack ids and bioguide ids
	"""Download govtrack's people.xml and return a dict mapping govtrack
	id -> bioguide id for members whose first role started after 1952
	(a proxy for the current membership).  Pass an existing dict to
	accumulate into it."""
	# A mutable default argument ({}) would be shared across calls and grow
	# forever; use the None sentinel instead.
	if dict is None:
		dict = {}
	feed = urllib.urlopen(url)
	tree = ET.parse(feed)
	a = tree.getroot()
	for x in a.findall("person"):
		b = x.findall("role")
		if len(b) > 0:
			date = b[0].get("startdate")	 
			if int(date[:4]) > 1952: 
			#include conversion list for congresspeople starting in at least 1952
			#proxy for current list
				dict[str(x.get("id"))] = str(x.get("bioguideid"))	  
	return dict

def govtrack_tobio(govtrackid):
	# Convert a govtrack id to a bioguide id via the Sunlight legislators API.
	# NOTE(review): `sunlight` is never imported in this file, so calling this
	# raises NameError; the pickled govtobio dict is used for conversion
	# instead.  Either import the sunlight package or remove this helper.
	leg = sunlight.legislators.get(govtrack_id=govtrackid)
	y = leg.bioguide_id
	return y

def doclist(voteresult, date1, date2, wordcut):  
	list = []
	num = 1
	for x in voteresult:
		a = wordlist(govtobio[x[0]], date1, date2) 
		print "Loaded words for congressperson no.", num, ":", len(a)
		if len(a) > wordcut:
			list.append([wordlist(govtobio[x[0]], date1, date2), x[1]]) 
		num = num + 1
	return list

def sortyea(result): #from stat results
	"""Return only the entries whose vote mark is yea ('+')."""
	yeas = []
	for entry in result:
		if entry[1] == '+':
			yeas.append(entry)
	return yeas

def sortnay(result): #from stat results 
	"""Return only the entries whose vote mark is nay ('-')."""
	nays = []
	for entry in result:
		if entry[1] == '-':
			nays.append(entry)
	return nays

def getvoteinfo(url):
	"""Download a govtrack roll-call XML file and return a list of
	(voter id, vote) pairs, one per <voter> element."""
	response = urllib.urlopen(url)
	root = ET.parse(response).getroot()
	return [(voter.get("id"), voter.get("vote")) for voter in root.findall("voter")]

def getxmlurl(url):
	"""Scrape a govtrack bill page for the URL of its roll-call XML file.

	The links inside govtrack's vote XML files point at the human-readable
	page rather than the XML, so this pulls the "/data/us...xml" path out
	of the page source.  May break if govtrack redesigns its bill pages.
	"""
	page = urllib.urlopen(url).read()
	start = page.find("/data/us")
	window = page[start:start + 40]          # the xml path fits well inside 40 chars
	path = window[:window.find("xml") + 3]   # cut right after the "xml" extension
	return "http://www.govtrack.us/" + path

def getallvotes(): #only for capitol words years
	allvotes = []
	for x in govtrackvoteurls:
		print "Loading vote data for", x[-4:], "..."
		allvotes = allvotes + getvotelist(url=x)
	return allvotes

def getallvotesyear(yearnum): #only for capitol words years
	"""Fetch the roll-call vote list for a single year, indexed into
	govtrackvoteurls (0 = 2009, 8 = 2001)."""
	return list(getvotelist(url=govtrackvoteurls[yearnum]))

def getvotelist(url):
	"""Parse one govtrack yearly vote index.

	Returns a list of [description, link, date, result, roll, chamber]
	records, one per <vote> element under the first <votes> node.
	"""
	response = urllib.urlopen(url)
	root = ET.parse(response).getroot()
	votes = []
	for vote in root.findall("votes")[0].findall("vote"):
		votes.append([
			vote.findall("description")[0].text,
			vote.findall("link")[0].text,
			vote.findall("date")[0].text,
			vote.findall("result")[0].text,
			vote.get("roll"),
			vote.get("chamber"),
		])
	return votes

def select(bill, firstdate, seconddate, percenttrain=.30, wordcutoff=50): #core calculation function
	"""Train a Naive Bayes classifier predicting a vote from speech.

	bill is a govtrack bill-page URL; firstdate/seconddate are (y, m, d)
	sequences bounding the speech range.  Returns a report string with
	accuracy and the most informative words, or False when fewer than 4
	training documents are available.
	"""
	#fix if date range reversed
	if datetime.date(firstdate[0],firstdate[1],firstdate[2]) > datetime.date(seconddate[0],seconddate[1],seconddate[2]): 
		firstdate, seconddate = seconddate, firstdate
	xmlbill = getxmlurl(bill)
	voteinfo = getvoteinfo(xmlbill)
	_doclist = doclist(voteinfo, firstdate, seconddate, wordcutoff)
	# Drop non-votes ('0'); iterating in reverse makes in-place removal safe.
	for x in reversed(_doclist):
		if x[1] == '0':
			_doclist.remove(x)
	random.shuffle(_doclist)
	_allwords = allwords(3000, firstdate, seconddate)
	featuresets = [(list_features(_allwords, a), b) for (a, b) in _doclist]
	trainnumber = int(len(_doclist) * percenttrain)
	print "Total in complete set:", len(_doclist)  
	if trainnumber < 4:
		return False #give 'not enough data' signal
	print "Total in training set:", trainnumber
	# NOTE(review): `train_set` is the slice AFTER the first `trainnumber`
	# items, i.e. the larger remainder, while the first `trainnumber` items
	# become the test set - confirm this matches the slider's
	# "Percentage ... in Training Set" label.
	train_set, test_set = featuresets[trainnumber:], featuresets[:trainnumber]
	classifier = NaiveBayesClassifier.train(train_set)
	output = "Overall accuracy of classifier: " + str(nltk.classify.accuracy(classifier, test_set)) + "\n\n" + classifier.show_most_informative_features(60)
	return output


  
def main():
	"""Hand control to the GTK event loop; returns 0 when the loop exits."""
	gtk.main()
	return 0

if __name__ == "__main__":
	# Build the main window (downloads vote data), then block in the GTK loop.
	CalendarExample()
	main()



