import tweepy
import os
from optparse import OptionParser
import sys
from urllib import urlencode
import re
import locale
import simplejson
import time
import pdb
import urllib2
import signal
from mailer import Mailer
from mailer import Message

class TimeoutException(Exception): 
	"""Raised by the SIGALRM handler installed by the `timeout` decorator
	when a wrapped call exceeds its allotted wall-clock budget."""
	pass

class ImplicitFollowers:

	def __init__(self, outputfilepath, screen_name):
		"""Set up four authenticated tweepy clients, bookkeeping state and the
		TweetUM entity filter set, then immediately start crawling by calling
		self.Process(screen_name).

		outputfilepath -- base path for the .out/.info/.logs output files
		screen_name    -- Twitter screen name that roots the crawl

		NOTE(review): the constructor performs network I/O (four
		rate_limit_status() calls) and then runs the entire crawl before
		returning -- instantiation is the whole program run.
		"""
		self.outputfile = outputfilepath
		self.root_screen_name = screen_name
		
		# SECURITY NOTE(review): OAuth consumer keys/secrets and access tokens
		# are hard-coded below for four accounts. These credentials should be
		# revoked and moved to a config file / environment variables.

		# Account 0 (Imp2Fol)
		#Authentication tokens for Twitter, authenticate, and check rate limit status
		self.auth0 = tweepy.OAuthHandler("6VgMBlOXueMbgUtTcLDZSA", "c246Cwf844ITBqf6QcmIpaFIJLInNZSZauDrvE7sWw")
		self.auth0.set_access_token("335838961-lAHD4suoXmXEEasLNtgCAfNXXKBOc3pNBV1MFiZf", "9NohmpnKwFgnmlQfaYcOg1JUr6zInoMY3wOEl0pzNXI")
		self.api0 = tweepy.API(self.auth0)
		self.my_rate_limit_status0 = self.api0.rate_limit_status()

		# Account 1 (Conv2Fol)
		# Authentication tokens for Twitter, authenticate, and check rate limit status
		self.auth1 = tweepy.OAuthHandler("uEaf4MBF9pu6ePTJT0T3w", "uTwMUJGo6dqciEJ1vVrCJ3wgHrSYR9Vysz2TJ01GLyA")
		self.auth1.set_access_token("335841614-072kBBbxiWHcezvN3Eo9VmMzFnGbehUrqWMXp0Tu", "03jqfVjGPXe219hJHNFBlkKK96igdsvyoFiitnuoJ8")
		self.api1 = tweepy.API(self.auth1)
		self.my_rate_limit_status1 = self.api1.rate_limit_status()
		
		# Account 2 (Int2Fol)
		# Authentication tokens for Twitter, authenticate, and check rate limit status
		self.auth2 = tweepy.OAuthHandler("P07XwnJHUN3dqQxG4zUpw", "fJVg6q79jdtaPNvh2tc1ipv8x9lEiTcwzRf3C24nFQ")
		self.auth2.set_access_token("335844930-HmUJwXcIYbyhvFZE0xCuOZbAIvWjR2gEGV83AxsK", "MTJ2GOv02777TreJkawARTr2YQSMb3UoEjPPCefnGPA")
		self.api2 = tweepy.API(self.auth2)
		self.my_rate_limit_status2 = self.api2.rate_limit_status()

		# Account 3 (Con2Fol)
		# Authentication tokens for Twitter, authenticate, and check rate limit status
		self.auth3 = tweepy.OAuthHandler("FGlMRu1UBRD0K2a0BYBj1w", "ra1uQ6RXpupAzDotakWchgaCrKG5nSlmcST6J5XTl6c")
		self.auth3.set_access_token("335853926-wUuKeH3I4z46LlTVIZ06vyAt3NXSRuINQJj3sYuJ", "qW93vOnz7E7Tq1pMItsEt8bPt2oH04A2WukDP3FplU")
		self.api3 = tweepy.API(self.auth3)
		self.my_rate_limit_status3 = self.api3.rate_limit_status()

		# Index (0-3) of the account currently in use; rotated when rate-limited
		self.account_number = 0
		# Which URL-expander service to use (see GetURLFromTweet): 0/1/2
		self.expander_service = 1
		
		# Keeping track of number of hits to Twitter's search API
		self.twitter_search_api_calls = 0

		# Lists that will be used for users and their items
		self.items = []
		self.users = []
		
		self.filtered_items = []
		self.filtered_users = []
		
		self.total_items = []
		self.total_filtered_items = []

		# Initialize Twitter (multiple accounts)
		# Per-account API call counters, keyed by account number
		self.api_calls = {0:0, 1:0, 2:0, 3:0}
		# NOTE(review): only three keys here vs. four accounts -- presumably
		# stale; verify whether account 3's exceptions should also be tracked.
		self.number_exceptions = {0:0, 1:0, 2:0}
			
		# This is the set of entities extracted from TweetUM for user TechCrunch based on that user's
		# 30 latest followees (18Aug2011)
		self.tweetum_entities = set(['Canada', 'non-seed', 'coach', 'versatile', 'Wilson', 'Leopard', 'Buffett', 'SEOmoz', 'Moon', 'manager', 'Babysitter', 'edu', 'Schmidt', 'Louisiana', 'Czar', 'Brown', '@rentcycle', 'Hyer', 'disorders', '@mcuban', 'Christiane', 'Dam', 'Javascript', 'Fremont', 'Snowball', 'Stem', 'PeopleBrowsr', 'Ranger', 'Lapeer', 'ARPU', 'Now', 'Day', '@NewMEAccel...let', 'smartphone', 'Whitman', 'GQ', 'www.facebook.com/event.php?eid=222193311155586', 'Ventures', 'Alaska', 'results', 'Sheryl', 'GE', 'cellular', 'GO', 'NYU', 'rapper', 'Jersey', 'cool', 'Spy', 'Parker', 'Buckaroo.com', 'Ruby', 'Osaka', 'Global', 'michael', 'solution', 'Marilyn', 'Goodnight', 'unrelated', 'Jon', 'VP', 'businesses', 'Paul', 'Seattle', 'George', 'Kevin', 'jmatthews@scenechat.com', 'hunter', 'LaTeX', 'Paleo', 'HTTP', 'Howard', 'phones', 'consistent', 'MySQL', 'Fast', 'chef', 'Ricoh', 'Here&apos;s', 'video', 'Rick', 'Acquia', 'Farmer', 'East', '3G', 'Red', 'www.cuyana.com', 'Arizona', 'Mongolab', 'Boyd', 'TWiT', 'Roll', 'cell', 'Batali', 'Amazon', 'HTML', 'Hamilton', 'Sao', 'above', 'Mich', 'relational', 'America', 'Sally', 'Bin', 'Classic', 'Ma', 'Libya', 'reporter', 'Charlie', 'Professor', 'NFL', 'water', 'ranger', 'Hoover', 'studio', 'Sunnyvale', 'Ohio', 'Times', 'Search', 'VMware', 'alto', 'search', 'Dean', 'Stream', 'Scoop', 'brilliant', 'Queen', 'Clara', 'SF', 'MT', 'Bikes', 'products', 'advertising', 'Palo', 'social', 'Jared', 'Roland', 'Technology', 'patents', 'Staff', 'Mountain', 'tu', '408-884-3889', 'Robert', 'Daniels', 'psychological', 'Nick', 'Louisville', 'O&apos;Brien', 'Safety', 'S.', '@wjarek', 'Eminem', 'tools', 'MSFT', 'Europe', 'Australia', 'sghopwood@scenechat.com', 'August', 'Comerica', 'Isaak', 'Beerworks', 'process', 'Royce', 'music', 'Novell', 'injury', 'TechCrunch', 'Fela', 'CTOs', 'Bronfman', 'PayPal', 'Intel', 'Mount', 'Fonts', 'Orlando', 'Chopra', 'Devices', 'site', 'team@mongolab.com', 'Morocco', 'antibiotics', 'tim@rentcycle.com', 'Time', 
'olive', '#internet', 'Paolo', 'Spotlight', 'Duke', 'bartender', 'Johnny', 'Tim', 'car', 'athlete', 'Library', 'Toronto', 'Entrepreneur', 'mp', 'Boston', 'Neko', 'Children', '@Tagged', 'spy', 'IBM', 'Lil', 'Media', 'Nepal', 'Hudson', 'Jerry', 'Virtual', 'Reality', 'stroke', 'PRELL', 'Scenechat!', 'Party', 'Hertzfeld', 'Glasgow', 'Smartphone', 'court', 'HIV', 'LA', 'Beach', 'travel', 'Wampanoag', 'Dell', 'Christmas', 'Arrington', 'Jordan', 'Jack', 'VentureBeat', 'Atom', '#travel', 'Kuper', 'Rental', 'Skyport', 'Robin', 'Soledad', 'designer', 'e-beam', 'Renault', 'Tea', 'www.doubletwist.com', 'Ryan', 'Garcia', 'Player', 'designed', 'Yale', 'Eleanor', 'law', 'View', 'Luidia', 'natural', 'Apple', 'www.facebook.com/pages/OLyfe/139761632754778', 'Mexico', 'jobs@mongolab.com', 'MySpace', 'Windows', 'BeCouply', 'Middle', 'Radio', 'Sean', 'www.Olyfe.com', 'Inigral', 'ethernet', 'BostonConsultingGroup', 'Monroe', '@couchsessions', 'Web-based', 'What', 'comms', 'Jay', 'september', 'Smith', '@cmusico', 'jpeg', 'Illinois', 'Jim', '@rexly', 'White', 'MIT', 'Sheen', 'developer', 'RT', 'I&apos;ve', '@navarrowwright', 'Full', 'SAN', 'Verizon', 'Berners-Lee', 'geo', 'OFFICIAL', 'translation', 'Xerox', 'html', '@kimazille', 'systems', 'Chernin', '2011', '@alexia', 'Spain', 'Buck', 'Mobile', 'docs', 'Kapor', 'Michael', 'Interpol', 'Bay', 'iPhone', 'Cam', 'Social', 'ninja', 'Closing', 'republican', 'day', 'Karina', 'Adobe', 'San', 'Mobility', 'India', 'curator', 'capability', 'courses', 'API', 'Skype', 'Twitpic', 'Riggins', 'Aneesh', 'Sanjay', 'MyLife', 'Tokbox', 'bump', 'Farhad', 'energy', 'Dude', 'Butcher', 'Geddes', 'ABC', 'P2P', 'Peru', 'programmer', '10v', 'Bubble', 'More', 'Engine', 'Ken', 'network', 'driving', 'CEO', 'Michigan', 'Haha', 'factory', 'Scoble', 'About.com', 'looking', 'Job', 'Strawpoll', 'China', '7', 'Zinch', 'Super', 'Class', 'Napoleon', 'Aniston', 'AlwaysOn', 'support@mongolab.com', 'farmer', 'Citibank', 'ipod', 'Let&apos;s', '@bryan_hunter', 'CNN', 'Vic', 
'www.scenecha', 'Congress', 'Madrid', 'Chan', 'days', 'pence', 'Vischer', 'IPv6', 'app', 'Cafe', 'www.scenechat.com', '@eBeam', 'Duh', 'Turkey', 'Cooper', 'Blanco', 'ISP', 'Rex', 'Black', 'co-founder', 'engineer', '@xrite', 'Jennifer', 'raksha@lmgpr.com', 'North', 'Botsman', 'service', 'Victoria&apos;s', 'Effect', 'Gallardo', 'paint', 'station', 'Woo', 'Nintendo', 'Rentcycle&apos;s', 'Iraq', 'Rebecca', 'store', 'Azam', 'Davos', 'Justin', 'hub', 'Gates', 'tool', '#imissnyc', 'BBC', 'Miracle', 'dentist', 'www.facebook.com/contour?sk=app_190208911039132', 'Girod', 'Oracle', 'PDF', 'XP', 'eBay', 'Haiti', 'Inc.', 'Gundotra', 'king', 'Dagger', 'Colorado', 'Creator', 'bubble', 'players', 'Herman', 'Resurfacing', 'silly', 'Gang', 'Sea', 'Forehand', 'Peter', 'Zink', 'Welcomes', 'and', 'Diego', 'Cosmonaut', '3M', 'arin@toodo.com', 'Chicago', 'MicroCell', 'DC', 'Secret', 'BLACK', 'gigabit', 'Interview', 'dinner', 'Valley', 'OpenTable', 'mountain', 'dale@uncollege.org', 'support@contour.com', 'Food', 'Kyoto', 'Price', 'Money', 'Da', 'www.getquik.com', 'Minister', 'Santa', 'online', 'Alpha', 'Gallery', 'Kuti', 'Cruze', 'TCP/IP', 'Linux', '@marcidale', 'Tho', 'Korea', '@sfoutsidelands', 'Kanye', 'alpha', 'Mohit', 'Abraham', 'Portland', 'singer', 'Google', 'Capital', 'Lewis', 'disease', 'Asta', 'tech', 'media/social', 'caching', 'Earth', 'Herring', '.com', 'renting', 'Web', 'Matt', 'Ian', 'Blount', 'Cisco', 'Paris', 'Barry', 'Karla', 'queen', 'Dolores', 'Craigslist', '@contour_cam', 'Brookins', 'Lennon', 'solutions', 'Marks', 'OnStar', 'networks', 'Businessweek', 'Kingdom', 'Crux', 'producer', 'Sacramento', 'writer', 'paranoia', 'in-car', 'wayne', 'York', 'Cottage', 'TechTarget', '@oLyfe', 'Prime', 'Grossman', 'sunglasses', '#solutions', '@upenzi', 'Willin&apos;', 'Louis', '@abenton', 'Magazine', 'Houston', 'www.mealison.com', 'Shops', 'chips', 'Stadium', 'GPS', '+1-206-792-5226', 'artist', 'True', 'Washington', 'Kong', '#collaboration', 'INTERNET', 'PHP', 'Twitter', 'view', 
'FDA', 'Arbor', 'EMC', 'King', 'w/', 'Dogg', 'Tesco', 'sez', 'France', 'analytics', 'Motorola', 'result', 'GBP', 'Grand', 'Chamillionaire', 'Android', 'John', 'Malawi', 'CRM', 'Linden', 'County', 'Good', 'Iowa', 'luv', 'mine', 'Tool', 'Park', 'Canyon', 'artificial', 'mayor', 'Genecure', 'Rexly', 'Harvard', 'Texas', 'Amazon.com', 'Sony', 'Technologies', 'Republican', 'Hopwood', 'therapies', 'conferencing', 'Hip', 'Dusty', 'Florida', 'Youtube', 'Edgar', 'Silicon', 'Subway', 'Annie', '@NewMeAccel', 'news', 'Hansen', '@orchestra.io', 'design', 'instant', '+@bryan_hunter', 'restaurant', 'drug', 'patti', 'Steve', 'Gillmor', 'XM', 'Computer', 'Contrave', 'arin@tood.com', 'auctions', 'Oscar', 'interactive', 'Whip', 'Primark', 'AOL', 'cancer', 'Zane', 'smith', 'YESYESYES', 'Song/artist', 'Amanpour', '@datingheadshots', 'Lounge', 'Los', 'Norway', 'Mt.', 'Ireland', 'Online', 'Dating', 'Willie', 'Meadows', '@stephendeberry', 'swimmer', 'Yahoo', 'Tacos', 'Brooklyn', 'California', 'B.I.G', 'Doctors', 'St.', 'dev@toodo.com', 'oDesk', 'engine', 'Georgia', 'Adams', 'Tokyo', 'Poland', 'Phil', 'gas', 'Philadelphia', 'Tackle', 'Kamangar', 'Dismissed', 'News', '@baptistejesse', 'Conway', 'Ann', 'Rachel', 'Everest', 'ventures', 'b&apos;shevat', 'voice', 'radio', 'Kottke', 'Building', 'Alfano', 'Italy', 'CATHERINE', 'Vannevar', 'Oakley', 'Krugman', 'hardware', 'player', 'Bush', 'Mini', 'properties', 'Prison', 'ready', 'Valencia', 'foreign', 'technology', 'Kareem', 'Georgetown', 'facial-recog', 'author', 'media', 'Bangkok', 'food', 'Leith', 'speaker', 'Handbag', 'party', 'CruiseControl.NET', 'Business&apos;s', 'oil', 'http', 'Institute', 'Cox', 'AERO', 'driver', 'Adam', 'Angeles', 'boston', 'portal', 'php', 'diabetes', 'Yard', 'Salar', 'Game', 'ROTFLMAO', 'Miller', 'NextUp', 'Mine', 'fever', 'Ppl', 'States', 'Club', 'www.getquik.com/pizza-my-heart-sunnyvale', 'sunnyvale', 'telephone', 'audio', 'browsing', 'BOY', 'HBO', 'United', 'ceo', 'Oslo', 'TechStars', 'executive', 'Access', 'Daniel', 
'virtual', 'Internet', 'injuries', 'Marvell', 'web', 'Jobs', 'Snoop', 'candidate', '#sfGiants', 'Lupe', 'photographer', 'Company', 'YouTube', '@jtwebman', '@BeCouply', 'Victoria', 'Zuckerberg', 'haz', 'Starbucks', 'FriendFeed', 'DVD', 'Fab', '5', 'easter', 'Bremmer', 'co-Founder', 'Alto', 'Sprint', 'Norton', 'Restaurant', 'Saratoga', 'James', 'Cells', 'Patagonia', 'Reginald', 'Snow', 'I/O', 'Billy', 'traffic', 'Nelson', 'Notorious', 'checkins', 'Talk', 'GetQuik', 'Bellingham', 'Yo-Yo', 'InformationWeek', '@contour_cam..check', 'Pie', 'server', 'communications', 'London', 'SQL', 'JUDGE', 'Stephan', 'Inspector', 'Golfer', 'AT&amp;T', 'Warren', '@hajjflemings', 'Perry', 'Java', 'Arcade', 'Marc', 'West', 'Garrett', 'HQ', 'Pius', 'Mark', 'streaming', 'Plugandplay', 'Doubletwist', 'Andy', 'Editor', 'palo', 'Try', 'New', 'sprint', 'Jonathan', 'Bubba', 'HD', 'OSX', 'Mike', '#collcons', 'Ace', 'for', 'Singapore', 'Fire', 'thanksgiving', '@Kate_Butler', 'Leo', 'Togo', 'Walden', 'Facebook', 'Bieber', 'Armenia', 'Chris', 'Barros', 'commentator', 'christmas', 'Syracuse', 'Rentcycle', 'Google+', 'Bill', 'David', 'Josaitis', 'Dakota', 'Sweden', 'www.thisweekin.com', 'Kindle', 'Microsoft', 'vaccine', 'contact@buckaroo.com', 'Becky', 'Madison', 'Taylor', 'HTC', 'Ron', 'industry', 'whiteboard', 'favorite', 'Heights', 'Buckaroo', 'WIRED', 'airline', 'Femi', 'Cameron', 'Japan', 'iPads', 'Captain', 'software', 'Lord', 'Hong', 'Eli', 'Memories', 'Majestic', 'AV', 'Bowl', 'Taj', 'entrepreneur', 'Aaron', 'Gaga', 'web-site', 'Alabama', 'networking', 'Ali', 'Shawn', 'people', 'JSON', 'Toshiba', 'Chelsea', 'Vegas', 'editor', 'Chile', 'President', 'forward', 'hysterical', 'HP', 'Obama', 'function', 'head', 'Mahal', 'N.', '#promotion', 'Girls', 'Visual', 'Karel', 'Detroit', 'Wildstrom', 'daylightburnsahole@gmail.com', 'DNS', 'Walker', 'technologies', 'Austin', 'Levchin', 'www.nyinternproject.com', 'Uzamere', '@TechTwNews', 'Zune', 'Awesome', 'Africa', 'Live', 'us', 'technology-enabled', 'Avi', 
'Messenger', 'Startups', 'election', 'Jose', 'USD', 'Main', 'Writer', 'CIO', 'Auerbach', 'LinkedIn', 'Abdul-Jabbar', '@arrington', 'sharing', 'rally', 'Ryu', 'Bernd', 'dealer', 'Rheingold', 'Francisco', 'TV', 'Max', '#entrepreneur', 'Utah', 'Andrew', 'Lab', 'reality', 'Mario', 'Jody', 'Mae', 'recognition', '#Disruptive', 'intelligence', 'LivingSocial', 'Discovery', 'Lady', 'Altucher', 'Las', 'Case', 'Great', 'Fiasco', 'Alzheimer&apos;s', 'Stanford', 'students', 'worker', 'Meg', 'stipe', 'Brazil', 'www.Buckaroo.com', 'Derby', 'Sandberg', '@_pius', 'Nations', 'presidential', 'South', 'Lakes', 'Major', 'COACH', '@CMCreativeMedia', 'Gupta', 'REBECCA', 'dog', 'Comcast', 'Hardware', 'Pariser', 'Scott', '#search', 'Jay-Z', 'representative'])
		
		
		# Kick off the crawl immediately; see Process() for the driver logic.
		self.Process(self.root_screen_name)
	
	def timeout(timeout_time, default):
	#Timeout function using decorator pattern
	#ref: http://pguides.net/python/timeout-a-function

		def timeout_function(f):
			def f2(*args):
				def timeout_handler(signum, frame):
					raise TimeoutException()

				old_handler = signal.signal(signal.SIGALRM, timeout_handler) 
				signal.alarm(timeout_time) # triger alarm in timeout_time seconds
				try: 
					retval = f(*args)
				except TimeoutException:
					print "TIMEOUT!"
					return None
				finally:
					signal.signal(signal.SIGALRM, old_handler) 
				signal.alarm(0)
				return retval
			return f2
		return timeout_function	

	@timeout(1800, None) # Timeout after 30 minutes	
	def GetUserID(self, screen_name):		
		# Keep track of the Twitter API Calls
		self.api_calls[self.account_number] = self.api_calls[self.account_number] + 1
		print "Api calls for account %s :" %self.account_number + " [%s] " %self.api_calls[self.account_number] + " (from GetUserTimeline)"
		if (self.api_calls[self.account_number] > 325):
			
			# Get the state of all API keys
			self.api_calls[0] = 350 - self.api0.rate_limit_status()["remaining_hits"]
			self.api_calls[1] = 350 - self.api1.rate_limit_status()["remaining_hits"]
			self.api_calls[2] = 350 - self.api2.rate_limit_status()["remaining_hits"]
			self.api_calls[3] = 350- self.api3.rate_limit_status()["remaining_hits"]

			# Ensure from the start that we are using the right API key according to the rate limits.
			while (self.api_calls[self.account_number] > 325):
				self.account_number = (self.account_number + 1) % 4 # Modulo, so that account numbers rotate
				print "Sleeping for 10 minutes while API refreshes ..."
				self.output_logs.write("Sleeping for 10 minutes while API refreshes... \n")
				time.sleep(10*60)
				
		user_id = None

		try:
			if (self.account_number == 0):
				user_id = self.api0.get_user(screen_name).id
			elif (self.account_number == 1):
				user_id = self.api1.get_user(screen_name).id
			elif (self.account_number == 2): 
				user_id = self.api2.get_user(screen_name).id
			elif (self.account_number == 3):
				user_id = self.api3.get_user(screen_name).id
		except:
			self.output_logs.write("Problem getting user id for user %s" %screen_name  + "Error: Not Authorized \n")
			print "Problem getting user id for user %s:" %screen_name + "Error: Not Authorized"
			pass

		return user_id

	@timeout(1800, None) # Timeout after 30 minutes	
	def GetUserTimeline(self, user_id):		
		# Keep track of the Twitter API Calls
		self.api_calls[self.account_number] = self.api_calls[self.account_number] + 1
		print "Api calls for account %s :" %self.account_number + " [%s] " %self.api_calls[self.account_number] + " (from GetUserTimeline)"
		if (self.api_calls[self.account_number] > 325):
			
			# Get the state of all API keys
			self.api_calls[0] = 350 - self.api0.rate_limit_status()["remaining_hits"]
			self.api_calls[1] = 350 - self.api1.rate_limit_status()["remaining_hits"]
			self.api_calls[2] = 350 - self.api2.rate_limit_status()["remaining_hits"]
			self.api_calls[3] = 350- self.api3.rate_limit_status()["remaining_hits"]

			# Ensure from the start that we are using the right API key according to the rate limits.
			while (self.api_calls[self.account_number] > 325):
				self.account_number = (self.account_number + 1) % 4 # Modulo, so that account numbers rotate
				print "Sleeping for 10 minutes while API refreshes ..."
				self.output_logs.write("Sleeping for 10 minutes while API refreshes... \n")
				time.sleep(10*60)

		user_timeline = None

		try:
			if (self.account_number == 0):
				user_timeline = self.api0.user_timeline(user_id,include_entities=True)
			elif (self.account_number == 1):
				user_timeline = self.api1.user_timeline(user_id,include_entities=True)
			elif (self.account_number == 2): 
				user_timeline = self.api2.user_timeline(user_id,include_entities=True)
			elif (self.account_number == 3):
				user_timeline = self.api3.user_timeline(user_id,include_entities=True)
		except:
			self.output_logs.write("Problem getting user timeline for user %s" %user_id  + "Error: Not Authorized \n")
			print "Problem getting user timeline for user %s" %user_id  + "Error: Not Authorized"
			pass

		return user_timeline		

	@timeout(1800, None) # Timeout after 30 minutes	
	def GetTweetStatus(self, tweet_id):
		# Keep track of the Twitter API Calls
		self.api_calls[self.account_number] = self.api_calls[self.account_number] + 1
		print "Api calls for account %s :" %self.account_number + " [%s] " %self.api_calls[self.account_number] + " (from GetUserTimeline)"
		if (self.api_calls[self.account_number] > 325):
			
			# Get the state of all API keys
			self.api_calls[0] = 350 - self.api0.rate_limit_status()["remaining_hits"]
			self.api_calls[1] = 350 - self.api1.rate_limit_status()["remaining_hits"]
			self.api_calls[2] = 350 - self.api2.rate_limit_status()["remaining_hits"]
			self.api_calls[3] = 350- self.api3.rate_limit_status()["remaining_hits"]

			# Ensure from the start that we are using the right API key according to the rate limits.
			while (self.api_calls[self.account_number] > 325):
				self.account_number = (self.account_number + 1) % 4 # Modulo, so that account numbers rotate
				print "Sleeping for 10 minutes while API refreshes ..."
				self.output_logs.write("Sleeping for 10 minutes while API refreshes... \n")
				time.sleep(10*60)
	
		implicit_followee_tweet = None

		try:
		        if (self.account_number == 0):
		                implicit_followee_tweet = self.api0.get_status(tweet_id)
		        elif (self.account_number == 1):
		                implicit_followee_tweet = self.api1.get_status(tweet_id)
		        elif (self.account_number == 2):
		                implicit_followee_tweet = self.api2.get_status(tweet_id)
		        elif (self.account_number == 3):
		                implicit_followee_tweet = self.api3.get_status(tweet_id)
		except:
		        self.output_logs.write("Problem getting implicit followee tweet \n")
		        print "Problem getting implicit followee tweet \n"
		        pass

		return implicit_followee_tweet
										
	def Process(self, user_id):	
		
		start_time = time.strftime("%Y%m%d-%H""%M")	
		
		outputfilename = self.outputfile
		fqn_filename = outputfilename.split('.')
		
		# Output for normal (unfiltered runs)
		self.output = open(self.outputfile, 'w')
		self.output_info = open(fqn_filename[0] + ".info", 'w')
		self.output_logs = open(fqn_filename[0] + ".logs", 'w')
		
		# Output for TweetUM filtering
		self.output_filtered = open(fqn_filename[0] + "_filtered.out", 'w')
		self.output_filtered_info = open(fqn_filename[0] + "_filtered.info", 'w')
		self.output_filtered_logs = open(fqn_filename[0] + "_filtered.logs", 'w')
		
		self.output_logs.write("Rate Limit Account 0: %s" %self.api0.rate_limit_status() + "\n")
		self.output_logs.write("Rate Limit Account 0: %s" %self.api1.rate_limit_status() + "\n")
		self.output_logs.write("Rate Limit Account 0: %s" %self.api2.rate_limit_status() + "\n")
		self.output_logs.write("Rate Limit Account 0: %s" %self.api3.rate_limit_status() + "\n")
		
		print "Rate Limit Account 0: %s" %self.api0.rate_limit_status() + "\n"
		print "Rate Limit Account 0: %s" %self.api1.rate_limit_status() + "\n"
		print "Rate Limit Account 0: %s" %self.api2.rate_limit_status() + "\n"
		print "Rate Limit Account 0: %s" %self.api3.rate_limit_status() + "\n"
		
		# Get the state of all API keys
		self.api_calls[0] = 350 - self.api0.rate_limit_status()["remaining_hits"]
		self.api_calls[1] = 350 - self.api1.rate_limit_status()["remaining_hits"]
		self.api_calls[2] = 350 - self.api2.rate_limit_status()["remaining_hits"]
		self.api_calls[3] = 350- self.api3.rate_limit_status()["remaining_hits"]
	
		# Ensure from the start that we are using the right API key according to the rate limits.
		while (self.api_calls[self.account_number] > 325):
			self.account_number = (self.account_number + 1) % 4
			print "Sleeping for 10 minutes while API refreshes ..."
			self.output_logs.write("Sleeping for 10 minutes while API refreshes... \n")
			time.sleep(10*60)
			
		user_id = self.GetUserID(self.root_screen_name)	
		
		# Add user_id to users list, if not there already	
		if (self.users.count(user_id) == 0):
			self.users.append(user_id)
			
		# Add user_id to filtered users list, if not there already	
		if (self.filtered_users.count(user_id) == 0):
			self.filtered_users.append(user_id)
	
		# Get the last tweets published by user
		self.output_logs.write("Getting timeline for user %s \n" %user_id)
		try:
			tweets = self.GetUserTimeline(user_id)
		except:
			self.output_logs.write("Nothing further is possible if not authorized to get user_timeline from user %s \n" %user_id)
			print "Nothing further is possible if not authorized to get user_timeline from user %s" %user_id
			raise
		
		if not(tweets is None):
			# Process the tweets of user
			self.ProcessTweets(user_id, tweets)

		end_time = time.strftime("%Y%m%d-%H""%M")		
		
		self.output_logs.write("Finished!\n")
		self.output_logs.write("Number of unique users: %s \n" %len(self.users))
		self.output_logs.write("Number of unique items %s \n" %len(self.items))
		
		self.output_filtered_logs.write("Finished!\n")
		self.output_filtered_logs.write("Number of unique users: %s \n" %len(self.filtered_users))
		self.output_filtered_logs.write("Number of unique items: %s \n" %len(self.filtered_items))
		
		# Close output files
		self.output.close()
		self.output_info.close()
		self.output_logs.close()
		self.output_filtered.close()
		self.output_filtered_info.close()
		self.output_filtered_logs.close()
				
		print "Finished!"
		print "Number of unique users: %s" %len(self.users)
		print "Number of unique items: %s" %len(self.items)
		
		print "Number of unique filtered users: %s" %len(self.filtered_users)
		print "Number of unique filtered items: %s" %len(self.filtered_items)
		
		# Send an email with summary of this run
		
		message = Message(From="phoenix@tudelft.nl", To="interestingfollower@gmail.com")
		message.Subject = "Phoenix: IFN finished!"
		message.Body = "Here's the summary: \n\nTotal number of items: %s" %len(self.total_items) + "\nTotal number of filtered items: %s" %len(self.total_filtered_items) + "\n\nNumber of unique users: %s" %len(self.users) + "\nNumber of unique items: %s" %len(self.items) + "\n\nNumber of unique filtered users: %s" %len(self.filtered_users) + "\nNumber of unique filtered items: %s" %len(self.filtered_items) + "\n\nStart time: %s" %start_time + "\nEnd time: %s" %end_time 

		sender = Mailer('smtp.tudelft.nl')
		sender.send(message)
	
	def ProcessTweets(self, user_id, tweets):			
		"""Level 1 of the crawl: for up to 5 URL-bearing tweets of the root
		user, record the (user, url) item, apply the TweetUM filter, search
		Twitter for other users who shared the same URL ("implicit
		followers"), and recurse one level via ProcessImplicitTweets.

		user_id -- id of the root user whose timeline `tweets` belongs to
		tweets  -- iterable of tweepy statuses with entities included
		"""
		self.output_logs.write("Entering ProcessTweets... \n")		
		print "Entering ProcessTweets... \n"	
		
		# Count of URL-bearing tweets processed so far (capped at 5 below)
		tweets_with_url = 0
		
		# For each of the (5) tweets in my user_timeline get URLs			
		for tweet in tweets:
			
			if not(len(tweet.entities['urls']) == 0):
				
				# Add it to the unique URLs list
				# NOTE: After finding that GuyKawasaki tweets > 65 times per day, and assuming that half of those
				# tweets have URLs, and that we will run these scripts 3 times per day, it was decided to collect
				# 10 tweets with URLs and use those to search for implicit followers.
				if not(len(tweet.entities['urls'][0]['url']) == 0) and not(tweets_with_url > 4):
				
					# Extract URL from Tweet
					url = self.GetURLFromTweet(tweet)
				
					self.AddToItemList(user_id, tweet.user.id, url)
					tweets_with_url = tweets_with_url + 1
					# FilterContent returns the matching entity (or None) when the
					# tweet text hits the TweetUM entity set
					filtered_tweet = self.FilterContent(tweet)
					if not(filtered_tweet is None):
						print "Filter word match: %s" %filtered_tweet
						self.AddToFilteredItemList(user_id, tweet.user.id, url)

					# Search for URL on Twitter Search
					json_object_urls_search_for_followers = self.SearchTwitter(url)
					
					if not(json_object_urls_search_for_followers is None):
						
						self.output_logs.write(" ===> Number of results for implicit followers search: " + "%s" %len(json_object_urls_search_for_followers["results"]) + "\n")
						print " ===> Number of results for implicit followers search: " + "%s" %len(json_object_urls_search_for_followers["results"]) + "\n"
				
						# If there is more than one page of results but less than three, 
						# then consider up to two pages.
															
						for result_search in json_object_urls_search_for_followers["results"]:	
					
							try:						
								# Twitter search API returns Tweet_ID's, so use those in order to get to 
								# user_id, but first check that user_id can be found otherwise continue to next result
								implicit_followee_tweet = self.GetTweetStatus(result_search["id"])
								if not(implicit_followee_tweet is None):
									implicit_followee_user_id = implicit_followee_tweet.user.id
								else:
									self.output_logs.write("implicit of implicit followee tweet is empty! \n")
									print "implicit of implicit followee tweet is empty!"
									continue

								# Get the last tweets published by user, but first check that 
								# user_id not the root of this sub-tree otherwise continue to next result
								if not(user_id == implicit_followee_user_id):
									implicit_tweets = self.GetUserTimeline(implicit_followee_user_id)
								else:
									self.output_logs.write("Skipping the originator user_id %s" %implicit_followee_user_id + " from result set \n")
									print "Skipping the originator user_id %s" %implicit_followee_user_id + " from result set"			
									continue
				
								if not(implicit_tweets is None):
									# Process the tweets of an implicit user (followee)
									self.ProcessImplicitTweets(user_id, implicit_followee_user_id, implicit_tweets)					
							except:
								# Best-effort: any failure on one search result moves on to the next
								continue
		
	def ProcessImplicitTweets(self, user_id, implicit_followee_user_id, implicit_tweets):			
		"""Level 2 of the crawl: same per-tweet pipeline as ProcessTweets
		(record item, TweetUM filter, Twitter search on the URL), applied to
		an implicit followee's timeline, recursing once more via
		ProcessImplicitOfImplicitTweets.

		user_id                   -- root user of this sub-tree
		implicit_followee_user_id -- the level-1 implicit followee being processed
		implicit_tweets           -- that followee's timeline statuses
		"""
		self.output_logs.write("Entering ProcessImplicitTweets... \n")
		print "Entering ProcessImplicitTweets... \n"
		
		# Count of URL-bearing tweets processed so far (capped at 5 below)
		tweets_with_url = 0
		
		# For each of the (5) tweets in my user_timeline get URLs			
		for tweet in implicit_tweets:
						
			if not(len(tweet.entities['urls']) == 0):
			
				# Add it to the unique URLs list
				if not(len(tweet.entities['urls'][0]['url']) == 0) and not(tweets_with_url > 4):
				
					# Extract URL from Tweet
					url = self.GetURLFromTweet(tweet)
				
					self.AddToItemList(user_id, implicit_followee_user_id, url)
					tweets_with_url = tweets_with_url + 1
					filtered_tweet = self.FilterContent(tweet)
					if not(filtered_tweet is None):
						print "Filter word match: %s" %filtered_tweet
						self.AddToFilteredItemList(user_id, implicit_followee_user_id, url)

					# Search for URL on Twitter Search
					json_object_urls_search_for_followers = self.SearchTwitter(url)
			
					if not(json_object_urls_search_for_followers is None):
						
						self.output_logs.write(" ===> Number of results for implicit followers search: " + "%s" %len(json_object_urls_search_for_followers["results"]) + "\n")
						print " ===> Number of results for implicit followers search: " + "%s" %len(json_object_urls_search_for_followers["results"]) + "\n"
											
						#for result_search in json_object_urls_search_for_followers["comments"]:
						for result_search in json_object_urls_search_for_followers["results"]:
					
							try:
								# Twitter search API returns Tweet_ID's, so use those in order to get to 
								# user_id, but first check that user_id can be found otherwise continue to next result
								implicit_of_implicit_followee_tweet = self.GetTweetStatus(result_search["id"])
								if not(implicit_of_implicit_followee_tweet is None):
									implicit_of_implicit_followee_user_id = implicit_of_implicit_followee_tweet.user.id
								else:
									self.output_logs.write("implicit of implicit followee tweet is empty! \n")
									print "implicit of implicit followee tweet is empty!"
									continue

								# Get the last tweets published by user, but first check that 
								# user_id not the root of this sub-tree otherwise continue to next result
								if not(implicit_followee_user_id == implicit_of_implicit_followee_user_id):
									implicit_of_implicit_tweets = self.GetUserTimeline(implicit_of_implicit_followee_user_id)
								else:
									self.output_logs.write("Skipping the originator user_id %s" %implicit_followee_user_id + " from result set \n")
									print "Skipping the originator user_id %s" %implicit_followee_user_id + " from result set"
									continue

								if not(implicit_of_implicit_tweets is None):
									# Process the tweets of user
									self.ProcessImplicitOfImplicitTweets(implicit_followee_user_id, implicit_of_implicit_followee_user_id, implicit_of_implicit_tweets)
								# NOTE(review): stray no-op `pass` kept verbatim
								pass
							except:
								# Best-effort: any failure on one search result moves on to the next
								continue

	def ProcessImplicitOfImplicitTweets(self, implicit_followee_user_id, implicit_of_implicit_followee_user_id, implicit_of_implicit_tweets):	
		
		self.output_logs.write("Entering ProcessImplicitOFImplicitTweets... \n")
		print "Entering ProcessImplicitOFImplicitTweets... \n"
		
		tweets_with_url = 0
		
		# For each of the (5) tweets in my user_timeline get URLs			
		for tweet in implicit_of_implicit_tweets:
			
			if not(len(tweet.entities['urls']) == 0):
			
				# Add it to the unique URLs list
				if not(len(tweet.entities['urls'][0]['url']) == 0) and not(tweets_with_url > 4):
				
					# Extract URL from Tweet
					url = self.GetURLFromTweet(tweet)
				
					self.AddToItemList(implicit_followee_user_id, implicit_of_implicit_followee_user_id, url)
					tweets_with_url = tweets_with_url + 1
					filtered_tweet = self.FilterContent(tweet)
					if not(filtered_tweet is None):
						print "Filter word match: %s" %filtered_tweet
						self.AddToFilteredItemList(implicit_followee_user_id, implicit_of_implicit_followee_user_id, url)		

	def GetURLFromTweet(self, tweet):
		"""Return the expanded (long) URL carried by *tweet*, or None.

		Prefers Twitter's 'expanded_url' entity over the raw shortened
		URL, then pushes it through the configured URL-expander service
		(with retries and a plain urllib2 redirect-following fallback).
		If the result still looks shortened (< 30 characters), one more
		expansion round is attempted on it.
		"""
		entity_urls = tweet.entities['urls']
		if len(entity_urls) == 0:
			return None
		if len(entity_urls[0]['url']) == 0:
			# The original fell off the end here and returned None implicitly.
			return None

		# Prefer the expanded form Twitter already provides, if any
		if entity_urls[0]['expanded_url'] is not None:
			url_posted = entity_urls[0]['expanded_url']
		else:
			url_posted = entity_urls[0]['url']

		real_url = self._ResolveURL(url_posted)

		# Still, the 'expanded' real_url might just be another layer of the
		# onion, so go through the whole process once more on the *expanded*
		# URL.  (Bug fix: the original re-expanded url_posted here, which
		# could never peel a second layer.)
		if len(real_url) < 30:
			real_url = self._ResolveURL(real_url)

		return real_url

	def _ResolveURL(self, url):
		"""Expand *url* via the currently selected expander service.

		Tries up to three times -- ExpandURL/LongURL/LongURLPlease rotate
		self.expander_service on failure, so a retry may hit a different
		service.  If no service yields a different URL, falls back to
		following redirects with urllib2; on any error there, returns
		*url* unchanged.
		"""
		for _attempt in range(3):
			if self.expander_service == 0:
				result = self.ExpandURL(url)
			elif self.expander_service == 1:
				result = self.LongURL(url)
			elif self.expander_service == 2:
				result = self.LongURLPlease(url)
			else:
				break
			if result != url:
				return result
		try:
			return urllib2.urlopen(url).url
		except:
			return url

	def ExpandURL(self, url_posted):
		"""Expand a shortened URL via http://expandurl.appspot.com.

		Returns the final URL of the redirect chain reported by the
		service, or url_posted itself when the service reports failure or
		any exception occurs.  Every failure/exception increments
		number_exceptions for the current expander and rotates
		self.expander_service to the next of the 3 services.
		"""
		try:
			shortened_url = urllib2.Request("http://expandurl.appspot.com/expand?url=%s" %url_posted)
			opener = urllib2.build_opener()
			f_url = opener.open(shortened_url)
			expanded_url = f_url.read()
			json_url = simplejson.loads(expanded_url)

			# Check if the URL has multiple redirects and choose the last one
			if (json_url["status"] == "OK"):
				url_key = json_url["urls"]		
				if (len(url_key) > 2):
					real_url = url_key[2]
				elif (len(url_key) == 2):
					real_url = url_key[1]
				else:
					real_url = url_key[0]	
				return real_url

			# Check if despite being 'InvalidURL' maybe there is still something useful	
			elif (json_url["status"] == "InvalidURL"):
				url_key = json_url["urls"]		
				# NOTE(review): index [1] here, vs [2] in the OK branch above,
				# looks like a copy/paste slip -- confirm before changing.
				if (len(url_key) > 2):
					real_url = url_key[1]
				elif (len(url_key) == 2):
					real_url = url_key[1]
				else:
					real_url = json_url["start_url"]	
				return real_url

			# IF status is not OK or not InvalidURL then log and return posted URL
			else:
				try:
					self.output_logs.write("ExpandURL failure with URL: %s\n" %url_posted)
				except:
					self.output_logs.write("ExpandURL failure with URL")
				# The counter is incremented just before the guard, so the
				# guard is always true: the service rotates on every failure.
				self.number_exceptions[self.expander_service] = self.number_exceptions[self.expander_service] + 1
				if(self.number_exceptions[self.expander_service] > 0):
					self.expander_service = (self.expander_service + 1) % 3
					self.number_exceptions[self.expander_service] = 0
				return url_posted
		except:
			# Network / JSON / key errors all land here (bare except).
			try:
				self.output_logs.write("ExpandURL exception with URL: %s\n" %url_posted)
			except:
				self.output_logs.write("ExpandURL exception with URL")
			self.number_exceptions[self.expander_service] = self.number_exceptions[self.expander_service] + 1
			if(self.number_exceptions[self.expander_service] > 0):
				self.expander_service = (self.expander_service + 1) % 3
				self.number_exceptions[self.expander_service] = 0		
			return url_posted

	def LongURL(self, url_posted):
		"""Expand *url_posted* via the longurl.org v2 API.

		Returns the expanded URL, or *url_posted* itself on failure; any
		failure/exception increments number_exceptions for the current
		expander and rotates self.expander_service to the next service.
		"""
		try:
			shortened_url = urllib2.Request("http://api.longurl.org/v2/expand?url=%s" %url_posted + "&format=json")
			opener = urllib2.build_opener()
			f_url = opener.open(shortened_url)
			expanded_url = f_url.read()
			json_url = simplejson.loads(expanded_url)

			if not(json_url["long-url"] is None):
				real_url = json_url["long-url"]
				return real_url
			else:
				# Bug fix: these log messages wrongly said "LongURLPlease".
				try:
					self.output_logs.write("LongURL failure with URL: %s\n" %url_posted)
				except:
					self.output_logs.write("LongURL failure with URL")
				# Counter just incremented, so this rotates on every failure
				self.number_exceptions[self.expander_service] = self.number_exceptions[self.expander_service] + 1
				if(self.number_exceptions[self.expander_service] > 0):
					self.expander_service = (self.expander_service + 1) % 3
					self.number_exceptions[self.expander_service] = 0
				return url_posted
		except:
			try:
				self.output_logs.write("LongURL exception with URL: %s\n" %url_posted)
			except:
				self.output_logs.write("LongURL exception with URL")
			self.number_exceptions[self.expander_service] = self.number_exceptions[self.expander_service] + 1
			if(self.number_exceptions[self.expander_service] > 0):
				self.expander_service = (self.expander_service + 1) % 3
				self.number_exceptions[self.expander_service] = 0
			return url_posted

	def LongURLPlease(self, url_posted):
		"""Expand *url_posted* via the longurlplease.com v1.1 API.

		Returns the expanded URL, or *url_posted* itself on failure; any
		failure/exception increments number_exceptions for the current
		expander and rotates self.expander_service to the next service.
		"""
		try:
			shortened_url = urllib2.Request("http://www.longurlplease.com/api/v1.1?q=%s" %url_posted)
			opener = urllib2.build_opener()
			f_url = opener.open(shortened_url)
			expanded_url = f_url.read()
			json_url = simplejson.loads(expanded_url)

			# A missing key raises KeyError and is handled by the outer except
			if not(json_url[url_posted] is None):
				real_url = json_url[url_posted]
				return real_url
			else:
				try:
					self.output_logs.write("LongURLPlease failure with URL: %s\n" %url_posted)
				except:
					# Bug fix: the fallback used to retry the exact formatted
					# write that just failed; use a plain message like the
					# sibling expander methods do.
					self.output_logs.write("LongURLPlease failure with URL")
				# Counter just incremented, so this rotates on every failure
				self.number_exceptions[self.expander_service] = self.number_exceptions[self.expander_service] + 1
				if(self.number_exceptions[self.expander_service] > 0):
					self.expander_service = (self.expander_service + 1) % 3
					self.number_exceptions[self.expander_service] = 0
				return url_posted
		except:
			try:
				self.output_logs.write("LongURLPlease exception with URL: %s\n" %url_posted)
			except:
				self.output_logs.write("LongURLPlease exception with URL")
			self.number_exceptions[self.expander_service] = self.number_exceptions[self.expander_service] + 1
			if(self.number_exceptions[self.expander_service] > 0):
				self.expander_service = (self.expander_service + 1) % 3
				self.number_exceptions[self.expander_service] = 0
			return url_posted

	def SearchTwitter(self, url):
		# Query the (legacy) Twitter Search API for tweets mentioning *url*
		# and return the decoded JSON payload, or None on any failure.
		# Every call is counted, even when url is None.
		self.twitter_search_api_calls = self.twitter_search_api_calls + 1
		self.output_logs.write("Twitter Search API calls: " + "%s" %self.twitter_search_api_calls + "\n")

		if url is None:
			return None

		try:
			self.output_logs.write("http://search.twitter.com/search.json?q=" + "%s \n" %url)
			search_request = urllib2.Request("http://search.twitter.com/search.json?q=%s" %url + "&rpp=20&lang=en", headers={"Accept" : "text/html"})
			response = urllib2.build_opener().open(search_request)
			return simplejson.loads(response.read())
		except:
			return None

	def AddToItemList(self, parent_user_id, child_user_id, url):

		try:
			self.output_logs.write("From " + "%s" %child_user_id+"'s profile\n")
			self.output_logs.write("URL: " "%s" %url + "\n")

			print "From " + "%s" %child_user_id+"'s profile"
			print "URL: " "%s" %url + "\n"
		except:
			pass

		# Add user_id to Users list, if not there already	
		if (self.users.count(child_user_id) == 0):
			self.users.append(child_user_id)				
		# Put URL in items list if it's not there already
		if (self.items.count(url) == 0):
			self.items.append(url)
		
		# Add URL to total items	
		self.total_items.append(url)
		
		if not(url is None):
 			try:
				self.output.write("%s" %child_user_id + ",%s" % self.items.index(url) + "\n")
				self.output_info.write("%s" %child_user_id + ", %s" % self.items.index(url) + "= " + "%s" %url + "\n")
			except:
				pass

	def AddToFilteredItemList(self, parent_user_id, child_user_id, url):

		try:
			self.output_filtered_logs.write("[FILTERED] From " + "%s" %child_user_id+"'s profile \n")
			self.output_filtered_logs.write("[FILTERED] URL: " "%s" %url + "\n")

			print "[FILTERED] From " + "%s" %child_user_id+"'s profile"
			print "[FILTERED] URL: " "%s" %url + "\n"
		except:
			pass

		# Add user_id to Users list, if not there already	
		if (self.filtered_users.count(child_user_id) == 0):
			self.filtered_users.append(child_user_id)				
		# Put URL in items list if it's not there already
		if (self.filtered_items.count(url) == 0):
			self.filtered_items.append(url)

		# Add URL to total filtered items
		self.total_filtered_items.append(url)

		if not(url is None):
			try:
				self.output_filtered.write("%s" %child_user_id + ",%s" % self.filtered_items.index(url) + "\n")		
				self.output_filtered_info.write("%s" %child_user_id + ", %s" % self.filtered_items.index(url) + "= " + "%s" %url + "\n")
			except:
				pass

	def FilterContent(self, tweet):

		words_in_tweet = tweet.text.split(' ')

		for word in words_in_tweet:
			if word in self.tweetum_entities:
				try:
					self.output_filtered_logs.write("TweetUM Match! Matched with word: %s" %word + "\n")
					print "TweetUM Match! Matched with word: %s" %word + "\n"
				except:
					pass
				return tweet

####################################################################################

def RunMain():
	usage = "(First version) usage: %prog <outputfilepath> <screen_name>"
	parser = OptionParser(usage)
	(options, args) = parser.parse_args()

	if( len(args) < 2):
		print "Invalid number of arguments. Use IFN.py --help to see the details."
	else:
		outputfilepath = args[0]
		screen_name = args[1]

		try:
			print "Updating the implicit followers user-item matrix"

			ImplicitFollowers(outputfilepath, screen_name)
		except:
			pass
			raise

# Run only when executed as a script, not when imported.
if __name__ == "__main__":
	RunMain()
