#!/usr/bin/python

import MySQLdb
import urllib
import urllib2
import sys
import string
import os
import shutil
from bs4 import BeautifulSoup

# Download the links

# Based on code from http://josechristian.com/2012/09/16/downloading-html-source-code-python/
def parseUsers():
	"""Scrape the OSU online directory for staff contact records.

	Parses a department list ("departments.txt") and a course schedule
	("schedule.txt"), runs a directory search for every instructor found
	in the schedule, follows the "showfull" detail links, and writes the
	scraped records out to the text files named below.  Returns None.
	"""
	# Working/output file names (all created in the current directory).
	# NOTE(review): 'departments' and 'staffList3' are later rebound (to a
	# parsed list and an open file handle respectively) in the main body.
	rawSrc="rawSrc.txt"
	linkSrc="lnksSrc.txt"
	staffSearch="staffLinks.txt"
	departments="departments.txt"
	scheduleFile="schedule.txt"
	tempfile="tempfile.txt"	# shadows the stdlib 'tempfile' name (not imported here, so harmless)
	tempfile2="tempfile2.txt"
	staffDBInfo="staffList1.txt"
	staffList3="staffList.txt"
	# this function will download the source code and save it as a txt file.
	def dlSrc(locPage):
		"""Download locPage and save its raw source to the rawSrc file.

		locPage -- URL to fetch.
		Returns None.

		NOTE(review): not called anywhere in this file; kept for
		compatibility.
		"""
		# fetch the page; try/finally guarantees the connection is closed
		# even if read() fails (the original leaked it on error)
		webPage = urllib.urlopen(locPage)
		try:
			wPageSrc = webPage.read()
		finally:
			webPage.close()
		# 'with' closes the output file even if the write raises
		with open(rawSrc, "w") as wRawSrc:
			wRawSrc.write(wPageSrc)

		return None

	def departmentList(fileName):
		"""Parse the departments file into [name, TLA] pairs.

		Each input line looks like "Chemistry (CH)"; splitting every line
		on '(' yields tokens alternating name, abbreviation.

		fileName -- path to the departments text file.
		Returns a list of [name, tla] lists.
		"""
		values = []
		# 'with' closes the file even if parsing raises (the original
		# leaked the handle on error)
		with open(fileName, "r") as rDepartments:
			for line in rDepartments:
				values.extend(line.split('('))

		# normalize each token: drop newlines, the trailing ')' from the
		# abbreviation, and trailing spaces (same strip order as before)
		values = [v.strip('\n').strip(')').rstrip(' ') for v in values]

		# pair consecutive tokens: even index = name, odd index = TLA.
		# stopping at len-1 means an odd trailing token is ignored instead
		# of raising IndexError.
		departments = []
		for i in range(0, len(values) - 1, 2):
			departments.append([values[i], values[i + 1]])

		return departments

	def staffList(fileName, departmentsList):
		"""Parse the schedule file into a de-duplicated staff list.

		The schedule file consists of 4-line records:
		  line 0: "<TLA> <course number>"   e.g. "CH 464"
		  line 2: "<LastName>, <F.>"        instructor, or "Staff"
		(lines 1 and 3 are ignored).

		fileName        -- path to the schedule text file.
		departmentsList -- [name, tla] pairs (from departmentList) used to
		                   expand the TLA into a full department name.

		Returns a list of [firstName, lastName, department, tla] entries,
		sorted by last name, with duplicates (same first name, last name
		and department) removed, keeping the first occurrence.
		"""
		# 'with' closes the file even if parsing raises
		with open(fileName, "r") as rSchedule:
			data = rSchedule.readlines()

		staff = []
		# walk the file one 4-line record at a time
		for i in range(0, len(data), 4):
			if i + 2 >= len(data):
				break	# incomplete trailing record -- ignore it
			# the last 5 chars are the course number plus newline
			# (e.g. " 464\n"), leaving just the department TLA
			department_tla = str(data[i])[:-5].strip(' ')
			instructor = str(data[i + 2]).strip('\n')
			if instructor == 'Staff':
				continue	# unassigned section, nothing to record
			# "Smith, J." -> firstName "J.", lastName "Smith"
			firstName = instructor[-2:].strip(' ')
			lastName = instructor[:-4].strip(' ')
			# expand the TLA to the full department name when known;
			# fall back to the TLA itself otherwise
			department = department_tla
			for dept_name, tla in departmentsList:
				if department_tla == tla:
					department = str(dept_name).strip(' ')
			staff.append([firstName, lastName, department, department_tla])

		# Sort by last name, then drop duplicates keeping the first
		# occurrence.  (The original index-based removal flagged BOTH
		# members of a duplicate pair unless one sat at index 0, and
		# popped indices from an unsorted set -- which could delete the
		# wrong entries.  A seen-set keeps exactly one of each.)
		seen = set()
		unique = []
		for entry in sorted(staff, key=lambda l: l[1]):
			key = (entry[0], entry[1], entry[2])
			if key not in seen:
				seen.add(key)
				unique.append(entry)

		return unique



	### MAIN PROGRAM ###
	# Two-pass scrape of the OSU online directory:
	#   pass 1: search the directory for each instructor from the schedule,
	#           scrape the result page, and collect "showfull" detail links
	#   pass 2: follow the de-duplicated detail links and scrape those too.
	# NOTE(review): several names are rebound below -- 'departments' (file
	# name -> parsed list), 'staffList' (nested function -> file handle),
	# 'staffList3' (file name -> file handle).  Order-sensitive.

	departments = departmentList(departments)
	print "Departments parsed"
	searchList = staffList(scheduleFile, departments)
	print "Initial staff list collected"

	#print searchList
	#print len(searchList)

	# Output files: search URLs, names searched, detail links found, the
	# raw field dump, and the per-person record listing.
	staffList = open(staffSearch, "w")
	staffList2 = open(tempfile, "w")
	urlList2 = open(tempfile2, "w")
	staffData = open(staffDBInfo, "w")
	staffList3 = open(staffList3, "w")

	staffInfo = []
	linksToFollow = []
	linksToFollow2 = []

	# Pass 1: one directory search per instructor.
	for x in range(len(searchList)):
	#for x in range(10):
		firstName = searchList[x][0]
		lastName = searchList[x][1]
		department = searchList[x][2]
		# last word of the department name (only used by the commented-out
		# narrower search URL below)
		department_split = department.split(' ')
		length = len(department_split)
		department_last = department_split[length-1]
		#directorySearch = "http://directory.oregonstate.edu/?type=search&cn="+firstName+"+"+lastName+"&surname=&mail=&telephonenumber=&osualtphonenumber=&homephone=&facsimiletelephonenumber=&osuofficeaddress=&osudepartment="+department_last+"&affiliation=employee&anyphone=&join=and"
		directorySearch="http://directory.oregonstate.edu/?type=search&cn="+firstName+"+"+lastName
		staffList.write(directorySearch)
		staffList.write('\n')
		staffList2.write(firstName+' '+lastName+' '+department+'\n')

		content = urllib2.urlopen(directorySearch).read()
		soup = BeautifulSoup(content)
		# each <dd> inside <div id="records"> holds one field of a record
		for values in soup.find_all(lambda tag: tag.name=="div" and tag.get("id")=="records"):
			for table1 in values.find_all('dd'):
				test1 = table1.string
				#print test1
				if test1 != None:
					staffInfo.append(str(test1))
					staffList3.write(str(test1))
					staffList3.write('\n')
		# remember links to the full ("showfull") record pages for pass 2
		for link in soup.find_all('a'):
			temp = link.get('href')
			if temp.find('type=showfull') != -1:
				linksToFollow.append(str(temp))
				urlList2.write(link.get('href'))
				urlList2.write('\n')

		staffList3.write('-----------\n')

	# De-duplicate linksToFollow: mark the earlier index of every
	# duplicate pair, then pop marked indices from the highest down so
	# earlier pops do not shift later ones.
	length1 = len(linksToFollow)
	toRemove = []

	for x in range(length1):
		for y in range(x,length1):
				if x != y:
					if linksToFollow[x] == linksToFollow[y]:
						toRemove.append(x)


	temp1 = set(toRemove)
	toRemove = list(temp1)

	toRemove.sort()
	toRemove.reverse()

	# sanity check: removing every link would mean the dedupe logic broke
	if len(toRemove) < len(linksToFollow):
		for x in range(len(toRemove)):
			y = toRemove[x]
			linksToFollow.pop(y)
	else:
		print "Error - toRemove too long"
		sys.exit(0)

	print "First pass of parsing staff complete."
	#"First loop complete"

	length1 = len(linksToFollow)

	# Pass 2: fetch each detail page and scrape it the same way.
	for x in range(length1):
		linksToFollow[x] = str(linksToFollow[x])
		url1 = 'http://directory.oregonstate.edu'+linksToFollow[x]
		#print url1
		try:
			content1 = urllib2.urlopen(url1).read()
			soup1 = BeautifulSoup(content1)
			for values in soup1.find_all(lambda tag: tag.name=="div" and tag.get("id")=="records"):
				for table1 in values.find_all('dd'):
					test1 = table1.string
					#print test1
					if test1 != None:
						test1 = test1.encode('utf-8')
						staffInfo.append(test1)
						staffList3.write(test1)
						staffList3.write('\n')
				for link in soup1.find_all('a'):
					temp = link.get('href')
					temp = str(temp)
					if temp.find('type=showfull') != -1:
						linksToFollow2.append(temp)
						#urlList2.write(link.get('href'))
						urlList2.write(temp)
						urlList2.write('\n')
			staffList3.write('-----------\n')
		except:
			# NOTE(review): bare except also swallows KeyboardInterrupt;
			# intended as a best-effort skip of pages that fail to
			# fetch/parse, but an 'except Exception' would be safer.
			print "Error with following site:  %s" %url1

	#print linksToFollow
	#print len(linksToFollow)
	#print staffInfo

	# dump every scraped field to the DB-import file
	for x in staffInfo:
		print>>staffData, x

	#if len(urlList2) != 0:
	#    print "issue with following links"
	#    sys.exit(0)


	staffList.close()
	staffList2.close()
	urlList2.close()
	staffData.close()
	staffList3.close()

	print "Second pass of parsing staff complete."

	return None

# "http://directory.oregonstate.edu/?type=search&cn="
# J.+Smith
# "&surname=&mail=&telephonenumber=&osualtphonenumber=&homephone=&facsimiletelephonenumber=&osuofficeaddress=&osudepartment="
# CH
# "&affiliation=employee&anyphone=&join=and"
