from BeautifulSoup import BeautifulSoup
from cookielib import CookieJar
import re
import datetime
import urllib2
import urllib
'''
Gets a walmart employee's schedule from Mywalmart.com
'''

class day():
	'''
	One day of a schedule week.

	day   -- date string like 'Monday Feb 01'
	work  -- True when a shift is scheduled for this day
	shift -- 'start-end' string, e.g. '8:00am-5:00pm', or None
	meal  -- 'start-end' string for the meal break, or None
	'''
	def __init__(self, day, work=False, shift=None, meal=None):
		self.daystring = day
		# Day of the month parsed out of e.g. 'Monday Feb 01'.
		# strptime does not cross-check %A against the date, so any
		# weekday name is accepted.
		self.dayasnum = datetime.datetime.strptime('%s' % self.daystring, '%A %b %d').day
		self.work = work
		# Bug fix: always define every attribute.  The old code only
		# bound shift/meal (and their pieces) inside `if work:` /
		# `if meal:`, so reading .meal on a day off raised
		# AttributeError (e.g. from Week.fullDays()/halfDays()).
		self.shift = shift
		self.meal = meal
		self.start = self.end = None
		self.mealStart = self.mealEnd = None
		if shift:
			self.start, self.end = shift.split('-')[:2]
		if meal:
			self.mealStart, self.mealEnd = meal.split('-')[:2]
	def __str__(self):
		if not self.work:
			return '%s Not Working' % (self.daystring)
		return '%s %s %s' % (self.daystring, self.shift, self.meal)


class Week(list):
	'''
	A list of day objects holding one week of a walmart employee's
	schedule scraped from Mywalmart.com.

	Typical use:
		w = Week()
		w.login(user, passw)
		w.getWeek()
	'''
	def __init__(self):
		# opener and ID are filled in by login(); both are required
		# before getWeek()/getSoup() can download anything.
		self.opener = None
		self.user = None
		self.ID = None
		self.passw = None
		self.now = datetime.datetime.now()

	def getWeek(self, week=4):
		'''
		Download and parse one week's schedule into this list.

		Walmart starts their year on February first, so there is a
		four week difference between them and the calendar year; you
		have to subtract 4 from the current week to get the current
		week in the walmart year.  1 is the lowest value you can pass.

		Raises ValueError for week < 1 and IOError when the site
		cannot be reached.  Requires login() to have been called.
		'''
		if week < 1:
			raise ValueError('Week needs to be higher than 0')
		del self[:]
		# The schedule week rolls over on the weekend, so Saturday and
		# Sunday still belong to the previous week number.
		if self.now.strftime('%A') in ('Saturday', 'Sunday'):
			week -= 1
		week = int((self.now - datetime.timedelta(weeks=week)).strftime("%U"))
		try:
			soup = self.getSoup(week)
		except urllib2.URLError:
			# Bug fix: the old code compared the exception object to a
			# string with `in` (a TypeError) and, when that failed,
			# fell through to use an unbound `soup`.  Always surface a
			# clear connection error instead.
			raise IOError('Cannot establish connection')
		self.organize(soup)

	def getID(self, page):
		'''
		Extract the javascript winNumber variable (needed in the
		schedule url) from the login response html and store it in
		self.ID.

		Raises ValueError when the page reports a login error.
		'''
		if page.find('Login Error') != -1:
			raise ValueError('Wrong username or password')
		# The value sits between 'winNumber = ' and the next ';'
		# (the +12 skips the 9-char name plus ' = ').
		start = page.find('winNumber') + 12
		end = page.find(';', start)
		self.ID = page[start:end]

	def login(self, user, passw):
		'''
		Mywalmart uses cookies after you login; this method performs
		the login, keeps the cookie-carrying opener in self.opener and
		records the session ID via getID().
		'''
		self.user = user
		self.passw = passw
		opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(CookieJar()))
		url = 'https://mywalmart.com/cleartrust/ct_logon_en.html?'
		values = {'y': '0',
				'x': '0',
				'password': '%s' % self.passw,
				'user': '%s' % self.user,
				'auth_mode': 'basic'
				}
		data = urllib.urlencode(values)
		login = opener.open(url, data)
		self.getID(login.read())
		self.opener = opener

	def getSoup(self, week):
		'''
		Downloads the html source of the given week's printable
		schedule and returns the contents of its td tags.
		'''
		date = self.now.strftime("%m-%d-%Y")
		schedUrl = 'https://mywalmart.com/Schedule/printableSchedule.aspx?w=%s&d=%s&p=%d' % (self.ID, date, week)
		sched = self.opener.open(schedUrl).read()
		return BeautifulSoup(sched).findAll({'td': True})

	def organize(self, soup):
		'''
		Turn the raw td soup into day objects appended to self.

		Rows look like one of:
			<day> <...> <shift am/pm> <meal am/pm>   full day
			<day> <...> <shift am/pm> None           shift, no meal
			<day> Not Working                        day off
		'''
		td = ['%s' % self.getVal(cell) for cell in soup]
		last = len(td) - 1
		for i in range(last):
			if not re.search('day', td[i]):
				continue
			# Bug fix: the old code bounds-checked i+2 but then read
			# td[i+3] (possible IndexError), and its three independent
			# `if`s could append the same day twice; use elif chains
			# with bounds that match the indexes actually read.
			if i + 3 <= last and re.search('[ap]m', td[i + 2]) and re.search('[ap]m', td[i + 3]):
				self.append(day(td[i], True, td[i + 2], td[i + 3]))
			elif i + 3 <= last and re.search('[ap]m', td[i + 2]) and re.search('None', td[i + 3]):
				self.append(day(td[i], True, td[i + 2]))
			elif i + 1 <= last and re.search('Not', td[i + 1]):
				self.append(day(td[i]))

	def getVal(self, soup):
		'''
		Return the text of a single td cell: shift cells keep their
		text in a span, other cells are plain td content where
		<br /> tags stand in for spaces.
		'''
		cell = BeautifulSoup('%s' % soup)
		if cell.span is None:
			return re.sub(r'<br />', ' ', cell.td.renderContents())
		return cell.span.renderContents()

	# Note: these iterate self rather than indexing range(7) as the
	# old code did, so a partially-parsed week no longer raises
	# IndexError, and days off no longer break fullDays()/halfDays().
	def __str__(self):
		return '\n'.join('%s' % d for d in self)
	def today(self):
		return ['%s' % d for d in self if d.dayasnum == self.now.day]
	def daysOff(self):
		return [d for d in self if not d.work]
	def daysWorking(self):
		return [d for d in self if d.work]
	def fullDays(self):
		# d.work guards the .meal read, which only exists on work days.
		return [d for d in self if d.work and d.meal]
	def halfDays(self):
		return [d for d in self if d.work and not d.meal]
