import logging
from urllib2 import urlopen
from BeautifulSoup import BeautifulSoup
import re
from db import *
import datetime
import time
import urllib
from urllib2 import * 
import urllib2
from BeautifulSoup import BeautifulSoup
import cookielib
from google.appengine.api import urlfetch
          
## Scraper for UM Course data. Scrapes from the myUM website.
#  @param semester String value: year, followed by semester number, concatenated. Ex: "20121" signifies Fall 2012
#  @param letter String value for the first letter of the department code to scrape.
def scrapeUMCourse(semester, letter):
    """Scrape UM course listings from the public myUM class-search page.

    For every department whose 3-letter code starts with ``letter``, submits
    the ASP.NET search form for ``semester``, walks the result rows, fetches
    each course's detail page, and creates/updates Course, Professor,
    Department and Statistics entities in the datastore.
    """
    logging.info("scrapeUMCourse for " + letter)
    
    # Singleton Statistics entity; created lazily on the first run.
    stat = Statistics().all().get()
    if stat == None: stat = Statistics()
    
    mainUrl = "http://public.cgcent.miami.edu/myUMNet/General/ClassSearch/ClassSearch.aspx"
    # Cookie-aware opener: the ASP.NET site needs the same session cookie
    # across the initial GET and the form POSTs that follow.
    cj = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))  
    if(opener == None):
        return
    resp = opener.open(mainUrl)
    mainSoup = BeautifulSoup(resp)
    
    # Department codes are the 3-character <option> values on the search
    # form; keep only the ones starting with the requested letter.
    classes = mainSoup.findAll('option');
    classCodes = []
    for b in classes:
        if(len(b['value']) == 3 and b['value'][:1] == letter):
            classCodes.append(b['value'])
    #classCodes = ['BMB']
    for c in classCodes:
        # ASP.NET WebForms postback: the hidden __VIEWSTATE and
        # __EVENTVALIDATION values from the form page must be echoed back
        # or the server rejects the POST.
        viewstate = mainSoup.find('input', id="__VIEWSTATE")['value']
        eventstate = mainSoup.find('input', id="__EVENTVALIDATION")['value']
        courseData = {
            "ctl00$MainContent$ddlTerm" : semester,
            "ctl00$MainContent$ddlDepartment" : c,
            "ctl00$MainContent$ddlCourseCareer" : "U",
            "ctl00$MainContent$ddlCourseComparator" : "GTE",
            "ctl00$MainContent$cmdSearch" : "Search",
            "__EVENTVALIDATION" : eventstate,
            "__VIEWSTATE" : viewstate,
            "__EVENTTARGET" : "",
            "__EVENTARGUMENT" : "",
            "__LASTFOCUS" : "",
            "__VIEWSTATEENCRYPTED" : ""
        }
  
        data = urllib.urlencode(courseData)
        req = urllib2.Request(mainUrl, data)
        response = opener.open(req)
        page = response.read()
        innerSoup = BeautifulSoup(page)
        # Result rows live in a repeater with zero-padded 2-digit ordinal
        # ids ("ctl01", "ctl02", ...); walk rows until one is missing.
        courseCount = 1
        currentCourse = innerSoup.findAll('a', id="ctl00_MainContent_rClassList_ctl01_lbShowCourseDetail")
        while not (len(currentCourse) == 0):
            # "%2.0f" right-aligns in width 2; replacing the space with "0"
            # zero-pads the ordinal (1 -> "01").
            num = ("%2.0f" % courseCount).replace(" ", "0")
            # Link text is "DEPNNN SECTION"; split on the first space.
            courseName = innerSoup.find('a', id="ctl00_MainContent_rClassList_ctl" + num + "_lbShowCourseDetail").text
            spaceIndex = courseName.find(" ")
            courseSection = courseName[spaceIndex+1:]
            courseName = courseName[:spaceIndex]
            deptCode = courseName[:3]
            courseNum = courseName[3:]
            professor = innerSoup.find('span', id="ctl00_MainContent_rClassList_ctl" + num + "_lInstructors").text.strip()
            firstName = professor[:1]
            # Last name is recovered from the raw HTML of the instructor
            # span: first text fragment between tags, then the first word
            # after its first character (presumably skipping a separator —
            # TODO confirm against live markup).
            professorHTML = str(innerSoup.find('span', id="ctl00_MainContent_rClassList_ctl" + num + "_lInstructors")).strip()
            lastName = re.findall(r">(.*?)<", professorHTML)
            logging.error(lastName)
            if not len(lastName) == 0 :
                lastName = lastName[0].strip()
                lastName = re.search("(\w)+", lastName[1:]).group()
            else :
                lastName = " "
            
            logging.error(firstName + " " + lastName + "\n\n")
            
            # Only the leading digit of the credits field is kept.
            credits = innerSoup.find('span', id="ctl00_MainContent_rClassList_ctl" + num + "_lCredits").text
            credits = int(credits[0])
            room = innerSoup.find('span', id="ctl00_MainContent_rClassList_ctl" + num + "_lRooms").text
            avail = int(innerSoup.find('span', id="ctl00_MainContent_rClassList_ctl" + num + "_l7").text)
            courseTitle = innerSoup.find('span', id="ctl00_MainContent_rClassList_ctl" + num + "_lTitle").text
            arranged = False
            #get the days MTWRF
            days = str(innerSoup.find('span', id="ctl00_MainContent_rClassList_ctl" + num + "_lDays")).strip()
            if ("Arranged" in days):
                days = ["Arranged"]
                arranged = True
            else:
                # Day strings are the uppercase runs between tags, e.g. "MWF".
                temp = re.findall(r'>[A-Z]+<', days)
                days = []
                for specDay in temp:
                    days.append(specDay[1:-1].strip())
            
            #get the timings
            times = str(innerSoup.find('span', id="ctl00_MainContent_rClassList_ctl" + num + "_lTimes")).strip()
            if ("Arranged" in times):
                # Sentinel 1:00 AM start/end for arranged-time sections.
                start = datetime.datetime.strptime("1:00 AM", "%I:%M %p")
                end = datetime.datetime.strptime("1:00 AM", "%I:%M %p")
                arranged = True
            else:
                temp = re.findall('>\s*\d+.*?[A-Z]+\s*<', times)
                # NOTE(review): str() is always empty here — this log line
                # carries no information.
                logging.error(str())
                times = []
                for specTime in temp:
                    times.append(specTime[1:-1].strip())
            
            # Build one TimeRange per (day, time) pairing; days[count] is a
            # string like "MWF", so each character becomes its own range.
            timeObject = []
            if(not arranged and len(days) == len(times) and len(days) > 0):
                for count in range(len(days)):
                    tTime = times[count].split('-')
                    logging.error(str(tTime) + " is entered !!!!!")
                    start = datetime.datetime.strptime(tTime[0].strip(), "%I:%M %p").time()
                    end = datetime.datetime.strptime(tTime[len(tTime)-1].strip(), "%I:%M %p").time()
                    for tDay in days[count]:
                        logging.info(start)
                        logging.info(end)
                        timeRange = TimeRange(start, end, dayToOffsetMap[tDay])
                        timeObject.append(timeRange)
            else:
                logging.error("FAILED===================================================" + str(len(days)) + "and" + str(len(times)))
             
                
            #"ctl00$MainContent$rClassList$ctl" + num + "$lbShowCourseDetail"
            #ctl00%24MainContent%24rClassList%24ctl02%24lbShowCourseDetail
            # Second postback: fire the row's "show detail" link target to
            # load the course detail page (prereqs/description/notes).
            viewstate2 = innerSoup.find('input', id="__VIEWSTATE")['value']
            eventstate2 = innerSoup.find('input', id="__EVENTVALIDATION")['value']
            courseData2 = { 
                "__EVENTVALIDATION" : eventstate2,
                "__VIEWSTATE" : viewstate2,
                "__EVENTTARGET" : r"ctl00$MainContent$rClassList$ctl" + num + r"$lbShowCourseDetail",
                "__EVENTARGUMENT" : "",
                "__VIEWSTATEENCRYPTED" : ""
            }
  
            data2 = urllib.urlencode(courseData2)
            req2 = urllib2.Request(mainUrl, data2)
            response2 = opener.open(req2)
            page2 = response2.read()
            newSoup = BeautifulSoup(page2)
            # Each detail field may be absent; leave it None in that case.
            prereq = newSoup.find('span', id=r"ctl00_MainContent_fvCourse_Label10")
            if not (prereq == None):
                prereq = prereq.text.strip()
            description = newSoup.find('span', id=r"ctl00_MainContent_fvCourse_lLongDesc")
            if not (description == None):
                description = description.text.strip()
            notes = newSoup.find('span', id=r"ctl00_MainContent_fvCourse_lNotes")
            if not (notes == None):
                notes = notes.text.strip()
            logging.error('\nentered ' + courseName + ' ' + courseSection + '\n')
            firstName = firstName.upper()
            lastName = lastName[0].upper() + lastName[1:]
            course = Course()
            # NOTE(review): GQL built by string concatenation — a name
            # containing a quote breaks the query (injection risk); bound
            # parameters would be safer.
            query = db.GqlQuery("SELECT * FROM Professor WHERE firstName='" + firstName + "' AND lastName='" + lastName + "' LIMIT 1")
            if(query.count() == 0) :
                # New professor: create the entity and bump the counter.
                prof = Professor(lastUpdate = datetime.datetime.now().date(), canecalendarRating = 0)
                prof.firstName = firstName
                prof.lastName = lastName
                prof.courses = [courseName]
                prof.put()
                course.professor = prof.key()
                logging.error("\nadded new professor with " + courseName + " and " + firstName + " " + lastName + "\n")
                
                stat.numProfessors += 1
                stat.put()
            else:
                # Existing professor: refresh timestamp and course list.
                prof = query.get()
                prof.lastUpdate = datetime.datetime.now().date()
                if not (courseName in prof.courses):
                    prof.courses.append(courseName)
                prof.put()
                course.professor = prof.key()
                logging.error("\nupdated professor with " + courseName + " and " + firstName + " " + lastName + "\n")
                
            # Get-or-create the Department entity for this 3-letter code.
            dept = Department().all().filter("code =", deptCode).get()
            if dept == None:
                dept = Department()
                dept.code = deptCode
                dept.put()
                
                stat.numDepartments += 1
                stat.put()
            
            course.dept = dept
            course.courseNum = courseNum   
            course.section = courseSection
            course.credits = credits
            course.prerequisites = prereq
            course.description = description
            course.times = timeObject
            course.title = courseTitle
            course.openSpots = avail
            course.location = room
            course.notes = notes
            course.put()
            
            stat.numCourses += 1
            stat.put()
            
            # Advance to the next repeater row; the loop stops when the
            # next row's detail link does not exist.
            courseCount += 1
            num = ("%2.0f" % courseCount).replace(" ", "0")
            currentCourse = innerSoup.findAll('a', id="ctl00_MainContent_rClassList_ctl" + num + "_lbShowCourseDetail")
            
## Scraper for UM professor ratings. Scrapes from myUM website.
#  @param username Username string value for logging into the myUM website.
#  @param password Password string value for logging into the myUM website.
#  @param letter String value for the first letter of the department code to scrape.
def scrapeUMRatings(username, password, letter):
    """Scrape faculty evaluation ratings from the authenticated myUM site.

    Logs in through CAS with ``username``/``password``, then for every
    department whose 3-letter code starts with ``letter`` (over terms 1 and
    2) parses the rating data embedded in the result links' ``onclick``
    attributes and creates/updates Professor and Statistics entities.
    """
    stat = Statistics().all().get()
    logging.error("Starting myUM Ratings Scrape for letter: " + letter)
    if stat == None: stat = Statistics()    
    
    urlLogin = 'https://caneid.miami.edu/cas/login?service=https://myum.miami.edu/idcheck.asp'
    urlFacultyMain = 'https://myum.miami.edu/Student/facinfo/fc_evals.asp'
    urlFacultySearch = 'https://myum.miami.edu/Student/facinfo/fc_evals2.asp'
  
    #open the login page to store the cookie
    cj = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    if(opener == None):
        return
    resp = opener.open(urlLogin)
    
    #get the lt value which is needed to login
    # CAS embeds a one-time "lt" (login ticket) hidden field that must be
    # posted back with the credentials.
    mainSoup = BeautifulSoup(resp)
    key = mainSoup.findAll('input', attrs={'name' : 'lt', 'type' : 'hidden'})
    lt = 0
    for i in key:
        lt = i['value']
    loginValues = { 
        'username' : username,
        'password' : password,
        'Login' : 'Submit',
        'lt' : lt
    }
    
    data = urllib.urlencode(loginValues)
    req = urllib2.Request(urlLogin, data)
    response = opener.open(req)
    confirmLogin = response.read()
    
    #click the login link to process
    # The confirmation page holds a continue link in a span.detail; follow
    # it to complete the CAS session.
    mainSoup = BeautifulSoup(confirmLogin)
    login = mainSoup.findAll('span', attrs={'class' : 'detail'})
    a = 0
    for j in login:
        a = j.find('a')['href']
    opener.open(a)
    
    #navigate to the faculty evaluation page
    openFaculty = opener.open(urlFacultyMain)
    mainSoup = BeautifulSoup(openFaculty.read())
    classes = mainSoup.findAll('option');
    classCodes = []
    # Map of department display name -> 3-letter code, filtered to codes
    # starting with the requested letter.
    allCourses = {}
    for b in classes:
        if(len(b['value']) == 3 and b['value'][:1] == letter):
            # Display text looks like "Name (CODE)"; keep the part before "(".
            department = re.search(r"[^(]+", b.text).group()
            allCourses[department] = b['value']
            #print allCourses["Accounting "]
            # classCodes.append(b['value'])
                  
    terms = [1,2]
    for depart,c in allCourses.items():
        for d in terms:
            #print c , " " , d
            courseValues = {'Department' : c, 'Term' : d, 'CrsNum' : ''}
            encodeCourseValues = urllib.urlencode(courseValues) 
            req = urllib2.Request(urlFacultySearch, encodeCourseValues)
            response = opener.open(req)
            innerSoup = BeautifulSoup(response)
            links = innerSoup.findAll('a', attrs={'href' : '#'})
            for i in links:
                  
                # All rating data is URL-encoded inside the link's onclick
                # JavaScript; everything below parses that string.
                parseThis = i['onclick']
                print parseThis
                #get the course
                course = re.search(r'[A-Z]{3}[0-9]{3}', parseThis).group()
                logging.error('\n' + course + '\n')
                #parse the first initial and last name
                # Instr=<initial>%<...>+<lastname>; "%2D" is an encoded "-".
                fullName = re.search(r'Instr=[^&]*', parseThis).group()
                lastName = fullName[fullName.find("+"):]
                lastName = lastName.replace("2D", "-")[1:]
                lastName = lastName.replace("%", "")
                lastName = lastName.replace("+", " ")
                firstName = re.search(r'=[^%]*', fullName).group()[1:]
                print firstName + " " + lastName
                valueRatings = []
                #there can be up to 16 questions
                # NOTE(review): range(0, 15) iterates 15 times, so questions
                # 1..15 are parsed — a 16th question would be skipped.
                for ind in range(0, 15):
                    currentRating = parseRating(ind + 1, parseThis)
                    #if both strongly agree and neutral are zero, then there is no rating
                    if (currentRating[0] == 0 and currentRating[1] == 0):
                        valueRatings.append(float(-1))
                    else :
                        valueRatings.append(float(currentRating[0]))
                #query the datastore to find a match
                firstName = firstName.upper()
                lastName = lastName[0].upper() + lastName[1:]
                # NOTE(review): GQL built by string concatenation — a name
                # containing a quote breaks the query (injection risk).
                query = db.GqlQuery("SELECT * FROM Professor WHERE firstName='" + firstName + "' AND lastName='" + lastName + "' LIMIT 1")
                #if there is no match, insert a new professor
                if(query.count() == 0) :
                    prof = Professor(lastUpdate = datetime.datetime.now().date(), canecalendarRating = 0)
                    prof.firstName = firstName
                    prof.lastName = lastName
                    prof.myumRatings = valueRatings
                    prof.courses = [course]
                    prof.myumNumber = 1
                    prof.department = depart
                    prof.put()
                    
                    stat.numProfessors += 1
                    stat.put()
                    
                #since there is a match, update the existing record
                else:
                    prof = query.get()
                    prof.lastUpdate = datetime.datetime.now().date()
                    numRatings = prof.myumNumber
                    logging.error(prof.lastName + "\n")
                    if len(prof.myumRatings) == 0:
                        prof.myumRatings = valueRatings
                    else :
                        # Incremental running mean over the previous
                        # numRatings samples plus the new one, rounded to
                        # two decimal places.
                        for i in range(0, 15):
                            if (prof.myumRatings[i] == None or valueRatings[i] == None):
                                pass
                            else:
                                prof.myumRatings[i] = round(float(((prof.myumRatings[i] * numRatings) + valueRatings[i]) / (numRatings + 1)), 2)
                    if(prof.myumNumber == None):
                        prof.myumNumber = 1
                    else:
                        prof.myumNumber += 1
                    if not (course in prof.courses):
                        prof.courses.append(course)
                    prof.department = depart
                    prof.put()        

###############################################################################
## Rating parser to convert word ratings into integers.
#  @param num Number value within string.
#  @param parseString String to parse.
def parseRating(num, parseString):
    """Extract the three percentage buckets for evaluation question *num*.

    ``parseString`` contains URL-style pairs like ``Y3=55`` (strongly agree)
    and ``N3=25`` (neutral) for question 3. Returns a three-element list of
    ints: [strongly agree, neutral, disagree], where disagree is whatever
    remains out of 100. Raises AttributeError if a pair is missing.
    """
    question = str(num)

    # Capture the value after "Y<num>=" / "N<num>=" up to the next "&".
    agree = int(re.search('Y' + question + '=([^&]*)', parseString).group(1))
    neutral = int(re.search('N' + question + '=([^&]*)', parseString).group(1))

    # The three buckets always sum to 100.
    return [agree, neutral, 100 - agree - neutral]
############################################################################### 
###############################################################################
                  
## Scraper for RateMyProfessor.com ratings.
#  @param letter String value for first letter of last name to scrape.
def scrapeRMP(letter):
    """Scrape RateMyProfessors ratings for UM professors whose last name
    starts with ``letter``, creating/updating Professor entities.
    """
    stat = Statistics().all().get()
    if stat == None: stat = Statistics()    
    
    # NOTE(review): alphabet ("A".."Z") is computed but never used.
    alphabet = map(chr, range(65, 91))
    
    #for each letter in the alphabet, open the University of Miami page 
    # sid=1241 is RMP's school id for the University of Miami.
    mainUrl = urlopen("http://www.ratemyprofessors.com/SelectTeacher.jsp?the_dept=All&orderby=TLName&sid=1241&letter=" + letter)
    #mainUrl = urlfetch.fetch("http://www.ratemyprofessors.com/SelectTeacher.jsp?the_dept=All&orderby=TLName&sid=1241&letter=" + letter, deadline=10)
    mainSoup = BeautifulSoup(mainUrl)
    #grab all the professor names for that letter
    professorNames = mainSoup.findAll('div', attrs={'class' : 'profName'})
    #####################################
    #professorNames = [professorNames[0]]  #takes only the first in the alphabet
    #####################################
    #for each professor by a specific letter
    for eachProf in professorNames:
        profLink = eachProf.find('a')['href']
        profLink = profLink.strip()
        #if there are ratings entered
        # Unrated professors link to AddRating instead of ShowRatings.
        if profLink.startswith("ShowRatings"):
            #go to the rating page
            print profLink
            currentProf = urlopen("http://www.ratemyprofessors.com/" + profLink)
            eachPageSoup = BeautifulSoup(currentProf)
            #get the name of the professor
            name = eachPageSoup.find('h2', id="profName")
            if name == None:
                continue
            # Name renders as "First&nbsp;Last"; split into [first, last].
            name = name.text.split("&nbsp;")
            
            #parse the relevant details from the webpage
            # Only the first initial of the first name is kept, matching
            # how the myUM scrapers store professors.
            fName = name[0]
            if(len(fName) > 1):
                fName = fName[0]
            lName = name[1]
            lName = lName.replace("'", "")
            fName = fName.upper();
            lName = lName[0].upper() + lName[1:]
            helpfulness = float(eachPageSoup.find('li', id='helpfulness').find('strong').text)
            clarity = float(eachPageSoup.find('li', id='clarity').find('strong').text)
            easiness = float(eachPageSoup.find('li', id='easiness').find('strong').text)
            numRatings = int(eachPageSoup.find('p', id='rateNumber').find('strong').text)
            #query the datastore to find a match
            # NOTE(review): GQL built by string concatenation — a name
            # containing a quote breaks the query (injection risk).
            query = db.GqlQuery("SELECT * FROM Professor WHERE firstName='" + fName + "' AND lastName='" + lName + "' LIMIT 1")
            #if there is no match, insert a new professor
            # NOTE(review): this "added" message is logged before the
            # insert-vs-update branch, so it also fires on updates.
            logging.error("\nadded professor: " + fName + " " + lName + " to database")
            if(query.count() == 0):
                prof = Professor(lastUpdate = datetime.datetime.now().date(), canecalendarRating = 0)
                prof.firstName = fName
                prof.lastName = lName
                prof.rateMyProfessorRawRatingHelpfulness = helpfulness
                prof.rateMyProfessorRawRatingClarity = clarity
                prof.rateMyProfessorRawRatingEasiness = easiness
                prof.rateMyProfessorNumber = numRatings
                prof.put()
                
                stat.numProfessors += 1
                stat.put()
            #since there is a match, update the existing record
            else:
                prof = query.get()
                prof.lastUpdate = datetime.datetime.now().date()
                prof.rateMyProfessorRawRatingHelpfulness = helpfulness
                prof.rateMyProfessorRawRatingClarity = clarity
                prof.rateMyProfessorRawRatingEasiness = easiness
                prof.rateMyProfessorNumber = numRatings
                prof.put()
