# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import urllib2
import codecs
import os
from time import gmtime, strftime

class CourseScraper:
    """Scrape course, schedule ("zamak") and exam data for one Tel Aviv
    University department from the yedion catalog CGI endpoint.

    The constructor performs the HTTP POST and keeps the parsed page in
    ``self.soup``; each ``scrape_*`` method walks that same soup with a
    small state machine keyed to the page's <tr> attribute patterns.
    Python 2 code (urllib2; callers use print statements).
    """
    def __init__(self,depart_num,hug_num,year = '2015'):
        """Fetch and parse the catalog results page for one department.

        depart_num -- 1-based index (as a string, "1".."14") of the form's
                      "departmentN" field that should carry the code
        hug_num    -- department (hug) code posted in that field
        year       -- academic year, used in the query and in semester codes
        """
        self.hug_num = hug_num
        self.year = year
        self.depart_num = depart_num
        # The search form has 14 "departmentN" fields; only the one whose
        # index matches depart_num is filled with hug_num, the rest are
        # posted empty.
        data_str = "MfcISAPICommand=but&year="+ self.year
        for i in range(14):
            if (i+1).__str__()==self.depart_num:
                data_str = data_str + "&department" + (i + 1).__str__()  +"=" + self.hug_num
            else:
                data_str = data_str + "&department" + (i + 1).__str__()  +"=" 
        data_str = data_str + "&course_nam=&teach_nam="
        # Supplying data makes urllib2 issue a POST.
        request = urllib2.Request("http://yedion.tau.ac.il/yed/yednew.dll",data = data_str)
        f = urllib2.urlopen(request)
        htmlSource = f.read()
        self.soup = BeautifulSoup(htmlSource)
    
    def __analyze_zamak(self,courses):
        """Turn the row groups collected by scrape_zamak into flat dicts.

        courses -- list of row groups; element 0 of each group is the header
                   row whose second <th> holds the course/group number, the
                   remaining rows are individual schedule lines.

        Returns a list of dicts with keys cnum/gnum/sem/me/ad/yom/heder/
        building/typey/lect; every schedule line of a course shares the same
        accumulated lecturer string.
        """
        res = []
        for course_stats in courses:
            lect = " "
            l = []
            # Header cell looks like "XXXX-XXXX-GG": strip the dashes, the
            # first 8 digits are the course number, the last 2 the group.
            cnum = course_stats[0].find_all("th")[1].string.replace('-',"")[:8]
            gnum = course_stats[0].find_all("th")[1].string.replace('-',"")[-2:]
            for c in course_stats[1:]:
                tag_list = c.find_all("td")
                # Rows with more than 5 <td>s are actual schedule lines:
                # [semester, hours, day, room, building, type, ...].
                if len(tag_list) >5:
                    stats = [x.string for x in tag_list]
                    
                    # Semester letter -> "<year>1" (aleph) / "<year>2" (bet).
                    sem = stats[0]
                    if u"א" in sem:
                        sem = self.year +"1"
                    elif u"ב" in sem:
                        sem = self.year +"2"
                    else:
                        sem = " "
                        
                    # Hours come as "HHMM-HHMM" (end-start); split into
                    # me (from, last 4 chars) and ad (to, first 4 chars).
                    if stats[1] is None or "-" not in stats[1]:
                        me = " "
                        ad = " "
                    else: 
                        me = stats[1][-4:]
                        ad = stats[1][:4]
                         
                    # Day of week: keep only the first character of the cell.
                    if stats[2] is None:
                        yom = " "
                    else:
                        yom = stats[2][0]
                         
                    # Room number, or a blank placeholder.
                    if stats[3] is None:
                        heder = " "
                    else: heder = stats[3]
                         
                    # Building name, or a blank placeholder.
                    if stats[4] is None:
                        building = " "
                         
                    else: building = stats[4]
                     
                    # Teaching type (lecture/exercise/...), or blank.
                    if stats[5] is None:
                        typey = " "
                    else: typey = stats[5]
                    
                    l.append({"cnum":cnum, "gnum":gnum,"sem":sem,"me":me, "ad":ad, "yom":yom,"heder":heder, "building": building, "typey":typey})
                      


                # Lecturer rows carry a colspan=2 (or colspan=8) cell.
                # i == True means "no honorific matched yet".
                i = True
                tmp = c.find("td", colspan = "2")
                if tmp is None:
                    tmp = c.find("td", colspan = "8")
                    
                if tmp is not None and not tmp.string == " " :
                    tmp = tmp.string.split(" ")
                    # For each known honorific (Mr/Prof/Ms/Dr) the name is
                    # rebuilt as "<title> <rest of tokens> <first token>",
                    # i.e. the token that followed the title (presumably the
                    # surname in the source page's order) is moved to the
                    # end. remove() raising ValueError means "title absent".
                    try:
                        tmp.remove(u"מר")
                        tmp = u"מר" + " "+" ".join(tmp[1:]) +" "+ tmp[0]
                        i = False
                    except ValueError:
                        pass
                    
                    if i:
                        try:
                            tmp.remove(u"פרופ")
                            tmp = u"פרופ" + " "+" ".join(tmp[1:]) +" "+ tmp[0]
                            i = False
                        except ValueError:
                            pass
                    
                    if i: 
                        try:
                            tmp.remove(u"גב'")
                            tmp = u"גב'" + " "+ " ".join(tmp[1:]) +" "+ tmp[0]
                            i = False
                        except ValueError:
                            pass
                    if i:
                        try:
                            tmp.remove(u'ד"ר')
                            tmp = u'ד"ר' + " "+" ".join(tmp[1:]) + " "+ tmp[0]
                            i = False
                        except ValueError:
                            pass
                    # No honorific at all: keep the tokens in page order.
                    if i:
                        tmp = " ".join(tmp)
                    # Accumulate distinct lecturers, " + "-separated.
                    if lect == " ":
                        lect  = tmp
                    else:
                        if tmp not in lect:
                            lect = lect + " + " + tmp
                    
            # Stamp the shared lecturer string onto every schedule line.
            for x in l: x["lect"]=lect
            res.extend(l)
        return res
       
    def scrape_zamak(self):
        """Group the page's <tr>s into per-course row bundles and analyze.

        State machine over counter:
          0 -> saw nothing yet; a grey right-aligned separator moves to 1
          1 -> next row is the course header (saved as curr_name_tag) -> 2
          2 -> another grey separator -> 3
          3 -> rtl/right rows are schedule lines; a grey rtl separator
               closes the bundle and resets to 0.
        Returns the flat list built by __analyze_zamak.
        """
        rows = self.soup.find_all("tr")
        course = []
        courses = []
        counter = 0
        curr_name_tag = ""
        for row in rows[1:]:
            
            if counter == 3 and row.get("bgcolor") == "#eceaeb" and row.get("align") == "right" and row.get("dir") == "rtl":
                counter = 0
                courses.append ([curr_name_tag] + course)
                course=[]
            elif row.get("bgcolor") == "#eceaeb" and row.get("align") == "right" and counter == 0:
                counter = 1
            elif counter  == 1:
                counter = 2
                curr_name_tag = row
            elif row.get("bgcolor") == "#eceaeb" and row.get("align") == "right" and counter == 2:
                counter = 3
            elif row.get("dir") == "rtl" and row.get("align") == "right" and counter == 3:
                course.append(row)
        
        return self.__analyze_zamak(courses)
    
    def scrape_courses(self):
        """Extract one dict per course group (number, name, faculty, etc.).

        Reads DB\\Data_Dumps\\TAU\\videos.txt (first 8 chars of each line are
        course numbers that have recorded video) to fill the "video" field.
        Uses the same grey-separator state machine as scrape_zamak, with two
        extra states (4: first schedule row determines the teaching type and
        whether the group is a main/secondary group; 5: done for this course).
        A course is flushed to the result list when the closing grey rtl
        separator of its block is reached.
        """
        courses_with_video = []
        # NOTE(review): Windows-style relative path, resolved from the
        # current working directory — confirm the process is started there.
        f = os.path.join(os.getcwd(), 'DB\\Data_Dumps\\TAU\\')
        with codecs.open(f + 'videos.txt', 'r',encoding = 'utf-8') as file:
            for line in file:
                courses_with_video.append(line[:8])
                 
        rows = self.soup.find_all("tr")
        courses = []
        counter = 0
        type_course = None
        sem = "-1"
        typey = " "
        course_num = " "
        group_num = " "
        course_name = " "
        main_or_not = " "
        fac = " "
        hug = " "
        curr_course = " "
        havura = ' '
        for row in rows[1:]:

            # Closing separator: emit the course collected so far.
            if row.get("bgcolor") == "#eceaeb" and row.get("align") == "right" and row.get("dir") == "rtl":
                counter = 0
                if course_num in courses_with_video:
                    vid = u"כן"
                else:
                    vid = u"לא"
                courses.append({"cnum": course_num,"gnum": group_num,"sem":sem,"name": course_name,"fac": fac,"hug": hug,"type":main_or_not,"ofen":typey,"video":vid,"havura":havura})
                sem = "-1"
                typey = " "
                curr_course = course_num
            elif row.get("bgcolor") == "#eceaeb" and row.get("align") == "right" and counter == 0:
                counter = 1
            elif counter  == 1:
                counter = 2
                # Header row: name (first char dropped) and "XXXX-XXXX-GG".
                course_name = row.th.div.string[1:]
                cour = row.find_all("th")[1].string.split("-")
                course_num = cour[0] + cour[1]
                group_num = cour[2] 
            elif counter == 2:
                counter = 3
                # "faculty/hug" cell; some entries have no hug part.
                fachug = row.th.string.split(u"/")
                fac = fachug[0][1:].strip()
                try:hug = fachug[1].strip()
                except IndexError: 
                    hug = u"כללי" + " "+ str(cour[0])
                # Disambiguate the Management hug by its course prefix.
                if hug == u"ניהול":
                    hug = hug  + " " +  str(cour[0])
            elif row.get("bgcolor") == "#eceaeb" and row.get("align") == "right" and counter == 3:
                counter = 4
            elif row.get("dir") == "rtl" and row.get("align") == "right" and counter == 4:
                # First schedule row: its 6th <td> is the teaching type.
                # Same type as the course's previous group => another main
                # group (next havura letter); different type => secondary.
                try:
                    typey = row.find_all('td')[5].string
                    if course_num == curr_course:
                    
                        if type_course == typey:
                            main_or_not = u"ראשית"
                            havura = chr(ord(havura) + 1)
                        else:
                            main_or_not = u"משנית"
                    else:
                        type_course = typey
                        main_or_not = u"ראשית"
                        havura = "A"
                    counter = 5

                except IndexError:
                    pass
                    
            # Independently of the state machine, track the semester from
            # the first <td> of each schedule row: aleph -> "<year>1",
            # bet -> "<year>2", both seen -> "<year>0" (yearly course).
            if row.get("dir") == "rtl" and row.get("align") == "right" and  not counter == 0:
                td = row.td
                if td.get("colspan") != "8":
                    if u"א" in td.string and sem == "-1":
                        sem = self.year + "1"
                    elif u"ב" in td.string and sem == "-1":
                        sem = self.year + "2"
                    elif u"א" in td.string and sem == self.year + "2":
                        sem = self.year + "0"
                    elif u"ב" in td.string and sem == self.year + "1":
                        sem = self.year + "0"
        
        return courses
    
        
    def scrape_test(self):
        """Fetch the exam ("בחינה") page linked from each course block and
        extract the two exam dates/hours (moed A and moed B).

        For every closing grey separator row whose first <td> links to an
        exam page, a second HTTP request is made; the blue-bordered table on
        that page holds hour/date cells in one of three layouts
        (distinguished by cell count). Returns dicts with cnum/gnum/sem/
        date1/date2/hour1/hour2; dates are normalized to DD/MM/20YY and
        hours to "HHMM" (or blank when malformed/absent).
        """
        rows = self.soup.find_all("tr")
        sem = "-1"
        course_num = " "
        group_num = " "
        date1= " "
        date2= " "
        hour1= " "
        hour2= " "
        tests = []
        counter = 0
        for row in rows[1:]:

            if row.get("bgcolor") == "#eceaeb" and row.get("align") == "right" and row.get("dir") == "rtl":
                counter = 0
                test = row.td.find("a", text = u"בחינה")
                # Only follow the link when a semester was detected for the
                # course block that just ended.
                if test is not None and sem != "-1":
                    test_url = "http://yedion.tau.ac.il/yed/" +  test["href"]
                    
                    # Course+group number is the 3rd query parameter's value.
                    course_full_num = test["href"].split('&')[2].split('=')[1]
                    
                    request = urllib2.Request(test_url)
                    f = urllib2.urlopen(request)
                    htmlSource = f.read()
                    soup = BeautifulSoup(htmlSource)
                    table_test = soup.find("table", bordercolor="blue")
            
                    date1= " "
                    date2= " "
                    hour1= " "
                    hour2= " "
                    
                    table_list = table_test.find_all("td")
            
                    
                    # Layouts: >6 cells = hour1,?,date1,?,hour2,?,date2;
                    # exactly 6 = date1,?,hour2,?,date2 (no first hour);
                    # <3 = only one date; otherwise date1,?,date2.
                    # The bare prints flag the unusual layouts for review.
                    if len(table_list) > 5:
                        hour1 = table_list[0].string.replace(":","")
                        date1 = table_list[2].string.replace(".","/")
                        if len(table_list) < 7:
                            print course_full_num
                            hour1= " "
                            date1 = table_list[0].string.replace(".","/")
                            hour2 = table_list[2].string.replace(":","")
                            date2 = table_list[4].string.replace(".","/")
                        else:
                            hour2 = table_list[4].string.replace(":","")
                            date2 = table_list[6].string.replace(".","/")
                    elif len(table_list) < 3:
                        print course_full_num
                        date1 = table_list[0].string.replace(".","/") 
                    else:
                        date1 = table_list[0].string.replace(".","/")
                        date2 = table_list[2].string.replace(".","/")


                
                    
                    # An hour shorter than 4 chars is not a valid HHMM.
                    if len(hour1) < 4:
                        hour1 = " "
                    if len(hour2) < 4:
                        hour2 = " "
                    
                    # Expand 2-digit years: DD/MM/YY -> DD/MM/20YY.
                    if len(date1.split("/")) == 3:
                        temp = date1.split("/")
                        temp[2] = "20" + temp[2]
                        date1 = "/".join(temp)
                    if len(date2.split("/")) == 3:
                        temp = date2.split("/")
                        temp[2] = "20" + temp[2]
                        date2 = "/".join(temp)
                    tests.append({"cnum": course_full_num[:-2],"gnum": course_full_num[-2:],"sem":sem,"date1": date1,"date2":date2,"hour1": hour1 ,"hour2":hour2})
                    
                
                sem = "-1"
            elif row.get("bgcolor") == "#eceaeb" and row.get("align") == "right" and counter == 0:
                counter = 1
            # Same semester-detection logic as scrape_courses.
            elif row.get("dir") == "rtl" and row.get("align") == "right" and  not counter == 0:
                td = row.td
                if td.get("colspan") != "8":
                    if u"א" in td.string and sem == "-1":
                        sem = self.year + "1"
                    elif u"ב" in td.string and sem == "-1":
                        sem = self.year + "2"
                    elif u"א" in td.string and sem == self.year + "2":
                        sem = self.year + "0"
                    elif u"ב" in td.string and sem == self.year + "1":
                        sem = self.year + "0"
        return tests
        
        
            
             
             
if __name__ == '__main__':
    
    facs = [("1","08"),("2","05"),("3","10"),("4","04"),("5","06"),("6","03"),("7","14"),("8","12"),("9","01"),("10","11"),("10","07"),("10","09"),("10","15"),("11","2171"),("11","2172"),("12","18801882"),("13","1843")]
    #facs = [("12","18801882")]
            
    courses_path = os.path.join(os.path.split(__file__)[0], "Data_Dumps\\TAU\\2015\\KR_Kvutza.txt")
    zamak_path = os.path.join(os.path.split(__file__)[0], "Data_Dumps\\TAU\\2015\\KR_Kvutza_Zamak.txt")
    test_path = os.path.join(os.path.split(__file__)[0], "Data_Dumps\\TAU\\2015\\KR_Kvutza_Moed.txt")
    
    fcourses = open(courses_path,"w")
    fzamak = open(zamak_path,"w")
    ftests = open(test_path,"w")
    fcourses.write("K_COURSE_NUM~K_KVUTZA_NUM~K_KVUTZA_SEM_YEAR~MISHKAL~COURSE_NAME~FACULTY_NAME~HUG_NAME~KVUTZA_TYPE~OFEN_HORAA~HAVURA~COURSE_LANGUAGE~MATALOT~MATCONET_KVUTZA~VIDEO\n")
    fzamak.write("K_COURSE_NUM~K_KVUTZA_NUM~K_KVUTZA_SEM_YEAR~YOM~ME_SHAA~AD_SHAA~LECTURE_NAME~BINYAN~CHEDER\n")
    ftests.write('K_COURSE_NUM~K_KVUTZA_NUM~K_KVUTZA_SEM_YEAR~K_MOED~K_MOED_TYPE~ME_SHAA~AD_SHAA~MOED_DATE\n')
    
    print strftime("%X", gmtime())
    for fac in facs:
        
        print fac
        c = CourseScraper(fac[0], fac[1], '2015')
        courses = c.scrape_courses() 
        curr_course = ""
        for x in courses:
            if curr_course != x["cnum"]:
                x["havura"] = "A"
                curr_sem = x["sem"]
                curr_hav = "A"
                curr_course = x["cnum"]
            elif x["type"] == u"משנית":
                x["havura"] = curr_hav
            elif curr_sem == x["sem"]:
                curr_hav = chr(ord(curr_hav)+1)
                x["havura"] = curr_hav
            else:
                curr_hav = "A"
                x["havura"] = curr_hav
                curr_sem = x["sem"]
        for x in courses:
            s = x["cnum"] + "~" + x["gnum"] + "~" +x["sem"] + "~" + "0" + "~" +x["name"] + "~" +x["fac"] + "~" +x["hug"].replace(";touq&",'"') + "~" +x["type"] + "~" +x["ofen"] + u"~"+x["havura"] +"~ ~ ~ ~" +x["video"] +"\n"
            fcourses.write(s.encode('utf8'))
        for x in c.scrape_zamak():
            s = x["cnum"] + "~" + x["gnum"] + "~" + x["sem"] + "~" + x["yom"] + "~" + x["me"] + "~" + x["ad"] + "~" + x["lect"] + "~" + x["building"] + "~" + x["heder"] +"\n"
            fzamak.write(s.encode('utf8'))
            
        for x in c.scrape_test():
            s = x["cnum"] + "~" + x["gnum"] + "~" + x["sem"] + "~" +u"א"+ "~" + u"בחינה סופית" + "~" + x["hour1"] + "~" + " " + "~" + x["date1"] +"\n" + x["cnum"] + "~" + x["gnum"] + "~" + x["sem"] + "~" +u"ב"+ "~" + u"בחינה סופית" + "~" + x["hour2"] + "~" + " " + "~" + x["date2"] +"\n"
            ftests.write(s.encode('utf8'))
             
    print strftime("%X", gmtime())
    fzamak.close()
    fcourses.close()
    fcourses.close()
    ftests.close()

        
        
            
        
                    
          
            
