'''
Author: your name
Date: 2022-01-15 19:10:25
LastEditTime: 2022-01-15 23:41:36
LastEditors: Please set LastEditors
Description: Open koroFileHeader to view and adjust the file-header config: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
FilePath: \\uic-crawler\\crawlerInfo.py
'''
import random
import warnings
import requests
import urllib3
import urllib.request, urllib.error 
from bs4 import BeautifulSoup
import bs4
import ispace_login as loginToken

# Log in to iSpace once at import time; every crawler function below reuses
# this authenticated session and the parsed post-login landing page.
session, html_post = loginToken.loginIspace()

# Parse the landing page so the course/event scrapers can query it directly.
content = html_post.text
soup = BeautifulSoup(content, 'html.parser')

# Pool of desktop User-Agent strings; one is picked at random per run to make
# the requests look less bot-like.
header_list = ['Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36']
header = {
    # random.choice is the idiomatic form of randint(0, len-1) + indexing.
    'User-Agent': random.choice(header_list)
}


def askURL(url):
    """Fetch the page at *url* and return its body decoded as UTF-8.

    Sends the module-level ``header`` dict (randomized User-Agent) with the
    request. On ``URLError``/``HTTPError`` the status code and/or reason is
    printed and an empty string is returned instead of raising.
    """
    # BUG FIX: the original passed ``headers=head`` but no name ``head``
    # exists anywhere in this file — the module-level dict is ``header``,
    # so every call raised NameError.
    request = urllib.request.Request(url, headers=header)

    html = ""

    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        # HTTPError carries a status ``code``; a plain URLError only a reason.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)

    return html


def getCourses():
    """Scrape course titles and URLs from the logged-in landing page.

    Returns a pair ``(courses, courses_url)`` of parallel lists, taken from
    the ``title`` attribute and ``href`` of the first ``<a>`` inside every
    ``<li class="type_course depth_3 contains_branch">`` node.
    """
    nodes = soup.find_all('li', class_="type_course depth_3 contains_branch")

    # Grab each node's first anchor once, then split it into the two lists.
    anchors = [node.find('a') for node in nodes]
    courses = [anchor.get('title') for anchor in anchors]
    courses_url = [anchor.get('href') for anchor in anchors]

    return courses, courses_url


def getEvent():
    """Collect calendar event links and titles from the landing page.

    Scans every ``<td class="day text-center hasevent calendar_event_course">``
    cell and returns ``(events_url, events_time)`` — the ``href`` and the
    ``data-title`` attribute of each cell's first anchor.
    """
    month_event_all = soup.find_all('td', class_="day text-center hasevent calendar_event_course")
    # FIX: removed a leftover debug print() that dumped the raw node list
    # to stdout — no sibling scraper in this file prints its matches.

    events_url = []
    events_time = []

    for month_event in month_event_all:
        anchor = month_event.find('a')
        events_url.append(anchor.get('href'))
        # ``data-title`` carries the human-readable event name.
        events_time.append(anchor.get('data-title'))

    return events_url, events_time


# save test
# saveHTML = loginToken.saveLoginHTML(html_post)


def getDDLURL(url):
    """Fetch a course page and return the URLs of its assignment activities.

    Requests *url* with the shared authenticated session (TLS verification
    disabled, as elsewhere in this file), then collects the ``href`` of the
    ``a.aalink`` anchor inside every
    ``<li class="activity assign modtype_assign">`` node. Activities without
    such an anchor are skipped. Always returns a list (possibly empty).
    """
    html = session.post(url, headers=header, verify=False)

    Dsoup = BeautifulSoup(html.text, 'html.parser')

    # <li class="activity assign modtype_assign " id="module-265295">
    activities = Dsoup.find_all('li', class_="activity assign modtype_assign")

    activities_url = []

    for activity in activities:
        # FIX: the original wrapped this in a bare ``except: pass`` which
        # silently swallowed *every* error. The only expected failure is a
        # missing anchor (find() returning None), so check for that
        # explicitly instead.
        link = activity.find('a', class_="aalink")
        if link is not None:
            activities_url.append(link.get('href'))

    return activities_url


def getDDLInfo(url):
    """Scrape one assignment page into ``[course_name, ddl_name, ddl_time]``.

    The course name comes from the page's ``<h1>``, the assignment (DDL)
    name from its ``<h2>``, and the due time from the
    ``<td class="cell c1 lastcol">`` cell of the ``generaltable`` table.
    """
    response = session.post(url, headers=header, verify=False)
    page = BeautifulSoup(response.text, 'html.parser')

    def _text_of(node):
        # Flatten the tag's direct children into a single string, exactly
        # as the original ''.join(tag.contents) did.
        return ''.join(node.contents)

    course_name = _text_of(page.find('h1'))
    ddl_name = _text_of(page.find('h2'))
    # <table class="generaltable"> ... <td class="cell c1 lastcol">
    time_cell = page.find('table', class_="generaltable").find('td', class_="cell c1 lastcol")
    ddl_time = _text_of(time_cell)

    return [course_name, ddl_name, ddl_time]
    
    
def saveDDLInfo(info_list, path="Result\\data.txt"):
    """Append every string in *info_list* to *path*, followed by a newline.

    *path* defaults to the original hard-coded location, so existing callers
    are unaffected. The file is opened as UTF-8 explicitly so the Chinese
    course/deadline names are written consistently on every platform
    (the original relied on the locale default encoding).
    """
    # The ``with`` block closes the file; the original's extra f.close()
    # after the block was redundant and has been dropped.
    with open(path, 'a', encoding="utf-8") as f:
        for item in info_list:
            f.write(item)
        f.write('\n')
    

def main():
    """Walk every course, resolve its assignment pages, and persist each one."""
    _, courses_url = getCourses()
    for course_url in courses_url:
        activities_url = getDDLURL(course_url)
        # Guard clause instead of nesting the whole loop under an ``if``.
        if activities_url is None:
            continue
        for activity_url in activities_url:
            info_list = getDDLInfo(activity_url)
            print("Success!")
            saveDDLInfo(info_list)


# Run the crawl only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()

