# -*- coding: utf-8 -*-
import os
from bs4 import BeautifulSoup
import urllib.request


# Terminal Color text
class TerminalColors:
    """ANSI escape sequences used to colorize terminal output.

    Each attribute is a raw escape code; concatenate one before a string
    and ``ENDC`` after it to reset the terminal to its default style.
    """
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'      # reset all attributes back to default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


# Download pdf file
def getfile(file_name, url):
    """Download *url* into the current working directory.

    The file is saved under *file_name* with any ``:`` replaced by a space
    (``:`` is not a legal filename character on some filesystems).  The
    download is streamed in 8 KiB chunks so large files are not held
    entirely in memory.

    :param file_name: desired output file name (``:`` will be replaced)
    :param url: URL to fetch (anything ``urllib.request.urlopen`` accepts)
    """
    safe_name = file_name.replace(':', ' ')
    block_size = 8192
    # Context managers guarantee both the HTTP response and the output
    # file are closed even if an exception interrupts the transfer
    # (the original leaked both on error).
    with urllib.request.urlopen(url) as response, open(safe_name, 'wb') as out:
        while True:
            chunk = response.read(block_size)
            if not chunk:
                break
            out.write(chunk)
    print(TerminalColors.OKGREEN + "[Success]"+ TerminalColors.OKBLUE + " download " + TerminalColors.ENDC + file_name)


# Create target dir
def mkdir(now_path, dir_name):
    """Ensure ``now_path/dir_name`` exists, then chdir into it.

    Creates the directory (announcing it on stdout) when missing; silently
    reuses it otherwise.  Always leaves the process cwd inside the target
    directory, which the calling script relies on.

    :param now_path: parent directory the new directory belongs under
    :param dir_name: name of the directory to create/enter
    """
    full_dir_name = os.path.join(now_path, dir_name)
    if not os.path.exists(full_dir_name):
        # Bug fix: create at the full joined path.  The original called
        # os.mkdir(dir_name), which created the directory relative to the
        # *current* cwd — when cwd != now_path the chdir below then failed
        # with FileNotFoundError.
        os.mkdir(full_dir_name)
        print(TerminalColors.OKGREEN + '[Success]',
              TerminalColors.OKBLUE + ' mkdir ./' + TerminalColors.ENDC + dir_name)
    os.chdir(full_dir_name)

# ---------------------------------------------------------------------------
# Top-level scraper: prompt for an OCW course URL, mirror its navigation
# sections as local directories, and download lecture-video transcripts.
# NOTE(review): this assumes the legacy OCW page layout (div#left,
# div#course_title, nav#course_nav, ...) — confirm against the current
# site, which has been redesigned since.
# ---------------------------------------------------------------------------

# Root url — relative hrefs scraped from the pages are joined onto this.
baseUrl = 'https://ocw.mit.edu'

# Sample course URLs kept for quick manual testing:
# url = 'https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-450-principles-of-digital-communications-i-fall-2006/'

# url2 = 'https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-035-computer-language-engineering-spring-2010/'

url = input("Please input the MIT_OCW's URL:")
# Fetch and parse the course home page.
html = urllib.request.urlopen(url).read()
soup = BeautifulSoup(html, "lxml")
# print(soup.prettify())

nav_links = []
nav_dir = {}

# Main content column of the course home page.
course_main = soup.find('div',{'id':"left"})

# get the Course Title
course_title = course_main.find('div',{'id':'course_title'}).text.strip()
# print(course_title)

# get the Course Number
# The number is two siblings after the "MIT Course Number" heading
# (the first sibling is the whitespace text node between tags).
course_info = course_main.find('div',{'id':'course_info'})
MIT_Course_Number = course_info.find('h3',text='MIT Course Number').next_sibling.next_sibling.text.strip()
# print(MIT_Course_Number)

# make the course dir, e.g. "MIT_6.450_Principles_of_Digital_Communications_I"
dir_name = 'MIT_' + MIT_Course_Number + '_' + course_title.replace(' ','_')
mkdir(os.getcwd(), dir_name)
# mkdir() also chdir'd into the course dir; remember it so each loop
# iteration below can return here.
course_home_path = os.getcwd()


# Walk every entry in the course's left-hand navigation menu.
course_nav = course_main.find('nav', {'id' : 'course_nav'})
nav_items = course_nav.ul.find_all('li')
# print(nav_items)
for index, item in enumerate(nav_items):
    link = item.find('a')['href']
    full_link = baseUrl + link

    # nav_name = item.find_all('a')
    # for sub_index, sub_item in enumerate(nav_name):
    #     sub_nav_dir = str(sub_index) + '_' + nav_name.replace(' ', '_')
    #     mkdir(course_home_path, sub_nav_dir)

    nav_name = item.find('a').text.strip()
    if not nav_name:
        # Some entries keep their label in a later sibling node rather
        # than in the <a> text itself.
        nav_name = item.find('a').next_sibling.next_sibling.text.strip()
    # nav_dir[nav_name] = full_link
    print(" · ", nav_name, full_link)
    # nav_links.append(full_link)

    # come in the link — one numbered directory per nav section, e.g.
    # "3_Lecture_Videos" (note: rebinds the nav_dir dict above to a str).
    nav_dir = str(index) + '_' + nav_name.replace(' ', '_')
    mkdir(course_home_path, nav_dir)

    if nav_name == 'Lecture Notes':
        # TODO: find Tag a which contains PDF file link
        print()

    # Only video sections are actually scraped; transcripts (PDF) are
    # downloaded for each lecture.
    if nav_name == 'Video Lectures' or nav_name == 'Lecture Videos' or nav_name == 'Recitation Videos':
        target_html = urllib.request.urlopen(full_link)
        target_soup = BeautifulSoup(target_html, 'lxml')

        # Gallery of lecture thumbnails; one medialisting div per lecture.
        lectures_all = target_soup.find('main', {'id':'course_inner_media_gallery'})
        lectures_info = lectures_all.find_all('div', {'class':'medialisting'})

        for lecture_info in lectures_info:
            # mkdir() chdirs into Transcript/, so getfile() below saves there.
            current_nav_path = os.getcwd()
            mkdir(current_nav_path, 'Transcript')

            _title = lecture_info.a['title']
            _href = baseUrl + lecture_info.a['href']
            # lectureLink.append(_href)
            print(TerminalColors.OKBLUE + 'Find: ' + TerminalColors.ENDC + _title)

            # go to the single lecture
            # NOTE(review): this fetch is unused — the page is re-fetched
            # just below with the #vid_playlist fragment appended.
            lecture_html = urllib.request.urlopen(_href)

            # The transcript PDF link lives in the video-playlist pane.
            # (URL fragments are not sent to the server, so this fetches
            # the same page as _href.)
            vid_playlist_url = _href + '#vid_playlist'
            playlist_page = urllib.request.urlopen(vid_playlist_url)
            playlist_soup = BeautifulSoup(playlist_page,'lxml')
            transcript_link = playlist_soup.find('a',{'class':'transcript-link'})
            pdf_url = baseUrl + transcript_link['href']
            # print(pdf_url)

            getfile(_title + '.pdf', pdf_url)
            # Step back out of Transcript/ for the next lecture.
            os.chdir(current_nav_path)

    # Return to the course root before processing the next nav section.
    os.chdir(course_home_path)

