#-*- coding:utf-8 -*-

import os

import requests

from bs4 import BeautifulSoup as bs

# Base URL of the textbook listing site; relative './...' hrefs scraped
# from the pages are resolved against this prefix.
home_url = "https://bp.pep.com.cn/jc/"
# Local output directory the PDF tree is mirrored into.
root = "./src"

def get(url, timeout=30):
    """Fetch *url* and return its HTML parsed into a BeautifulSoup tree.

    Args:
        url: Absolute URL of the page to fetch.
        timeout: Seconds to wait for the server; without it requests.get
            can block forever on a stalled connection.

    Returns:
        A BeautifulSoup object built with the lxml parser.
    """
    html = requests.get(url, timeout=timeout)
    # Force UTF-8 so Chinese titles/names decode correctly even when the
    # server response does not declare its charset.
    html.encoding = 'utf-8'
    soup = bs(html.text, 'lxml')
    return soup

def work_field(field, path):
    """Create a directory for one subject field and process its groups.

    Args:
        field: bs4 tag for one field section; its <h5> text is the field
            name and its <ul>/<li> entries hold links to textbook groups.
        path: Parent directory the field folder is created under.
    """
    name = field.h5.string
    path = "/".join([path, name])
    # exist_ok avoids the check-then-create race of exists() + mkdir().
    os.makedirs(path, exist_ok=True)
    print("  ->", name)
    categories = field.find_all('ul')
    for category in categories:
        for group in category.find_all('li'):
            work_group(group.a, path)

def work_group(group, path):
    """Download every book listed on one group's page into path/<group name>.

    Args:
        group: <a> tag linking to the group's book-list page; its text is
            used as the folder name.
        path: Parent directory the group folder is created under.
    """
    name = group.string
    path = "/".join([path, name])
    # exist_ok avoids the check-then-create race of exists() + mkdir().
    os.makedirs(path, exist_ok=True)
    url = group['href']
    # Resolve a relative href against the site root. The previous
    # str.replace('./', home_url) also rewrote any './' occurring later
    # in the URL; only a leading './' must be replaced.
    if url.startswith('./'):
        url = home_url + url[2:]
    bookpage = get(url)
    print("    ->", bookpage.title.string)
    books = bookpage.ul.find_all('li')
    for book in books:
        work_book(book, path, url)

def work_book(book, path, pre_url):
    """Download a single textbook PDF unless it is already on disk.

    Args:
        book: <li> tag for one book; its <h6> text is the title and the
            element with class 'btn_type_dl' carries the download href.
        path: Directory the PDF is saved into.
        pre_url: Absolute URL of the page *book* was scraped from, used
            to resolve a relative download href.
    """
    name = book.h6.string
    path = "".join([path, "/", name, ".pdf"])
    print("      ->", path)
    if os.path.exists(path):
        print("      -> SKIP")
        return
    link = book.find(attrs={'class': 'btn_type_dl'})
    if link is None:
        # Some entries carry no download button; skip instead of crashing
        # with a TypeError on None['href'].
        print("      -> SKIP")
        return
    url = link['href']
    # Only a leading './' is relative-path noise; replace() would also
    # mangle any './' occurring later in the URL.
    if url.startswith('./'):
        url = pre_url + url[2:]
    # Timeout so a stalled download cannot hang the whole run.
    pdf = requests.get(url, timeout=60)
    with open(path, 'wb') as f:
        f.write(pdf.content)

if __name__ == '__main__':
    # Fetch the landing page and announce what we are mirroring.
    homepage = get(home_url)
    print(homepage.title.string)
    # Each element with this CSS class is one subject-field section.
    subject_fields = homepage.find_all(attrs={'class': 'list_sjzl_jcdzs2020'})
    if not os.path.exists(root):
        os.mkdir(root)
    for subject in subject_fields:
        work_field(subject, root)
