# coding=utf-8

import json, time, datetime, ssl, os, json
import urllib.request
from bs4 import BeautifulSoup
from lxml import etree
import shutil


# Weeklies to crawl: each entry names a magazine and gives its catalog URL.
weekly_list = [
    {
        'name': '编程狂人',
        'url': 'https://www.tuicool.com/mags',
    },
]

# Pretend to be desktop Chrome so the site serves its normal HTML pages.
default_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
# Persistent on-disk index of issues already crawled on previous runs.
json_file = 'article.json'

# Disable HTTPS certificate verification process-wide.
# NOTE(review): insecure by design — presumably a workaround for a failing
# certificate chain on the target site; confirm before reusing elsewhere.
ssl._create_default_https_context = ssl._create_unverified_context

def read_article_json(path=None):
    """Load the previously saved article index from disk.

    Args:
        path: JSON file to read; defaults to the module-level ``json_file``.
              (New optional parameter — existing no-argument callers are
              unaffected.)

    Returns:
        The dict parsed from the file, or ``{}`` when the file is missing
        or contains only whitespace.
    """
    if path is None:
        path = json_file
    if not os.path.exists(path):
        return {}

    with open(path, 'r', encoding='UTF-8') as f:
        json_text = f.read()
    # Treat an empty/whitespace-only file the same as a missing one,
    # so a truncated save does not crash the next run.
    if not json_text.strip():
        return {}
    return json.loads(json_text)

def getHtml(url, headers=default_headers):
    """Fetch *url* and return the raw response body.

    Args:
        url: absolute URL to request.
        headers: HTTP header dict; defaults to the module-level Chrome UA.

    Returns:
        The response body as ``bytes``.
    """
    req = urllib.request.Request(url=url, headers=headers)
    # Use a context manager so the response handle is closed even when
    # read() raises (the original leaked the connection).
    with urllib.request.urlopen(req) as res:
        return res.read()

# Fetch the catalog page of a weekly (e.g. 编程狂人) and collect its issue links.
def get_weekly_list(catalogUrl, headers=default_headers):
    """Scrape a magazine catalog page and map issue titles to issue URLs.

    Args:
        catalogUrl: absolute URL of the catalog page (scheme + host + path).
        headers: HTTP header dict used for the request.

    Returns:
        dict mapping issue title (stripped anchor text) -> absolute issue URL.
    """
    emap = {}

    # Split the catalog URL so relative hrefs can be resolved:
    # first '/' after the '//' of the scheme separates host from path.
    idx = catalogUrl.index("/", catalogUrl.index("//") + 2)
    siteUrl = catalogUrl[:idx]   # scheme + host, e.g. https://www.tuicool.com
    subUrl = catalogUrl[idx:]    # path portion, e.g. /mags

    html = getHtml(catalogUrl, headers)
    soup = BeautifulSoup(html, 'html.parser')

    for div in soup.find_all(name='div', class_='mag-period-item'):
        link = div.find('a', href=True)
        # BUG FIX: find() returns None when the item has no <a href>;
        # the original then crashed on link.get('href').
        if link is None:
            continue

        href = link.get('href')
        if href is None or href.strip() == '':
            continue
        # Skip script pseudo-links and in-page fragments.
        if href.find("javascript") > -1 or href.startswith("#"):
            continue

        if href[0] == '/':
            # e.g. /2546/89898.html — rooted at the site
            href = siteUrl + href
        elif not href.startswith("http"):
            # e.g. 2546/89898.html — relative to the catalog page's directory
            href = siteUrl + subUrl[0:subUrl.rindex('/')] + "/" + href

        # Anchors with nested markup have no .string; skip them since we
        # cannot derive a clean title.
        if not link.string:
            continue

        emap[link.string.strip()] = href

    return emap


def get_articles(weekly_list, headers=default_headers):
    """Crawl every weekly in *weekly_list*, merge newly found issues into the
    saved article index, and regenerate the markdown digest.

    Args:
        weekly_list: list of ``{'name': ..., 'url': ...}`` dicts, one per weekly.
        headers: HTTP header dict used for all requests.

    Side effects:
        - Copies the existing ``article.json`` to a timestamped ``.bak`` file.
        - Rewrites ``article.json`` with old + newly crawled issues.
        - Rewrites ``编程狂人.md`` with links grouped by issue and section.
    """
    article_json = read_article_json()

    # BUG FIX: accumulate across ALL weeklies. The original re-created this
    # dict inside the loop, so with several weeklies only the last one's new
    # issues survived to the merge below, and an empty weekly_list raised
    # NameError.
    new_article_map = {}
    for weekly in weekly_list:
        # Issue title -> issue URL for this weekly's catalog page.
        link_map = get_weekly_list(weekly['url'])

        for link_name, link in link_map.items():
            # Skip issues already crawled on a previous run.
            if link_name in article_json:
                continue

            print(link_name + " => " + link)

            html = getHtml(link, headers)
            html = etree.HTML(html)
            # Section headings and the article list under each; the two
            # xpath result lists are assumed to line up index-for-index.
            nav_list = html.xpath('//ul[@class="new-nav"]/li/strong')
            section_list = html.xpath('//ol[@class="mag-article-list"]')

            nav_map = {}
            for i, nav in enumerate(nav_list):
                section = section_list[i]
                article_list = section.xpath('li/h4/a[@class="title"]')
                # (title, url) pairs for every article in this section.
                nav_map[nav.text] = [(art.text, art.get('href')) for art in article_list]

            new_article_map[link_name] = {'link': link, 'nav': nav_map}

    # On key collision the previously saved entry wins (update overwrites
    # the freshly crawled one with the stored one).
    new_article_map.update(article_json)
    article_json = new_article_map

    # Keep a timestamped backup of the old index before overwriting it.
    if os.path.exists(json_file):
        shutil.copyfile(json_file, "article-" + datetime.datetime.now().strftime('%m%d_%H%M') + '.bak')
    json_str = json.dumps(article_json, ensure_ascii=False)
    with open(json_file, 'w', encoding='UTF-8') as f:
        f.write(json_str)

    # Regenerate the markdown digest from the full (old + new) index.
    md_links = []
    for link_name, page in article_json.items():
        md_links.append('\n\n## [%s](%s)\n' % (link_name, page['link']))
        for nav_title, art_list in page['nav'].items():
            md_links.append('\n### %s\n' % nav_title)
            for art in art_list:
                md_links.append('- [%s](%s)' % (art[0], art[1]))

    with open('编程狂人.md', 'w', encoding='UTF-8') as f:
        f.write('\n'.join(md_links))

if __name__ == '__main__':
    # Script entry point: crawl every configured weekly and refresh the
    # JSON index plus the markdown digest.
    get_articles(weekly_list)
