#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
import os, json, requests, time
from bs4 import BeautifulSoup

url = 'https://www.kanunu8.com'
urls = []

base_path = '/data/www/unitool/src/spider/'
worker_path = 'gulong/'

book_tree = {}

def get_html(url):
    """Fetch *url* and return the page body re-encoded as UTF-8 bytes.

    Retries indefinitely on transient failures, sleeping 2 seconds between
    attempts. After 10 consecutive failures the URL is appended to
    error_url.log (so error_url() can retry it later) and None is returned.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36"
    }
    count = 0
    while True:
        content = None
        try:
            res = requests.get(url, headers=headers, timeout=10)
        except requests.RequestException:
            # Network-level failure (timeout, DNS, connection reset, ...).
            res = None
        if res is not None and res.status_code == 200:
            # The site serves GB-encoded pages. gb18030 is a strict superset
            # of gb2312, so a single decode attempt covers both; fall back to
            # the raw bytes if the payload is not GB-encoded at all.
            try:
                content = res.content.decode('gb18030').encode('utf-8')
            except UnicodeDecodeError:
                content = res.content
        if content:
            return content
        print('无法获取页面', url)
        if count >= 10:
            # Give up on this URL for now; record it for a later retry pass.
            with open(base_path + 'error_url.log', 'a') as f:
                f.write(url)
                f.write('\n')
            return None
        count += 1
        time.sleep(2)


def write_file(html, file_name):
    """Save *html* (a str) into the spider's working directory.

    *file_name* is derived from the page URL: the site prefix is stripped
    and path separators are flattened to underscores. An empty result maps
    to 'index.html'.
    """
    target_dir = base_path + worker_path
    # makedirs(exist_ok=True) also creates missing parent directories and
    # does not race with concurrent runs, unlike exists() + mkdir().
    os.makedirs(target_dir, exist_ok=True)
    file_name = file_name.replace(url, '').replace('/', '_')
    if not file_name:
        file_name = 'index.html'
    # Pages were re-encoded to UTF-8 by get_html(); write them back out as
    # UTF-8 explicitly rather than relying on the platform default encoding.
    with open(target_dir + file_name, 'w', encoding='utf-8') as f:
        f.write(html)


def writer():
    """Download every book page linked from the saved site index.

    Reads the previously fetched index.html, follows each anchor, and
    saves the target page via write_file().
    """
    with open(base_path + worker_path + "index.html", 'r') as f:
        # Explicit parser avoids the "no parser specified" warning and
        # keeps results stable across environments.
        tags = BeautifulSoup(f.read(), 'html.parser').findAll('a')
    for tag in tags:
        href = tag.attrs.get('href')
        if not href:
            # Anchor without a target (e.g. a named anchor) — nothing to fetch.
            continue
        content = get_html(url + href)
        print(tag)
        if content is None:
            # get_html() gave up after retries; it already logged the URL.
            continue
        write_file(content.decode('utf-8'), tag.text)
        
        
def article():
    """Fetch every chapter page of every downloaded book index.

    For each book page saved by writer(), resolve the book id from the
    site index, find chapter links (hrefs like '123.html'), download each
    chapter and save it under an 'art_' prefixed name.
    """
    f_list = os.listdir(base_path + worker_path)
    if 'index.html' in f_list:
        f_list.remove('index.html')
    # Chapter links look like '123.html'; escape the dot so '123xhtml'
    # style hrefs are not accidentally matched.
    comp = re.compile(r'^\d+\.html')
    with open(base_path + worker_path + 'index.html', 'r') as f:
        book_list = BeautifulSoup(f.read()).findAll('a')
    for art in f_list:
        # Resolve the book id once per file, up front. The original code
        # searched inside the chapter loop, used `continue` where it meant
        # `break`, and silently reused a stale book_id (or raised NameError)
        # when the index had no matching entry.
        book_id = None
        for book in book_list:
            if book.text == art:
                book_id = book.attrs.get('href').split('/')[2]
                break
        if book_id is None:
            print('skip (no index entry):', art)
            continue
        with open(base_path + worker_path + art, 'r') as f:
            tags = BeautifulSoup(f.read()).findAll('a')
        for tag in tags:
            if not comp.search(tag.attrs.get('href', '')):
                continue
            print(art, tag.text)
            content = get_html("{}/book/{}/{}".format(url, book_id, tag.attrs.get('href')))
            if not content:
                continue
            write_file(content.decode('utf-8'), 'art/art_{}_{}'.format(art, tag.text))

def error_url():
    """Re-fetch URLs that previously failed (recorded in error_url.log).

    The log is consumed and truncated on each pass; get_html() re-appends
    any URL that fails again, so the loop continues until the log drains.
    """
    while True:
        with open(base_path + 'error_url.log', 'r') as f:
            lines = f.readlines()
        # Truncate before retrying so fresh failures re-appended by
        # get_html() during this pass are not lost later.
        with open(base_path + 'error_url.log', 'w') as f:
            f.write('')
        # Strip the trailing newline written by get_html(); the original
        # passed 'https://...\n' to requests, which could never succeed,
        # and leaked the newline into the output filename as well.
        targets = [line.strip() for line in lines if line.strip()]
        if not targets:
            break
        # Use a local name instead of shadowing the module-level `url`.
        for target in targets:
            content = get_html(target)
            if not content:
                continue
            name = 'error_url_art_{}'.format(target.replace('https://www.kanunu8.com/', ''))
            with open(base_path + name, 'wb') as f:
                f.write(content)
        

def main():
    """Entry point: run the currently enabled pipeline stage.

    The spider runs in three sequential stages; enable exactly the one
    you need for this pass.
    """
    # writer()     # stage 1: download every book's index page
    article()      # stage 2: download every chapter of every book
    # error_url()  # stage 3: retry URLs that failed in earlier stages


if __name__ == '__main__':
    main()
