from urllib.error import HTTPError, URLError
import logging
import requests
import csv
from scrapy.http import TextResponse
from collections import deque
from bs4 import BeautifulSoup
from bs4 import NavigableString
from bs4 import Tag
from urllib.parse import urlparse
from urllib import parse
import urllib
import time
import datetime
import os
import json

from a_hospital.queue import Queue


def create_logger():
    from logging.handlers import RotatingFileHandler

    logpath = os.path.normpath('logs/py_log.log')
    logdir = os.path.dirname(logpath)
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    logging.basicConfig(
        handlers=[RotatingFileHandler(filename=os.path.normpath('logs/py_log.log'),
                                      mode='a', maxBytes=5000000, backupCount=10)],
        level=logging.DEBUG,
        format="[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
        datefmt='%Y-%m-%dT%H:%M:%S')

    logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s]  %(message)s")
    rootLogger = logging.getLogger()

    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)


create_logger()


# Slim saved HTML pages: for every sub-directory of root_file_path that holds
# a content.html, remove the first <table>, the #toc container, and the
# #fromlink element (plus everything after it) from #bodyContent, keep the
# untouched original as content.html.bak, and write the slimmed HTML back
# to content.html. Re-runs read from the backup so the result is idempotent.
root_file_path = 'C:/Users/dliang14/Downloads/task5'
if not os.path.isdir(root_file_path):  # the root must be an existing directory
    logging.warning("{0} is not a directory or not exist".format(root_file_path))
    # Equivalent to exit(-1) but does not depend on the site module.
    raise SystemExit(-1)

for sub_dir in os.listdir(root_file_path):  # walk each sub-directory
    sub_path = os.path.join(root_file_path, sub_dir)
    logging.info('sub_path {0} {1} '.format(sub_dir, sub_path))
    if not os.path.isdir(sub_path):
        continue
    content_path = os.path.join(sub_path, 'content.html')
    backup_path = os.path.join(sub_path, 'content.html.bak')
    if not os.path.exists(content_path):
        continue
    # On a re-run the backup holds the pristine original, so read from it.
    source_path = backup_path if os.path.exists(backup_path) else content_path
    logging.info('sub_content {0}'.format(source_path))

    with open(source_path, mode='r', encoding='utf-8') as source_file:
        soup = BeautifulSoup(source_file.read(), 'html.parser')
        body_content = soup.find(id="bodyContent")
        if body_content is None:
            # Previously this crashed with AttributeError; skip with a warning.
            logging.warning('no bodyContent in {0}, skipped'.format(source_path))
            continue

        tb = None       # first <table> child (presumably an infobox — confirm)
        toc = None      # table-of-contents container (id="toc")
        fromlink = []   # the id="fromlink" element and every sibling after it
        for child in body_content.children:
            if not isinstance(child, Tag):
                continue
            logging.debug(child)
            if tb is None and child.name == 'table':
                tb = child
            if toc is None and child.get('id') == 'toc':
                toc = child
            # Once 'fromlink' is seen, every later Tag sibling is collected too.
            if fromlink:
                fromlink.append(child)
                continue
            if child.get('id') == 'fromlink':
                fromlink.append(child)

        logging.info('pop extract {0} {1} {2}'.format(tb, toc, fromlink))
        if tb is not None:
            tb.extract()
        if toc is not None:
            toc.extract()
        for ele in fromlink:
            ele.extract()

        target_content = str(soup)
        logging.debug('target content  {0}'.format(target_content))

    # First run: preserve the untouched original as the backup.
    if source_path != backup_path:
        os.rename(source_path, backup_path)

    # Bug fix: always write the slimmed HTML to content.html. The old code
    # wrote back to whichever file it read, clobbering the backup on re-runs.
    with open(content_path, mode='w', encoding='utf-8') as content_file:
        content_file.write(target_content)


logging.info("_____________________________________________________")
logging.info("_____________________________________________________")
logging.info("slim completed")
