import csv
import datetime
import hashlib
import json
import logging
import os
import time
import urllib
import urllib.request
from collections import deque
from urllib import parse
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse

import requests
from bs4 import BeautifulSoup
from bs4 import NavigableString
from scrapy.http import TextResponse

from a_hospital.queue import Queue


def create_logger():
    """Configure root logging for the crawl.

    Installs a RotatingFileHandler (5 MB per file, 10 backups) on
    logs/py_log.log at DEBUG level, then mirrors all records to the
    console via an additional StreamHandler.
    """
    from logging.handlers import RotatingFileHandler

    logpath = os.path.normpath('logs/py_log.log')
    logdir = os.path.dirname(logpath)
    # exist_ok avoids the exists()/makedirs() race when two runs start together
    os.makedirs(logdir, exist_ok=True)
    logging.basicConfig(
        # reuse logpath instead of rebuilding the same path a second time
        handlers=[RotatingFileHandler(filename=logpath,
                                      mode='a', maxBytes=5000000, backupCount=10)],
        level=logging.DEBUG,
        format="[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
        datefmt='%Y-%m-%dT%H:%M:%S')

    logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s]  %(message)s")
    rootLogger = logging.getLogger()

    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)


create_logger()

queueInstance = Queue()

host = 'http://www.a-hospital.com'
# page name -> metadata row (categories, percent-encoded url, name)
source_dict = {}

now = int(time.time())
timeStruct = time.localtime(now)
strTime = time.strftime('%Y%m%d%H%M%S', timeStruct)

# per-run output directory, stamped with the start time
root_dir = 'task_' + strTime
if not os.path.exists(root_dir):
    os.mkdir(root_dir)

# Seed the crawl queue from the CSV export: Category1, Category2, URL.
with open('test.csv', encoding="utf-8", mode='r') as csvFile:
    readCSV = csv.reader(csvFile)
    # skip header
    next(readCSV)

    for row in readCSV:
        cat1 = row[0]
        cat2 = row[1]
        url = row[2]
        path = urlparse(url).path
        # BUG FIX: the old code used path.lstrip('/w/'), which strips any
        # run of the characters '/' and 'w' -- corrupting page names that
        # begin with 'w'. Strip the literal '/w/' route prefix instead.
        name = path[len('/w/'):] if path.startswith('/w/') else path.lstrip('/')
        url = url.replace(name, urllib.parse.quote(name))
        source_dict[name] = {'Category1': cat1, 'Category2': cat2, 'url': url, 'name': name}
        queueInstance.enqueue(name)
# queueInstance.enqueue('/w/%E9%A6%96%E9%A1%B5'.lower().__hash__())

# Presentation/scripting attributes stripped from every tag before the
# cleaned article HTML is written to disk ('style' was listed twice).
REMOVE_ATTRIBUTES = [
    'lang', 'language', 'onmouseover', 'onmouseout', 'script', 'style', 'font',
    'dir', 'face', 'size', 'color', 'class', 'width', 'height', 'hspace',
    'border', 'valign', 'align', 'background', 'bgcolor', 'text', 'link', 'vlink',
    'alink', 'cellpadding', 'cellspacing']

def _strip_noise(bodyContent):
    """Strip presentation attributes, scripts, iframes, site chrome and
    link wrappers from the article body, in place."""
    for attribute in REMOVE_ATTRIBUTES:
        for tag in bodyContent.find_all(attrs={attribute: True}):
            del tag[attribute]

    # whole tags that never carry article content
    for tag_name in ('script', 'iframe'):
        for x in bodyContent.find_all(tag_name):
            x.extract()

    # site-chrome blocks identified by element id
    for useless_id in ('p-views', 'List_link_0', 'List_link_1',
                       'contentSub', 'jump-to-nav'):
        for x in bodyContent.find_all(id=useless_id):
            x.extract()

    # keep the link text but drop the <a> wrappers themselves
    for x in bodyContent.find_all('a'):
        x.unwrap()


def _localize_images(bodyContent, cur_dir):
    """Download every <img> into cur_dir and rewrite its src to the local
    file name. Failed downloads are logged and left untouched."""
    for img in bodyContent.find_all('img'):
        src = img.get('src')
        if src is None:
            continue

        imgurl = urlparse(src)
        if imgurl.hostname is None:
            # relative image URL -- resolve it against the site host
            src = parse.urljoin(host, imgurl.geturl())

        # Deterministic local name: md5 of the absolute URL plus the
        # original extension. (The previous hash(src) is salted per
        # process, so filenames changed on every run.)
        filename = ('img' + hashlib.md5(src.encode('utf-8')).hexdigest()
                    + os.path.splitext(imgurl.path)[1])
        req = urllib.request.Request(src)
        try:
            # context managers close both the HTTP response and the file
            # even when the write fails partway (the old code leaked the
            # response on any write error)
            with urllib.request.urlopen(req) as response, \
                    open(os.path.join(cur_dir, filename), "wb") as f:
                f.write(response.read())
            img['src'] = filename
        except HTTPError as e:
            logging.error(e.reason)
        except URLError as e:  # must come after HTTPError, its subclass
            logging.error(e.reason)


def save_raw_files(soup, key, row):
    """Persist one scraped page under root_dir.

    Writes the metadata row as metadata.json and a slimmed copy of the
    article body (with images downloaded locally) as content.html, in a
    folder named after the page key.

    soup -- BeautifulSoup of the full fetched page
    key  -- page name; '/' is replaced by '_' to form the folder name
    row  -- metadata dict stored verbatim in metadata.json
    """
    bodyContent = soup.find(id="bodyContent")
    _strip_noise(bodyContent)

    cur_dir = os.path.join(root_dir, key.replace('/', '_'))
    if not os.path.exists(cur_dir):
        os.mkdir(cur_dir)

    _localize_images(bodyContent, cur_dir)

    slim_content = bodyContent.prettify()
    logging.info(slim_content)

    with open(os.path.join(cur_dir, 'metadata.json'), mode='w', encoding='utf-8') as jsonFile:
        jsonFile.write(json.dumps(row, ensure_ascii=False))
    with open(os.path.join(cur_dir, 'content.html'), mode='w', encoding='utf-8') as contentFile:
        contentFile.write(slim_content)


# Drain the seed queue: fetch each page once, archive it, and throttle.
while queueInstance.size() > 0:
    key = queueInstance.dequeue()

    # single dict lookup instead of get()-then-get()
    row = source_dict.get(key)
    if row is None:
        continue

    logging.info('current: %s , %s', row['name'], row['url'])
    url = row['url']

    try:
        # a timeout keeps one hung server from stalling the whole crawl
        res = requests.get(url, timeout=30)
    except requests.RequestException as e:
        logging.warning("get url %s failed: %s", url, e)
        continue

    if res.status_code != 200:
        # logging.warning (warn() is a deprecated alias), lazy %s args
        logging.warning("get url %s error with code %s", url, res.status_code)
        continue

    # res.text is already the decoded body; the scrapy TextResponse
    # round-trip added nothing
    soup = BeautifulSoup(res.text, 'html.parser')
    # save raw files
    save_raw_files(soup, key, row)

    logging.info(soup.prettify())

    # deliberately NOT recursing into discovered links: only the CSV
    # seeds are crawled

    # be polite to the server between requests
    time.sleep(1.2)


logging.info("_____________________________________________________")
logging.info("_____________________________________________________")
logging.info("test completed")
