#!/usr/bin/env python3
#
# 将指定网页和XPATH下的所有超链接对应的页保存为文件
#

import os
import re
import sys
import argparse
import platform
import traceback
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By

def parse_args():
    """Parse and return the command-line options for the crawler.

    Options: URL (required), XPATH filter, output directory, filename
    suffix, and which browser backend to drive.
    """
    description = ("Search and crawl all hyperlinks from specific URL and XPATH")
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-u', '--url', dest='url', action='store',
                        required=True, default='',
                        help="Specify the URL to crawl")
    parser.add_argument('-x', '--xpath', dest='xpath', action='store',
                        default='',
                        help="Specify the XPATH to crawl")
    parser.add_argument('-o', '--output', dest='output', action='store',
                        default='',
                        help="Save the pages to the output directory")
    parser.add_argument('-s', '--suffix', dest='suffix', action='store',
                        default='.html',
                        help="Save the pages with the suffix name")
    parser.add_argument('-b', '--browser', dest='browser', action='store',
                        choices=['chrome', 'firefox'], default='chrome',
                        help="Specify the browser")
    return parser.parse_args()

def get_host(url):
    """Return the host part of *url*, or '' when it cannot be extracted.

    Splits on '://', ':' and '/' — for 'http://host:port/path' the host
    is the second token.  URLs without a scheme are not supported by
    this heuristic (the second token would be a path segment).
    """
    try:
        # For 'scheme://host...' the split yields ['scheme', 'host', ...].
        return re.split(r'://|:|/', url)[1].strip()
    except IndexError:
        # No second token (e.g. empty string or bare word): no host.
        return ''

def get_suffix(url):
    """Return the file-extension of *url*'s last path segment, or None.

    Query string and fragment are stripped first.  None is returned when
    the URL has no path segment after the host, when the last segment has
    no dot, or when *url* is not a string.
    """
    try:
        # Drop '?query' and '#fragment' before inspecting the path.
        path = re.split(r'\?|#', url)[0]
        parts = re.split(r'://|:|/', path)
        if len(parts) < 3:
            # Only scheme and host (or less): no path segment to inspect.
            return None
        last = parts[-1]
        if '.' in last:
            return last.split('.')[-1].strip()
        return None
    except TypeError:
        # *url* is not a string (e.g. None).
        return None

def normalize_filename(filename):
    """Replace characters illegal in filenames with '_'.

    Covers the characters forbidden on Windows (/ \\ : * " < > | ?).
    Non-string input is returned unchanged.
    """
    try:
        # Character class is equivalent to the alternation of each
        # forbidden character, one substitution pass.
        return re.sub(r'[/\\:*"<>|?]', '_', filename)
    except TypeError:
        # *filename* is not a string; give it back untouched.
        return filename

def format_exception():
    """Return the currently-handled exception's traceback as one string.

    Must be called from inside an ``except`` block; joins the formatted
    traceback lines exactly as ``traceback.print_exc`` would print them.
    """
    # format_exception returns a list of lines; join replaces the manual
    # accumulation loop of the original.
    return ''.join(traceback.format_exception(*sys.exc_info()))

class HyperlinkCrawler:
    """Crawl a page with Selenium, collect its hyperlinks (optionally
    restricted to an XPATH), and save each linked page to disk via HTTP GET.
    """

    def __init__(self, browser):
        """Create the webdriver for *browser* ('chrome' or 'firefox')."""
        self.driver = None  # guard for __del__ if driver creation fails
        self.__init_driver(browser)

    def __init_driver(self, browser):
        # NOTE: the webdriver binary version must match the installed
        # browser version; on a mismatch find_element does not return
        # proper web element objects.
        if browser is not None and browser.lower() == 'firefox':
            self.__init_firefox()
        else:
            self.__init_chrome()

    def __init_chrome(self):
        if platform.system() == 'Linux':
            opt = webdriver.ChromeOptions()
            # `Options.headless` is deprecated and removed in Selenium
            # >= 4.13; pass the command-line switch instead.
            opt.add_argument('--headless')
            self.driver = webdriver.Chrome(options = opt)
        else:
            # On Windows a hidden window triggers Content Security Policy
            # errors, so keep the window visible.
            self.driver = webdriver.Chrome()

    def __init_firefox(self):
        self.driver = webdriver.Firefox()

    def __del__(self):
        # quit() shuts down the browser AND the driver process; close()
        # would only close the current window and leak the process.
        if getattr(self, 'driver', None) is not None:
            self.driver.quit()

    def __save_page(self, url, filename, path, suffix):
        """Download *url* with requests and write it under *path*.

        When *filename* is empty the page title (loaded via the webdriver)
        is used.  The extension is taken from the URL when present,
        otherwise *suffix* is appended.  Failures are printed, never
        raised, so one bad link does not abort the whole crawl.
        """
        try:
            if filename is None or len(filename) == 0:
                self.driver.get(url)
                filename = self.driver.title
            if (sx := get_suffix(url)) is not None and len(sx) > 0:
                filename += '.' + sx
            else:
                filename += suffix
            filename = normalize_filename(filename.strip())
            filename = path + filename
            print('Save page "{}" as file "{}"'.format(url, filename))
            # A timeout keeps one dead server from hanging the whole crawl.
            content = requests.get(url, timeout = 60).content
            with open(filename, 'wb') as f:
                f.write(content)
        except Exception:
            print('Save page "{}" as file "{}" failed:\n{}'
                  .format(url, filename, format_exception()))

    def crawl_pages(self, url, xpath, output, suffix):
        """Load *url*, collect <a> elements (under *xpath* when given) and
        save the page plus every linked page to <output>/<host>/<title>/.
        """
        print('Crawling all hyperlinks from URL "{}", XPATH "{}" ...'
              .format(url, xpath))
        try:
            self.driver.get(url)
            hs = []
            if xpath is not None and len(xpath) > 0:
                for e in self.driver.find_elements(By.XPATH, xpath):
                    hs += e.find_elements(By.TAG_NAME, 'a')
            else:
                hs = self.driver.find_elements(By.TAG_NAME, 'a')
            # Keep only links with visible text; the text becomes the
            # saved filename.
            ps = [{'text': h.text, 'href': h.get_attribute('href')}
                  for h in hs if len(h.text) > 0]
            if (pn := len(ps)) == 0:
                print('No found hyperlink in URL "{}", XPATH "{}"'
                      .format(url, xpath))
                return
            print('Found {} hyperlinks in URL "{}", XPATH "{}"'
                  .format(pn, url, xpath))
            if output is None or len(output) == 0:
                output = './'
            elif not output.endswith('/'):
                output += '/'
            # Output layout: <output>/<host>/<page title>/
            path = output + get_host(self.driver.current_url)
            if not path.endswith('/'):
                path += '/'
            path += normalize_filename(self.driver.title.strip())
            if not path.endswith('/'):
                path += '/'
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(path, exist_ok = True)
            self.__save_page(url, self.driver.title, path, suffix)
            for p in ps:
                self.__save_page(p['href'], p['text'], path, suffix)
        except Exception:
            print('Crawled all hyperlinks from URL "{}", XPATH "{}" '
                  'failed:\n{}'
                  .format(url, xpath, format_exception()))
        finally:
            # Runs on success, failure and the early return above.
            print('Crawled all hyperlinks from URL "{}", XPATH "{}" finished'
                  .format(url, xpath))
def main():
    """Script entry point: parse options and crawl the requested page."""
    args = parse_args()
    crawler = HyperlinkCrawler(args.browser)
    crawler.crawl_pages(args.url, args.xpath, args.output, args.suffix)

if __name__ == '__main__':
    # Guard so importing this module does not launch a browser.
    main()
