from selenium import webdriver
from lxml import etree
from urllib import parse
import time
import re
import urllib.robotparser
from collections import deque


class Dynamic_crawl_slm:
    """Selenium-driven crawler for JavaScript-rendered ("dynamic") sites.

    Starting from ``start_url`` the crawler follows links containing
    ``domain``; URLs that also contain ``concrete_domain`` are prioritised
    (crawled *and* text-parsed), other in-domain URLs are only mined for
    further links.  Extracted text is appended to ``<name>.txt``.
    """

    def __init__(self, name, start_url, domain, concrete_domain=None):
        """Set up crawl state.

        :param name: job name, also the default output-file stem.
        :param start_url: seed URL of the crawl.
        :param domain: substring a URL must contain to be followed at all.
        :param concrete_domain: substring marking pages whose text is
            extracted; defaults to ``domain`` when not given.
        """
        self.name = name
        self.start_url = start_url
        self.domain = domain
        if concrete_domain:
            self.concrete_domain = concrete_domain
        else:
            self.concrete_domain = self.domain
        self.urlpool = [start_url]   # kept for backward compatibility (unused)
        self.urlqueue = deque()      # crawl frontier; right end = high priority
        self.urlqueue.append(start_url)
        self.urlgot = set()          # URLs already fetched
        self.browser = None          # Selenium WebDriver, set by browser_init()
        self.crawl_url = None
        self.html = None             # page source of the most recently loaded page
        self.robotstxt = None        # RobotFileParser once parse_robotstxt() succeeded

    def parse_robotstxt(self):
        """Fetch and parse the site's robots.txt.

        Stores the ``RobotFileParser`` object itself in ``self.robotstxt``
        so that ``crawl()`` can query ``can_fetch()``.

        :return: True on success, False on any failure (network, bad URL).
        """
        try:
            rp = urllib.robotparser.RobotFileParser()
            url_split = urllib.parse.urlsplit(self.start_url)
            rp.set_url(urllib.parse.urlunsplit((url_split[0], url_split[1], 'robots.txt', '', '')))
            rp.read()
            # Fix: rp.read() returns None; storing its result (as the old
            # code did) left self.robotstxt falsy and silently disabled the
            # can_fetch() check in crawl().  Keep the parser object instead.
            self.robotstxt = rp
            print('finish parse robots.txt!')
            return True
        except Exception:
            return False

    def browser_init(self, headless=False, nopic=True, proxy=False, Linuxenv=False, *chromeoption):
        """Create the Chrome WebDriver stored in ``self.browser``.

        :param headless: run Chrome without a window (forced on under Linuxenv).
        :param nopic: disable image loading to save bandwidth.
        :param proxy: route traffic through the hard-coded HTTP proxy.
        :param Linuxenv: Linux server environment -- implies headless and
            expects a ``./chromedriver`` binary next to the script.
        :param chromeoption: extra raw Chrome command-line switches.
        :return: True once the driver is created.
        """
        options = webdriver.ChromeOptions()
        if Linuxenv:
            headless = True  # no display available on a headless server
        if headless:
            options.add_argument('--headless')
        if proxy:
            options.add_argument('--proxy-server=http://10.10.50.20:1083')
        if nopic:
            options.add_experimental_option('prefs', {'profile.default_content_setting_values': {'images': 2}})
        for opt in chromeoption:
            options.add_argument(opt)
        if Linuxenv:
            # Selenium 3 style: driver binary path as first positional arg.
            self.browser = webdriver.Chrome('./chromedriver', options=options)
        else:
            self.browser = webdriver.Chrome(options=options)
        return True

    def check_page(self, elexpath='//a', movetime=5):
        """Check that the current page finished loading and capture its source.

        Scrolls to the bottom ``movetime`` times to trigger lazy-loaded
        content, then probes for ``elexpath``.

        :param elexpath: XPath whose presence marks a fully loaded page.
        :param movetime: number of scroll-to-bottom attempts.
        :return: True (and set ``self.html``) on success, else False.
        """
        self.browser.implicitly_wait(5)
        for _ in range(movetime):
            try:
                # Force-scroll to the bottom so infinite-scroll pages render.
                self.browser.execute_script("var q=document.documentElement.scrollTop=100000")
            except Exception:
                pass
        try:
            # NOTE(review): Selenium-3 API, removed in Selenium 4 -- would
            # need find_element(By.XPATH, ...) there; kept for consistency
            # with the rest of the file.
            if self.browser.find_element_by_xpath(elexpath):
                self.html = self.browser.page_source
                return True
        except Exception:
            return False
        return False

    def get_url(self, urlxpath='//@href'):
        """Harvest links from ``self.html`` into the crawl frontier.

        Links outside ``self.domain``, static assets and social-network
        URLs are skipped; ``concrete_domain`` links go to the high-priority
        (right) end of the deque, other in-domain links to the left.

        :param urlxpath: XPath selecting candidate URLs.
        :return: True when a page was available to scan, else False.
        """
        if not self.html:
            return False
        # Assets and third-party/social links that are never worth fetching;
        # compiled once here instead of once per candidate URL.
        skip = re.compile(
            r'\.jpg$|\.png$|\.css$|\.svg$|\.json$|facebook|instagram|twitter|m.weibo.cn|service.weibo.com|passport|qq.com|linkedin')
        num = 0
        for url in etree.HTML(self.html).xpath(urlxpath):
            try:
                parseurl = parse.urljoin(self.start_url, url)
            except Exception:
                continue
            if self.domain not in parseurl:
                continue
            if skip.search(parseurl.lower()):
                continue
            if parseurl in self.urlgot or parseurl in self.urlqueue:
                continue
            if self.concrete_domain in parseurl:
                self.urlqueue.append(parseurl)      # popped first by crawl()
            else:
                self.urlqueue.appendleft(parseurl)  # low priority: links only
            num += 1
        print('add {} url'.format(num))
        return True

    def parse_html(self, parsexpath='//text()', savefile=None, encoding='utf-8'):
        """Append the current page's non-empty text nodes to a file.

        :param parsexpath: XPath selecting the text nodes to save.
        :param savefile: output path; defaults to ``<name>.txt``.
        :param encoding: output file encoding.
        :return: True when a page was available to parse, else False.
        """
        if not savefile:
            savefile = self.name + '.txt'
        if not self.html:
            return False
        # Open the output once per page instead of once per text node.
        with open(savefile, 'a', encoding=encoding) as wf:
            for text in etree.HTML(self.html).xpath(parsexpath):
                stripped = text.strip()
                if stripped:
                    wf.write(stripped + '\n')
        print('parse done')
        return True

    def crawl(self, urlxpath, parsexpath, elexpath, sleeptime=0):
        """Main loop: fetch every queued URL until the frontier is empty.

        :param urlxpath: XPath for link extraction (see ``get_url``).
        :param parsexpath: XPath for text extraction (see ``parse_html``).
        :param elexpath: XPath proving a page loaded (see ``check_page``).
        :param sleeptime: politeness delay in seconds between requests.
        """
        pagenum = 0
        while self.urlqueue:
            time.sleep(sleeptime)
            url = self.urlqueue.pop()  # right end holds priority URLs
            # Honour robots.txt when it was parsed successfully.
            if self.robotstxt and not self.robotstxt.can_fetch("*", url):
                continue
            print(url)
            print('{} url(s) left.'.format(len(self.urlqueue)))
            self.urlgot.add(url)
            try:
                self.browser.get(url)
            except Exception:
                continue
            if self.check_page(elexpath):
                pagenum += 1
                # Only pages on the concrete domain carry wanted content.
                if self.concrete_domain in url:
                    if not self.parse_html(parsexpath):
                        print('parse page error::' + url)
                if not self.get_url(urlxpath):
                    print('get url error:' + url)
        print('crawl {} page(s)!'.format(pagenum))

    def start(self, urlxpath='//@href', parsexpath='//*//text()', elexpath='//div', sleeptime=1, headless=False,
              nopic=True, proxy=False, Linuxenv=False, *chromeoption):
        """Entry point: parse robots.txt, boot the browser, seed and crawl.

        :return: True when the crawl ran to completion, False on setup errors.
        """
        if not self.parse_robotstxt():
            print('parse robots.txt error,abort!')
            return False
        if not self.browser_init(headless, nopic, proxy, Linuxenv, *chromeoption):
            return False
        print('get url:' + self.start_url)
        self.browser.get(self.start_url)
        # Busy-wait until the seed page reports loaded.
        while not self.check_page(elexpath):
            pass
        if not self.get_url(urlxpath):
            print('no url to add,exit!')
            return False
        if not self.parse_html(parsexpath):
            print('no item to get,exit!')
            return False
        self.crawl(urlxpath, parsexpath, elexpath, sleeptime)
        self.browser.close()
        return True


if __name__ == '__main__':
    # Crawl allabout.co.jp headlessly; Linuxenv also forces headless mode.
    slm_crawler = Dynamic_crawl_slm('shenghuo', 'https://allabout.co.jp/', 'allabout.co.jp')
    slm_crawler.start(sleeptime=1, headless=True, Linuxenv=True, proxy=True)



