import re
import time
from urllib import parse

from lxml import etree
from lxml.html.clean import Cleaner
from selenium import webdriver
from selenium.webdriver.common.by import By

class Dynamic_crawl_slm:
    """Dynamic-site crawler driven by a Selenium-controlled Chrome browser.

    Starting from ``start_url``, it loads each page, extracts in-domain
    links into a FIFO queue, and appends the visible text of every visited
    page to a text file named ``<name>.txt`` (by default).
    """

    def __init__(self, name, start_url, domain):
        """Store crawl configuration and seed the URL queue.

        name: label used for the default output file (``<name>.txt``)
        start_url: first page to load; also the base for resolving links
        domain: substring a URL must contain to be considered in-domain
        """
        self.name = name
        self.start_url = start_url
        self.domain = domain
        self.urlpool = [start_url]   # kept for backward compatibility (unused below)
        self.urlqueue = [start_url]  # URLs waiting to be visited (FIFO)
        self.urlgot = []             # URLs already visited
        self.browser = None          # webdriver.Chrome, set by browser_init()
        self.crawl_url = None        # kept for backward compatibility (unused below)
        self.html = None             # page_source snapshot of the last loaded page

    def browser_init(self, headless=False, nopic=True, proxy=False, Linuxenv=False, *chromeoption):
        """Create the Chrome webdriver with the requested options.

        headless: run without a visible window (forced on when Linuxenv)
        nopic: disable image loading to speed up crawling
        proxy: route traffic through a fixed local HTTP proxy
        Linuxenv: use the bundled ./chromedriver binary and force headless
        *chromeoption: extra raw Chrome command-line switches
        Returns True once the browser is created.
        """
        options = webdriver.ChromeOptions()
        if Linuxenv:
            headless = True
        if headless:
            options.add_argument('--headless')
        if proxy:
            options.add_argument('--proxy-server=http://192.168.1.20:1081')
        if nopic:
            # Chrome preference value 2 == block images.
            options.add_experimental_option('prefs',{'profile.default_content_setting_values':{'images':2}})
        for opt in chromeoption:
            options.add_argument(opt)
        if Linuxenv:
            # On Linux, use a driver binary shipped next to the script.
            self.browser = webdriver.Chrome('./chromedriver', options=options)
        else:
            self.browser = webdriver.Chrome(options=options)
        return True

    def check_page(self, elexpath='//a'):
        """Return True (and snapshot page_source) once *elexpath* is present.

        Uses an implicit wait of up to 5 seconds per lookup; returns False
        when the element cannot be found (page not loaded / driver error).
        """
        self.browser.implicitly_wait(5)
        try:
            # find_element(By.XPATH, ...) replaces find_element_by_xpath,
            # which was removed in Selenium 4; it raises if absent.
            if self.browser.find_element(By.XPATH, elexpath):
                self.html = self.browser.page_source
                return True
        except Exception:
            # Element not found or driver error: page is not ready.
            return False
        return False

    def get_url(self, urlxpath='//@href'):
        """Collect new in-domain links from the current page into the queue.

        urlxpath: XPath yielding candidate hrefs (resolved against start_url)
        Returns True if a page was available to scan, False otherwise.
        """
        if not self.html:
            return False
        num = 0
        # Static assets carry no crawlable text; compile the filter once.
        asset_re = re.compile(r'\.jpg$|\.png$|\.css$|\.svg$|\.json$|\.ico$|\.js$')
        for url in etree.HTML(self.html).xpath(urlxpath):
            parseurl = parse.urljoin(self.start_url, url)
            if self.domain not in parseurl:
                continue
            if asset_re.search(parseurl):
                continue
            if parseurl in self.urlgot or parseurl in self.urlqueue:
                continue
            self.urlqueue.append(parseurl)
            num += 1
        print('add {} url'.format(num))
        return True

    def parse_html(self, parsexpath="//*[not(name()='script')]/text()", savefile=None, encoding='utf-8'):
        """Append the cleaned text content of the current page to *savefile*.

        parsexpath: XPath selecting the text nodes to save (the original
            default "//[not(name='script')]/text()" was invalid XPath and
            would raise XPathEvalError; fixed here)
        savefile: output path; defaults to '<name>.txt'
        encoding: output file encoding
        Returns True if a page was parsed, False if no page is loaded.
        """
        if not self.html:
            return False
        if not savefile:
            savefile = self.name + '.txt'
        # Strip <script>/<style> content before extracting text.
        cleaner = Cleaner(style=True, scripts=True, page_structure=False, safe_attrs_only=False)
        chtml = cleaner.clean_html(self.html)
        # Open the output file once instead of re-opening per text node.
        with open(savefile, 'a', encoding=encoding) as wf:
            for text in etree.HTML(chtml).xpath(parsexpath):
                stripped = text.strip()
                if stripped:
                    wf.write(stripped + '\n')
        print('parse done')
        return True

    def crawl(self, urlxpath, parsexpath, elexpath, sleeptime=0):
        """Drain the URL queue: visit each page, save its text, enqueue links.

        sleeptime: seconds to pause before each page load (politeness delay)
        """
        pagenum = 0
        while self.urlqueue:
            time.sleep(sleeptime)
            url = self.urlqueue.pop(0)
            print(url)
            self.urlgot.append(url)
            try:
                self.browser.get(url)
            except Exception:
                # Navigation errors (timeouts, bad URLs) skip this page only.
                pass
            if self.check_page(elexpath):
                pagenum += 1
                if not self.parse_html(parsexpath):
                    print('get nothing from ' + url)
                if not self.get_url(urlxpath):
                    print('get not any url from ' + url)
        print('crawl {} page(s)!'.format(pagenum))

    def start(self, urlxpath='//@href', parsexpath='//text()', elexpath='//a', sleeptime=1, headless=False, nopic=True, proxy=False, Linuxenv=False, *chromeoption):
        """Launch the browser, seed the crawl from start_url, then run crawl().

        Returns True when the crawl ran, False when initialization, link
        extraction, or first-page parsing failed.
        """
        if not self.browser_init(headless, nopic, proxy, Linuxenv, *chromeoption):
            return False
        self.browser.get(self.start_url)
        # Retry until the landing page exposes the expected element
        # (check_page itself waits up to 5 s per attempt).
        while not self.check_page(elexpath):
            pass
        if not self.get_url(urlxpath):
            print('no url to add,exit!')
            return False
        if not self.parse_html(parsexpath):
            print('no item to get,exit!')
            return False
        self.crawl(urlxpath, parsexpath, elexpath, sleeptime)
        return True


if __name__ == '__main__':
    # Crawl the Tribunnews news section with a headless browser,
    # pausing one second between page loads.
    news_crawler = Dynamic_crawl_slm('ynews', 'https://www.tribunnews.com/news', 'tribunnews')
    news_crawler.start(sleeptime=1, headless=True)



