# push http://data.zz.baidu.com/urls?site=www.heanny.cn&token=ZWog5v61cmbBn9uG
# update http://data.zz.baidu.com/update?site=www.heanny.cn&token=ZWog5v61cmbBn9uG
'''
POST /urls?site=www.heanny.cn&token=ZWog5v61cmbBn9uG HTTP/1.1
User-Agent: curl/7.12.1
Host: data.zz.baidu.com
Content-Type: text/plain
Content-Length: 83
http://www.example.com/1.html
http://www.example.com/2.html

# Docs: https://scylla.wildcat.io/zh/latest/
'''

# http://chromedriver.storage.googleapis.com/index.html
import time,random,json

import eventlet
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.blocking import BlockingScheduler

from xml.dom.minidom import parse
from bs4 import BeautifulSoup
import xml.dom.minidom
import requests
eventlet.monkey_patch()  # patch stdlib (sockets, etc.) for cooperative green-thread I/O
# Baidu Webmaster (ziyuan) link-submission credentials and endpoints.
# NOTE(review): this token is a live secret committed to source control --
# rotate it and load it from the environment instead.
token = 'ZWog5v61cmbBn9uG'
# Endpoint for submitting newly published URLs to Baidu.
pushurl = 'http://data.zz.baidu.com/urls?site=www.heanny.cn&token={}'.format(token)
# Endpoint for notifying Baidu that existing URLs have changed.
updateUrl = 'http://data.zz.baidu.com/update?site=www.heanny.cn&token={}'.format(token)
# Browser-like request headers shared by every HTTP call in this module.
headers = {
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
    'Accept': 'text/html, */*; q=0.01',
    'X-Requested-With': 'XMLHttpRequest',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',
    'DNT': '1',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'zh-CN,zh;q=0.8,ja;q=0.6',
}


def getXmlData(path='sitemap.xml'):
    """Parse a sitemap XML file and return the list of page URLs.

    Args:
        path: Path to the sitemap file (defaults to 'sitemap.xml').

    Returns:
        list[str]: The text of every non-empty <loc> element found under
        the sitemap's <url> entries, in document order.
    """
    dom = xml.dom.minidom.parse(path)
    root = dom.documentElement
    urls = []
    for url in root.getElementsByTagName("url"):
        locs = url.getElementsByTagName('loc')
        # Guard against a <url> entry missing its <loc>, or an empty <loc>:
        # the original chained [0].childNodes[0] and raised IndexError there.
        # (Also dropped the "shelf" root-attribute print -- a leftover from
        # the minidom tutorial that can never fire on a sitemap's <urlset>.)
        if locs and locs[0].childNodes:
            urls.append(locs[0].childNodes[0].data)
    return urls


def pushUrl2Baidu(Urls=None):
    """POST the given URLs to Baidu's link-submission API.

    Baidu's push endpoint expects a plain-text body with one URL per line.

    Args:
        Urls: Iterable of URL strings; defaults to an empty list.

    Returns:
        None. Prints the HTTP status, the raw response body and the parsed
        <title> element (if any) for manual inspection.
    """
    # Avoid the mutable-default-argument pitfall of the original `Urls=[]`.
    if Urls is None:
        Urls = []
    res = requests.post(pushurl, data='\n'.join(Urls), headers=headers)
    print(res.status_code)
    print(res.text)
    html = BeautifulSoup(res.text, 'html.parser')
    print(html.find('title'))
    return


def getAllUrlsHtml(Urls=None, Proxies=None):
    """Fetch each URL (optionally through a random proxy) and print its title.

    Args:
        Urls: Iterable of URLs to fetch; defaults to empty.
        Proxies: List of proxy dicts carrying 'ip', 'port' and 'is_https'
            keys (as used below); may be empty, in which case every request
            goes direct.

    Returns:
        None. Prints status code, page title and URL for each fetch.
    """
    # Avoid mutable default arguments (`Urls=[]`, `Proxies=[]`).
    if Urls is None:
        Urls = []
    if Proxies is None:
        Proxies = []
    for url in Urls:
        print('>>>>>>>>>>>>>>>>')
        # random.choice raises IndexError on an empty sequence; fall back
        # to a direct request when no proxies are available.
        proxy = random.choice(Proxies) if Proxies else {}
        try:
            if 'ip' in proxy:
                scheme = 'https' if proxy['is_https'] else 'http'
                proxy_map = {scheme: '{}:{}'.format(proxy['ip'], proxy['port'])}
                res = requests.get(url, headers=headers, proxies=proxy_map, timeout=5)
            else:
                res = requests.get(url, headers=headers)
        except Exception as e:
            # Proxy failed or timed out -- retry once without the proxy.
            print(e)
            res = requests.get(url, headers=headers)
        html = BeautifulSoup(res.text, 'html.parser')
        title = html.find('title')
        print(res.status_code, title.text if title else '', url)
        if res.status_code == 405:
            # Error bodies come back GBK-encoded; re-decode for readability.
            res.encoding = 'gbk'
            print(res.text)
    return


def BackgroundGetUrl(Urls=None, Proxies=None):
    """Schedule getAllUrlsHtml on a background cron (05/10/15:01-03).

    Returns immediately; jobs run on the scheduler's own thread.

    Args:
        Urls: URLs forwarded to getAllUrlsHtml.
        Proxies: Proxy list forwarded to getAllUrlsHtml.
    """
    # Avoid mutable default arguments (`Urls=[]`, `Proxies=[]`).
    if Urls is None:
        Urls = []
    if Proxies is None:
        Proxies = []
    scheduler = BackgroundScheduler()
    scheduler.add_job(getAllUrlsHtml, 'cron', args=(Urls, Proxies),
                      hour='5,10,15', minute='1-3')
    try:
        scheduler.start()
    except Exception as e:
        # Best-effort: log scheduler start-up failures instead of crashing.
        print(e)


def BlockingPushUrls(Urls=None):
    """Block the process while pushing Urls to Baidu daily at 10:00-10:03.

    Bug fix: the original instantiated BackgroundScheduler, so start()
    returned immediately and main() could exit before the cron job ever
    fired. BlockingScheduler (imported at the top of the file but never
    used) matches this function's name and keeps the process alive.

    Args:
        Urls: URLs forwarded to pushUrl2Baidu; defaults to an empty list.
    """
    # Avoid the mutable-default-argument pitfall of the original `Urls=[]`.
    if Urls is None:
        Urls = []
    scheduler = BlockingScheduler()
    scheduler.add_job(pushUrl2Baidu, 'cron', args=(Urls,), hour='10', minute='0-3')
    try:
        scheduler.start()
    except Exception as e:
        # Best-effort: log scheduler failures (incl. KeyboardInterrupt-free
        # shutdown paths) instead of crashing.
        print(e)


def getProxies(url='http://192.168.10.123:8899/api/v1/proxies?page=1&limit=200'):
    """Fetch the proxy list from the local Scylla-style proxy API.

    Args:
        url: Endpoint returning JSON with 'count' and 'proxies' keys.

    Returns:
        list: The 'proxies' payload, or [] when the request fails or the
        pool is empty.
    """
    response = requests.get(url)
    if response.status_code != 200:
        return []
    payload = json.loads(response.text)
    return payload['proxies'] if payload['count'] > 0 else []


def main():
    """Load sitemap URLs and proxies, then start both cron schedulers."""
    sitemap_urls = getXmlData()
    proxy_list = getProxies()
    BackgroundGetUrl(sitemap_urls, proxy_list)
    BlockingPushUrls(sitemap_urls)



def getHtmlByChrome(url, Proxies=None):
    """Load `url` in headless Chrome through a random proxy, retrying on failure.

    Args:
        url: Page to load.
        Proxies: Sequence of (ip, port, isHttps) rows -- indexed access below
            matches the SqlFun query in __main__. Defaults to empty, in which
            case random.choice fails and the call retries indefinitely.

    NOTE(review): the retry is unbounded recursion with a fresh random proxy;
    a permanently unreachable URL will recurse forever. Preserved for
    backward compatibility, but consider a retry cap.
    """
    if Proxies is None:
        Proxies = []
    browser = None
    try:
        proxy = random.choice(Proxies)
        scheme = 'https' if proxy[2] else 'http'
        print(url, "{}://{}:{}".format(scheme, proxy[0], proxy[1]))
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument("--proxy-server={}://{}:{}".format(scheme, proxy[0], proxy[1]))
        browser = webdriver.Chrome(chrome_options=chrome_options)
        browser.set_page_load_timeout(5)
        browser.get(url)
    except Exception:
        # Bug fix: the original called browser.quit() here even when the
        # failure happened before `browser` was assigned (empty proxy list,
        # driver start-up error), raising UnboundLocalError; it then quit
        # the same dead driver a second time in `finally`. Guard both.
        if browser is not None:
            browser.quit()
            browser = None
        getHtmlByChrome(url, Proxies)
    else:
        print('success')
    finally:
        # Quit exactly once, and only if the driver is still alive.
        if browser is not None:
            browser.quit()

if __name__ == '__main__':
    # Ad-hoc driver: load sitemap URLs, pull validated proxies from the
    # local database, and visit each page via headless Chrome.
    # main()
    # from getXicidaili import main
    Urls = getXmlData()
    # Proxies = getProxies()
    # Proxies = main()
    # NOTE(review): SqlFun is a project-local module; savesql is used here to
    # run a SELECT and return (ip, port, isHttps) rows -- confirm against
    # SqlFun's source, the name suggests writing rather than reading.
    from SqlFun import savesql
    Proxies = savesql("select ip,port,isHttps from proxies where isValid =1")
    print(Proxies)
    # getAllUrlsHtml(Urls, Proxies)
    for url in Urls:
        getHtmlByChrome(url,Proxies)

    # from SqlFun import saveScylla

    # saveScylla(Proxies)
    # pushUrl2Baidu(Urls)

    # test()
