# -*- coding: utf-8 -*-
# @Time    : 2019/2/25 17:29
# @Author  : Macher
# @File    : multirbl.py
# @Software: PyCharm Community Edition

from bs4 import BeautifulSoup
from selenium import webdriver
import time
import pymysql
# from send_email import sendEmail
import sys
# soup = BeautifulSoup(open('test.txt'),'lxml')
# table = soup.find(id = 'dnsbl_data')
# trs = table.find_all(class_= 'clrBlacklisted')
# for tr in trs:
#     print(tr)
#     # print(1)
#     print(tr.find(class_='l_id').string)
#     print(tr.find(class_='l_qhost').string)
#     print(tr.find(class_='dns_zone').string)
#     print(tr.find(class_='result').string)
#     print(tr.find('a').string)
# Parameterized UPDATE used by updatesql(); args are (multirbl_result, ip).
update = 'update experiment_date set multirbl = %s where ip = %s'
def sql():
    """Fetch all IPs whose `multirbl` column is still NULL.

    Returns:
        list of 1-tuples ``(ip,)`` as produced by the DB driver's
        ``fetchall()``.
    """
    # NOTE(review): credentials are hard-coded; move to config/env.
    db = pymysql.connect("219.216.65.41", "root", "123456", "mydata")
    try:
        cursor = db.cursor()
        try:
            cursor.execute("SELECT ip FROM experiment_date WHERE multirbl is null ")
            # Was a manual append loop; fetchall() already yields the rows.
            # (The original also called commit(), which is a no-op for SELECT.)
            return list(cursor.fetchall())
        finally:
            cursor.close()
    finally:
        # Was leaked if execute() raised before the explicit close().
        db.close()

def updatesql(tup):
    """Persist one lookup result.

    Args:
        tup: 2-tuple ``(multirbl_result, ip)`` bound to the module-level
             parameterized ``update`` statement.
    """
    # NOTE(review): credentials are hard-coded; move to config/env.
    db = pymysql.connect("219.216.65.41", "root", "123456", "mydata")
    try:
        cursor = db.cursor()
        try:
            cursor.execute(update, tup)
        finally:
            cursor.close()
        db.commit()  # commit the UPDATE before closing
    finally:
        # Was leaked if execute() raised before the explicit close().
        db.close()

def getPage(iplist):
    """Look up each IP on multirbl.valli.org and record its blacklist hits.

    Args:
        iplist: list of 1-tuples ``(ip,)`` (rows from ``sql()``); consumed
                destructively via ``pop()``.
    """
    browser = webdriver.Chrome()
    try:
        while iplist:
            ip = iplist.pop()
            print(len(iplist))
            print(ip[0])
            # e.g. http://multirbl.valli.org/lookup/122.228.19.80.html
            url = 'http://multirbl.valli.org/lookup/' + str(ip[0]) + '.html'
            try:
                browser.get(url)
                # The lookup page fills results in asynchronously; give it
                # time to finish before scraping the DOM.
                time.sleep(60)
                judgePage(browser.page_source, ip[0])
            except Exception as e:
                # Was a bare `except: continue`, which also swallowed
                # KeyboardInterrupt/SystemExit and hid every error.
                print('lookup failed for %s: %s' % (ip[0], e))
                continue
    finally:
        # The original never closed the browser, leaking the Chrome process.
        browser.quit()

def judgePage(pagesource, ip):
    """Parse a multirbl lookup page and store the blacklist sources for *ip*.

    Collects the DNS zone of every result row marked ``clrBlacklisted``,
    concatenates them (each followed by ``&&``), and writes the string back
    to the database via ``updatesql()``. If the results table is missing,
    an empty string is stored.

    Args:
        pagesource: raw HTML of the lookup page.
        ip: the IP address that was looked up.
    """
    sources = []
    soup = BeautifulSoup(pagesource, 'lxml')
    try:
        table = soup.find(id='dnsbl_data')
        for tr in table.find_all(class_='clrBlacklisted'):
            zone = tr.find(class_='dns_zone').string
            # Guard: .string can be None, which crashed the original's
            # string concatenation and aborted the whole parse.
            if zone:
                sources.append(zone + '&&')
    except AttributeError:
        # `table` is None when the page has no results table (layout change
        # or failed lookup). Was a bare `except`; keep the best-effort
        # behavior of still recording what we have.
        print('NULL')
    # Replaces the original `tuple({str(s)}) + tuple({str(ip)})` set trick,
    # which is just a 2-tuple; also avoids quadratic `s +=` building.
    tup = (''.join(sources), str(ip))
    print(tup)
    updatesql(tup)

if __name__ == '__main__':
    # Guard the entry point so importing this module doesn't start scraping.
    getPage(sql())

