#-*- coding: UTF-8 -*-
__author__ = 'Administrator'
import random,datetime,time,re,sys,os,logging,requests
from bs4 import BeautifulSoup
from lxml import etree
class proxyScraper(object):
    """Scrape free HTTP proxies from several public listing sites,
    validate each one against ip.chinaz.com, and write the working
    'ip:port' entries to a timestamped file in the current directory.

    Typical use: ``proxyScraper().scrape()``.
    """

    # Browser-like request headers; a real User-Agent avoids trivial bot blocks.
    header = {
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Accept': '*/*',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36',
        'Connection': 'keep-alive',
    }
    # Page that reports the caller's apparent IP; used to verify a proxy works.
    validateUrl = 'http://ip.chinaz.com/'

    def __init__(self):
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s %(levelname)s %(message)s')
        # BUG FIX: these lists used to be class attributes, so every
        # instance shared (and kept appending to) the same objects.
        # They are per-instance state and belong here.
        self.proxies = []   # raw 'ip:port' strings as scraped (may repeat)
        self.proxy = []     # deduplicated candidates
        self.valid = []     # candidates that passed validation

    def _harvest(self, soup_rows, ip_col, port_col):
        """Pull 'ip:port' out of each table row; silently skip rows
        (e.g. <th> header rows) that lack the expected <td> cells —
        the original code raised IndexError on those."""
        for tr in soup_rows:
            ip = tr.select('td:nth-of-type(%d)' % ip_col)
            port = tr.select('td:nth-of-type(%d)' % port_col)
            if ip and port:
                self.proxies.append(ip[0].get_text() + ':' + port[0].get_text())

    def kxdaili(self):
        """Scrape pages 1-10 of kxdaili.com (ip in col 1, port in col 2)."""
        url = 'http://www.kxdaili.com/dailiip/1/{}.html'
        for page in range(1, 11):
            try:
                response = self.__req(url.format(page))
            except requests.RequestException:
                continue  # skip unreachable pages instead of aborting the run
            soup = BeautifulSoup(response, 'lxml')
            self._harvest(soup.find_all('tr'), 1, 2)
            time.sleep(random.randint(1, 8))  # polite delay between pages

    def xicidaili(self):
        """Scrape pages 1-10 of xicidaili.com (ip in col 2, port in col 3;
        first row is a header)."""
        url = 'http://www.xicidaili.com/nn/{}'
        for page in range(1, 11):
            try:
                response = self.__req(url.format(page))
            except requests.RequestException:
                continue
            soup = BeautifulSoup(response, 'lxml')
            self._harvest(soup.find_all('tr')[1:], 2, 3)
            time.sleep(random.randint(1, 8))

    def ip181(self):
        """Scrape the single listing page of ip181.com (first row is a header)."""
        try:
            response = self.__req('http://www.ip181.com/')
        except requests.RequestException:
            return
        soup = BeautifulSoup(response, 'lxml')
        self._harvest(soup.find_all('tr')[1:], 1, 2)

    def httpdaili(self):
        """Scrape the free-proxy table of httpdaili.com (first row is a header)."""
        try:
            response = self.__req('http://www.httpdaili.com/mfdl/')
        except requests.RequestException:
            return
        soup = BeautifulSoup(response, 'lxml')
        self._harvest(soup.select('.kb-wall-truth-item-main tr')[1:], 1, 2)

    def ip66(self):
        """Scrape pages 1-10 of 66ip.cn (first three rows are boilerplate)."""
        url = 'http://www.66ip.cn/{}.html'
        for page in range(1, 11):
            try:
                response = self.__req(url.format(page))
            except requests.RequestException:
                continue
            soup = BeautifulSoup(response, 'lxml')
            self._harvest(soup.find_all('tr')[3:], 1, 2)
            time.sleep(random.randint(1, 8))

    def __req(self, url):
        """GET *url* with the shared headers and a 10 s timeout.

        Raises requests.RequestException (incl. HTTPError for non-2xx
        responses, so callers never parse an error page) and returns the
        decoded body text.
        """
        r = requests.get(url, headers=self.header, timeout=10)
        r.raise_for_status()
        return r.text

    def scrape(self):
        """Run every scraper source, deduplicate the results, validate them."""
        sources = (self.kxdaili, self.xicidaili, self.ip181,
                   self.ip66, self.httpdaili)
        for source in sources:
            try:
                source()
            except Exception:
                # One broken site layout must not abort the whole run.
                logging.exception('scraper %s failed', source.__name__)
        # Deduplicate while preserving scrape order (sites overlap heavily).
        self.proxy = list(dict.fromkeys(self.proxies))
        logging.info('scraping complete')
        logging.info('%d http proxies scraped', len(self.proxy))
        logging.info('starting to validate proxies')
        self.validate()

    def validate(self):
        """Fetch validateUrl through each candidate proxy; keep those whose
        reported remote IP equals the proxy's own IP (i.e. the proxy both
        works and actually relays traffic), then save them to a file named
        with the current timestamp."""
        for candidate in self.proxy:
            if not candidate:
                continue
            candidate = candidate.strip()
            p = {'http': 'http://' + candidate}
            try:
                result = requests.get(self.validateUrl, timeout=3, proxies=p)
            except requests.RequestException:
                logging.info('{} is not accessable'.format(candidate))
                continue
            logging.info('{} is accessable'.format(candidate))
            if result.status_code != 200:
                continue
            tree = etree.HTML(result.content)
            try:
                remoteIp = tree.xpath(
                    r'//*[@id="leftinfo"]/div[3]/div[2]/p[2]/text()[1]')[0].strip()
            except IndexError:
                logging.error('cannot find the ip node')
                continue
            # BUG FIX: the original stored only the bare IP, dropping the
            # port and making the saved entry unusable; keep 'ip:port'.
            if remoteIp == candidate.split(':')[0]:
                self.valid.append(candidate)
            logging.info('%s---%s', candidate, remoteIp)
        logging.info('validate complete')
        logging.info('%d valid proxies', len(self.valid))
        logging.info('starting to write to file')
        with open(time.strftime('%Y%m%d%H%M%S'), 'w') as f:
            f.writelines(map(self.process, self.valid))
        logging.info('save complete')
        logging.info('scraping complete')

    def process(self, item):
        """Return *item* with the platform line separator appended,
        ready for writelines()."""
        return item + os.linesep



if __name__ == '__main__':
    # Script entry point: run a full scrape-and-validate cycle.
    proxyScraper().scrape()


