# -*- coding:utf-8 -*-
import requests
import User_Agent_list
import random
import urllib2
from lxml import etree


class ProxyXpath(object):
    '''爬取西刺代理网站的代理，并判断其中的可用项'''

    def __init__(self):
        self.proxy_list = []
        self.ip_port = []
        self.response = None
        self.url = 'http://www.xicidaili.com/nn'
        self.headers = {"User-Agent": random.choice(User_Agent_list.USER_AGENT_LIST)}
        self.proxy = {"http": "222.74.225.231:3128"}

    def spider_proxy(self):
        '''爬取西刺代理网站'''
        self.response = requests.get(self.url, headers=self.headers)
        # print self.response.content

    def xpath_proxy(self):
        '''xpath匹配出代理的ip和port'''
        # 字符串解析为HTML文档
        html = etree.HTML(self.response.content)
        # with open('lxml_html.html', 'w') as f:
        #     f.write(etree.tostring(html))
        ip_port_list = html.xpath("""//table[@id="ip_list"]/tr/td[2]/text()|//table[@id="ip_list"]/tr/td[3]/text()""")
        ip_list = []
        port_list = []
        # print ip_port_list
        for i in range(len(ip_port_list)):
            if i % 2 == 0:
                ip_list.append(ip_port_list[i])
            else:
                port_list.append(ip_port_list[i])
        self.ip_port = zip(ip_list, port_list)

    def choice_proxy(self):
        '''测试代理列表中的代理是否可用'''
        for i in self.ip_port:
            http_ip = i[0] + ':' + i[1]
            proxy = {"http": http_ip}
            proxy_handler = urllib2.ProxyHandler(proxy)
            opener = urllib2.build_opener(proxy_handler)
            try:
                response = opener.open("http://ip.chinaz.com/getip.aspx", timeout=2).read()
                print response
                self.proxy_list.append(i[0] + ":" + i[1])
                if len(self.proxy_list) >= 2:
                    break
            except Exception as e:
                # print e
                pass



def get_proxy_list():
    myspider = ProxyXpath()
    myspider.spider_proxy()
    myspider.xpath_proxy()
    myspider.choice_proxy()
    print '代理准备完毕,代理池：'
    print myspider.proxy_list
    return myspider.proxy_list

    # http_ceshi = random.choice(myspider.proxy_list)
    # proxy = {"http": http_ceshi}
    # proxy_handler = urllib2.ProxyHandler(proxy)
    # opener = urllib2.build_opener(proxy_handler)
    # response = opener.open("http://ip.chinaz.com/getip.aspx", timeout=10).read()
    # print response

# Script entry point: build and print the proxy pool when run directly.
if __name__ == '__main__':
    get_proxy_list()
