import requests
from lxml import etree
from utils.http import get_request_header
from domain import Proxy


class BaseSpider(object):
    """Generic spider for scraping proxy IPs from HTML listing pages.

    Configuration (via class attributes or ``__init__``):
      urls:         list of page URLs to crawl
      group_xpath:  XPath selecting one container element per proxy entry
      detail_xpath: dict of XPaths, relative to a group element, with
                    keys 'ip', 'port' and 'area'
    """
    urls = []  # URLs of the proxy listing pages
    group_xpath = ''  # XPath yielding the per-proxy container elements
    detail_xpath = {}  # relative XPaths for the proxy detail fields

    def __init__(self, urls=None, group_xpath=None, detail_xpath=None):
        # FIX: the original signature used mutable defaults (urls=[],
        # detail_xpath={}), which are shared across all calls; ``None``
        # sentinels avoid that pitfall while keeping the same behavior
        # (falsy arguments fall back to the class attributes).
        if urls:
            self.urls = urls
        if group_xpath:
            self.group_xpath = group_xpath
        if detail_xpath:
            self.detail_xpath = detail_xpath

    def get_proxy(self):
        """Yield a Proxy object for every entry found on every configured URL."""
        for url in self.urls:
            # Fetch the raw page, then parse it and stream out the proxies.
            page = self.get_page_from_url(url)
            yield from self.get_proxy_from_page(page)

    def get_page_from_url(self, url):
        """Download ``url`` and return the raw response body (bytes).

        Raises requests.HTTPError on 4xx/5xx so that an error page is
        never handed to the HTML parser as if it were a proxy listing.
        """
        response = requests.get(url, headers=get_request_header())
        response.raise_for_status()
        return response.content

    def get_proxy_from_page(self, page):
        """Parse an HTML page and yield a Proxy per matched group element."""
        element = etree.HTML(page)
        groups = element.xpath(self.group_xpath)

        for group in groups:
            ip = self.get_first(group.xpath(self.detail_xpath['ip']))
            port = self.get_first(group.xpath(self.detail_xpath['port']))
            area = self.get_first(group.xpath(self.detail_xpath['area']))
            yield Proxy(ip, port, area=area)

    def get_first(self, lis):
        """Return the first item of ``lis`` stripped of whitespace, or '' if empty."""
        return lis[0].strip() if lis else ''

if __name__ == "__main__":
    config = {
        'urls': ['https://www.xicidaili.com/nn/{}'.format(i) for i in range(1, 3)],
        'group_xpath': '//*[@id="ip_list"]/tr[position()>1]',
        'detail_xpath': {
            'ip': './td[2]/text()',
            'port': './td[3]/text()',
            'area': './td[4]/a/text()'
        },
    }
    # 创建通用代理对象
    base_spider = BaseSpider(**config)
    for proxy in base_spider.get_proxy():
        print(proxy)