import requests
from 代理.untils.user_agent import get_request_headers
from lxml import etree
from 代理.domain import Proxy

class BASER_CAPTURE():
    """Generic proxy-list spider.

    Fetches each URL in ``urls``, locates proxy rows with ``group_xpath``,
    and extracts ip/port/area from each row with the XPaths in
    ``detail_xpath``, yielding one ``Proxy`` per row.
    """

    # Class-level defaults; subclasses or __init__ arguments override them.
    urls = []
    group_xpath = ''
    detail_xpath = {}

    def __init__(self, urls=None, group_xpath='', detail_xpath=None):
        """Override the class-level configuration when arguments are given.

        Args:
            urls: list of page URLs to scrape (``None``/empty keeps the
                class default). ``None`` replaces the original mutable
                default ``[]`` — behavior is unchanged because only truthy
                values are assigned.
            group_xpath: XPath selecting one element per proxy row.
            detail_xpath: dict with keys ``'ip'``, ``'port'``, ``'area'``
                mapping to XPaths evaluated relative to each row.
        """
        if urls:
            self.urls = urls
        if group_xpath:
            self.group_xpath = group_xpath
        if detail_xpath:
            self.detail_xpath = detail_xpath

    def get_page_from_url(self, url):
        """Download *url* and return the response body as text.

        A timeout is set so a dead proxy site cannot hang the spider
        forever; callers see the usual ``requests`` exceptions on failure.
        """
        response = requests.get(url, headers=get_request_headers(), timeout=10)
        return response.text

    def get_proxies_from_page(self, page):
        """Parse an HTML page and lazily yield ``Proxy`` objects.

        Rows whose ip or port cell is missing (e.g. header or colspan
        rows matched by ``group_xpath``) are skipped instead of raising
        ``IndexError`` as the original ``[0]`` indexing did.
        """
        element = etree.HTML(page)
        trs = element.xpath(self.group_xpath)
        for tr in trs:
            ips = tr.xpath(self.detail_xpath['ip'])
            ports = tr.xpath(self.detail_xpath['port'])
            if not ips or not ports:
                # Malformed/header row: nothing usable to extract.
                continue
            areas = tr.xpath(self.detail_xpath['area'])
            area = areas[0] if areas else ''
            proxy = Proxy(ips[0], ports[0], area=area)
            # yield keeps extraction lazy: one Proxy at a time.
            yield proxy

    def get_proxies(self):
        """Iterate over all configured URLs and yield every extracted Proxy."""
        for url in self.urls:
            page = self.get_page_from_url(url)
            proxies = self.get_proxies_from_page(page)
            yield from proxies

if __name__ == '__main__':
    # Ad-hoc smoke test: scrape pages 1-10 of ip3366.net and print each proxy.
    page_urls = ['http://www.ip3366.net/?stype=1&page={}'.format(i) for i in range(1, 11)]
    spider = BASER_CAPTURE(
        urls=page_urls,
        group_xpath='//*[@id="list"]/table/tbody/tr',
        detail_xpath={
            'ip': './td[1]/text()',
            'port': './td[2]/text()',
            'area': './td[6]/text()',
        },
    )
    for proxy in spider.get_proxies():
        print(proxy)