

import requests
import re
import js2py
from lxml import etree
from spiders.base_spider import BaseSpider
from utils.Uhttp import get_request_header
from fake_useragent import UserAgent
from validator.httpbin_validator import check_proxy


class KuaiSpider(BaseSpider):
    """Spider for the Kuaidaili (kuaidaili.com) free proxy listing."""
    # Pages 1-9 of the domestic high-anonymity ("inha") list.
    urls = ['https://www.kuaidaili.com/free/inha/{}/'.format(page)
            for page in range(1, 10)]
    # One proxy per table row under the #list element.
    group_xpath = '//*[@id="list"]/table/tbody/tr'
    # Field extractors, relative to a single row.
    detail_xpath = dict(ip='./td[1]/text()',
                        port='./td[2]/text()',
                        area='./td[5]/text()')


class IP89Spider(BaseSpider):
    """Spider for the 89ip.cn free proxy listing."""
    # Index pages 1-9 of the site.
    urls = ['https://www.89ip.cn/index_{}.html'.format(page)
            for page in range(1, 10)]
    # One proxy per row of the layui table body.
    group_xpath = "//*[@class='layui-table']/tbody/tr"
    # Field extractors, relative to a single row.
    detail_xpath = dict(ip='./td[1]/text()',
                        port='./td[2]/text()',
                        area='./td[3]/text()')


class YunSpider(BaseSpider):
    """Spider for the ip3366.net ("cloud proxy") free proxy listing."""
    # Paginated listing, pages 1-9.
    urls = ['http://www.ip3366.net/?page={}'.format(page)
            for page in range(1, 10)]
    # One proxy per table row inside the #list container.
    group_xpath = "//*[@id='list']//tbody/tr"
    # Field extractors, relative to a single row.
    detail_xpath = dict(ip='./td[1]/text()',
                        port='./td[2]/text()',
                        area='./td[6]/text()')


class KxSpider(BaseSpider):
    """Spider for the kxdaili.com free proxy listing."""
    # Pages 1-9 of listing section 1.
    urls = ['http://www.kxdaili.com/dailiip/1/{}.html'.format(page)
            for page in range(1, 10)]
    # One proxy per row of the "active" table's body.
    group_xpath = '//*[@class="active"]/tbody/tr'
    # Field extractors, relative to a single row.
    detail_xpath = dict(ip='./td[1]/text()',
                        port='./td[2]/text()',
                        area='./td[6]/text()')


class JXSpider(BaseSpider):
    """Spider for ip.jiangxianli.com, filtered to anonymity level 2."""
    # Single listing page (anonymity=2 query filter).
    urls = ['https://ip.jiangxianli.com/?anonymity=2']
    # One proxy per row of the layui table body.
    group_xpath = '//*[@class="layui-table"]/tbody/tr'
    # Field extractors, relative to a single row.
    detail_xpath = dict(ip='./td[1]/text()',
                        port='./td[2]/text()',
                        area='./td[5]/text()')


class Ip66Spider(BaseSpider):
    """Spider for 66ip.cn, which protects its pages with a JS cookie challenge."""
    urls = ['http://www.66ip.cn/{}.html'.format(i) for i in range(1, 10)]
    # Skip the header row of the listing table.
    group_xpath = '//*[@id="main"]/div/div[1]/table/tr[position()>1]'
    detail_xpath = {
        'ip': './td[1]/text()',
        'port': './td[2]/text()',
        'area': './td[3]/text()'}

    def get_page_from_url(self, url):
        """Fetch *url* and return the page body decoded as GBK.

        66ip.cn answers the first request with HTTP 521 and a JavaScript
        snippet that computes an anti-bot cookie.  In that case the JS is
        evaluated with js2py, the cookie is extracted, attached to the
        session, and the request is retried.

        :param url: listing page URL to fetch
        :return: decoded HTML text of the page
        :raises ValueError: if the 521 challenge page has an unexpected format
        """
        # A session keeps any cookies the server sets between requests.
        session = requests.session()
        session.headers = get_request_header()
        # Timeout so a stalled connection cannot hang the crawl.
        response = session.get(url, timeout=10)

        # 521 means the anti-bot JS challenge must be solved first.
        if response.status_code == 521:
            # Capture (bootstrap call, function definition) from the JS.
            # Raw strings: the pattern uses regex escapes like \( and \w.
            matches = re.findall(
                r'window.onload=setTimeout\("(\w+\(\d+\))", \d+\); '
                r'(function \w+\(\w+\).+?)</script>',
                response.content.decode())
            if not matches:
                raise ValueError(
                    'unexpected 521 challenge page from 66ip.cn: '
                    'could not locate the JS snippet')
            call_expr, func_src = matches[0]

            # The challenge function ends by eval-ing its payload; rewrite
            # it to return the payload instead, so we can inspect it.
            func_src = func_src.replace('eval("qo=eval;qo(po);");', 'return po;')

            # Load the patched function into a js2py environment and run the
            # bootstrap call, capturing its result in JS variable `a`.
            context = js2py.EvalJs()
            context.execute(func_src)
            context.execute("a={}".format(call_expr))

            # The payload sets document.cookie; pull out the name/value pair.
            cookie = re.findall(r"document.cookie='(\w+)=(.+?);", context.a)
            if not cookie:
                raise ValueError(
                    'unexpected 521 challenge result from 66ip.cn: '
                    'no cookie assignment found')
            session.cookies[cookie[0][0]] = cookie[0][1]
            # Retry now that the anti-bot cookie is in place.
            response = session.get(url, timeout=10)

        return response.content.decode('gbk')


if __name__ == '__main__':
    # Smoke-test a single spider.  Swap in any of the alternatives:
    #   KuaiSpider()  - kuaidaili.com
    #   IP89Spider()  - 89ip.cn
    #   YunSpider()   - ip3366.net
    #   KxSpider()    - kxdaili.com
    #   Ip66Spider()  - 66ip.cn (solves the JS cookie challenge)
    spider = JXSpider()
    for proxy in spider.get_proxies():
        print(proxy)
