#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

import logging
import logging.config
import random
from scrapy.http import Request
import scrapy
import time

from proxyips.base.base import agents


class Effectivespider(scrapy.Spider):
    """Spider that crawls kuaidaili.com's free-proxy listing pages.

    Requests the first page of both the domestic regular ("intr") and
    domestic high-anonymity ("inha") proxy lists; responses are handed to
    :meth:`parse`.
    """
    name = "effectivespider"
    # Placeholder; replaced with a configured logging.Logger in init().
    logger = ''

    # custom_settings = {
    #     'ITEM_PIPELINES': {'proxyips.pipelines.effectivePipeline': 300},
    #     'DOWNLOADER_MIDDLEWARES': {'proxyips.middlewares.effectivesMiddleware': 100,},
    #     # 'COOKIES_ENABLES': True,
    # }


    # Default headers for GET requests.
    headers = {
        "Accept": "text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, image/apng, */*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN, zh;q=0.8",
        "Connection": "keep-alive",
        # BUG FIX: header values must be str/bytes — Scrapy's header
        # normalization raises TypeError on an int value.
        "Upgrade-Insecure-Requests": "1",

    }

    def __init__(self, category=None, *args, **kwargs):
        '''
        start_urls would normally drive start_requests(); because
        start_requests() is overridden here, any spider arguments
        (e.g. ``scrapy crawl ... -a category=...``) must be accepted
        through __init__.
        :param category: unused; kept for command-line compatibility
        :param args: forwarded to scrapy.Spider
        :param kwargs: forwarded to scrapy.Spider
        '''
        super(Effectivespider, self).__init__(*args, **kwargs)
        self.init()

    def init(self):
        """Load ../config/logging.conf (relative to this file's parent
        directory) and bind the "root" logger to this spider."""
        # Parent directory of the directory containing this file,
        # built with os.path instead of manual split/pop/join.
        parent_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
        path = os.path.join(parent_dir, 'config', 'logging.conf')
        logging.config.fileConfig(path)
        # create logger
        logger_name = "root"
        self.logger = logging.getLogger(logger_name)

    def start_requests(self):
        """Yield one request per kuaidaili free-proxy list page.

        Each ``yield`` emits the request into Scrapy's scheduler; the
        response is processed by the callback (self.parse).
        """
        # kuaidaili domestic regular proxy list
        url = 'http://www.kuaidaili.com/free/intr/1/'
        # url = 'https://www.baidu.com/'
        yield scrapy.Request(url, self.parse, headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3192.0 Safari/537.36",
        })
        print(
            "***********" + url + "*********************************************************************************************")
        # kuaidaili domestic high-anonymity proxy list
        # time.sleep(10)
        url = 'http://www.kuaidaili.com/free/inha/1/'
        yield scrapy.Request(url, self.parse, headers={
            "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
        })
        print(
            "***********" + url + "*********************************************************************************************")
        # urls = ['http://www.kuaidaili.com/free/intr/1/','http://www.kuaidaili.com/free/inha/1/']
        # for url in urls:
        #     print ("***********"+url+"*********************************************************************************************")
        #     yield scrapy.Request(url, self.parse, headers={
        #         "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
        #     })


    def parse(self, response):
        """Response callback: currently only prints the crawled URL.

        :param response: scrapy.http.Response for a requested page
        """
        print("***********parse*********************************************************************************************")
        print(response.url)

