#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

import logging
import logging.config
import random

import scrapy
import time
from scrapy import signals

from proxyips.base.base import agents
from proxyips.items import ProxyipsItem


class ProxyipsSpider(scrapy.Spider):
    """Spider that scrapes free proxy IPs from xicidaili.com.

    The ``parse``/``parsegetips`` callbacks are a legacy path for
    kuaidaili.com and are kept for backward compatibility.

    Run:          scrapy crawl getproxyips
    Export JSON:  scrapy crawl getproxyips -o items.json
    """

    # Unique spider name — no two spiders in a project may share one.
    name = "getproxyips"

    # Replaced with a real logging.Logger in init(); the class attribute
    # also shadows Scrapy's read-only ``logger`` property so the
    # instance assignment in init() is legal.
    logger = None

    custom_settings = {
        # Item pipeline; the number is the execution priority.
        'ITEM_PIPELINES': {'proxyips.pipelines.ProxyIpsPipeline': 300},
        # Downloader middleware.
        'DOWNLOADER_MIDDLEWARES': {'proxyips.middlewares.processMiddleware': 100, }
    }

    # Base headers for GET requests; User-Agent and Host are filled in
    # per request by the page callbacks.
    headers = {
        "Accept": "text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, image/apng, */*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN, zh;q=0.9",
        "Connection": "keep-alive",
        # Header values should be strings, not ints.
        "Upgrade-Insecure-Requests": "1",
    }

    def __init__(self, category=None, *args, **kwargs):
        """Accept the optional ``category`` command-line argument.

        Overriding ``__init__`` is required when ``start_requests`` is
        overridden but the spider must still accept crawl arguments.

        :param category: optional crawl argument (currently unused)
        """
        super(ProxyipsSpider, self).__init__(*args, **kwargs)

    def init(self):
        """Configure logging from ``<project>/config/logging.conf``.

        The config file lives one directory above this file, inside
        ``config/``; ``os.path.join`` builds the path portably instead
        of splitting on ``os.sep`` by hand.
        """
        here = os.path.abspath(os.path.dirname(__file__))
        conf_path = os.path.normpath(
            os.path.join(here, os.pardir, 'config', 'logging.conf'))
        logging.config.fileConfig(conf_path)
        # Obtain the root logger defined in logging.conf.
        self.logger = logging.getLogger("root")

    def start_requests(self):
        """Entry point: request the first xicidaili listing page.

        ``xici`` receives the response and fans out the pagination.
        """
        print("**********start_requests***********")
        self.init()
        # xicidaili free-proxy listing, page 1.
        url = 'http://www.xicidaili.com/nn/1'
        # yield emits the request; the second argument handles the response.
        yield scrapy.Request(url, self.xici, headers={
            "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
        })

    def xici(self, response):
        """Fan out requests for the first 5 xicidaili listing pages.

        :param response: response for the page-1 listing
        """
        print("***********xici parse******************")
        # Save the raw page to disk for debugging.
        with open("xicioneipspage.html", 'wb') as f:
            f.write(response.body)
        # response.body is bytes; truthiness covers both None and empty.
        if not response.body:
            self.logger.info("have a url request is null: %s " % response.url)
            return
        for page in range(1, 6):  # only the first 5 pages
            print("i" + str(page))
            # Rebuild the URL with the page number as the last segment.
            parts = response.url.split('/')
            parts.pop()
            parts.append(str(page))
            url = '/'.join(parts) + '/'
            # Rotate the User-Agent to reduce the chance of being blocked.
            self.headers["User-Agent"] = random.choice(agents)
            self.headers["Host"] = "www.xicidaili.com"
            headers = self.headers
            print("**********************************************")
            print(headers)
            print(url)
            # dont_filter=True disables Scrapy's duplicate-URL filter.
            # NOTE: a malformed header silently produces no request at all.
            yield scrapy.Request(url=url, callback=self.xiciparsegetips,
                                 headers=headers, dont_filter=True)

    def xiciparsegetips(self, response):
        """Parse one xicidaili listing page into ProxyipsItem objects.

        :param response: response for a single listing page
        """
        print("***********parsegetips******************")
        print(response.url)
        # Save the raw page, named after its page number, for debugging.
        filename = "xicipage" + response.url.split('/')[-2] + ".html"
        with open(filename, 'wb') as f:
            f.write(response.body)
        if not response.body:
            self.logger.info("have a url request is null: %s " % response.url)
            return
        # One <tr class="odd"> per proxy row in the #ip_list table.
        listip = response.css('table#ip_list')[0].css('tr.odd')
        print("每页数据的总数是" + str(len(listip)))
        for row in listip:
            ipitem = ProxyipsItem()
            # Column 3 holds the location as an <a> link.
            cells = row.css('td')
            ipitem['position'] = cells[3].css('a::text').extract()[0]
            # Remaining fields come from the flat list of cell texts.
            texts = row.css('td::text').extract()
            ipitem['ip'] = texts[0]
            ipitem['port'] = texts[1]
            ipitem['anonymous'] = texts[4]
            ipitem['type'] = texts[5]
            ipitem['Responsespeed'] = texts[10]
            ipitem['Finalverificationtime'] = texts[11]
            print(ipitem)
            yield ipitem

    def parse(self, response):
        """Legacy kuaidaili callback: fan out the first 5 listing pages.

        :param response: response for the kuaidaili entry page
        """
        print("***********parse******************")
        # Save the raw page to disk for debugging.
        with open("oneipspage.html", 'wb') as f:
            f.write(response.body)
        if not response.body:
            self.logger.info("have a url request is null: %s " % response.url)
            return
        # Last pagination link is the highest page number (kept for
        # reference; the loop below is capped at 5 pages regardless).
        pagenumbers = response.css('div#listnav')[0].css('li a::text').extract()
        maxpage = pagenumbers[-1]
        for page in range(1, 6):  # only the first 5 pages
            print("i" + str(page))
            # Drop the last two URL segments and append the page number.
            parts = response.url.split('/')
            parts.pop()
            parts.pop()
            parts.append(str(page))
            url = '/'.join(parts) + '/'
            self.headers["User-Agent"] = random.choice(agents)
            self.headers["Host"] = "www.kuaidaili.com"
            headers = self.headers
            print("**********************************************")
            print(headers)
            print(url)
            # dont_filter=True disables Scrapy's duplicate-URL filter.
            # NOTE: a malformed header silently produces no request at all.
            yield scrapy.Request(url=url, callback=self.parsegetips,
                                 headers=headers, dont_filter=True)

    def parsegetips(self, response):
        """Legacy kuaidaili callback: parse one listing page into items.

        :param response: response for a single kuaidaili listing page
        """
        print("***********parsegetips******************")
        # Save the raw page, named after its page number, for debugging.
        filename = "page" + response.url.split('/')[-2] + ".html"
        with open(filename, 'wb') as f:
            f.write(response.body)
        if not response.body:
            self.logger.info("have a url request is null: %s " % response.url)
            return
        # One <tr> per proxy row; columns map positionally to item fields.
        listip = response.css('div#list')[0].css('tbody')[0].css('tr')
        for row in listip:
            texts = row.css('td::text').extract()
            print(texts)
            ipitem = ProxyipsItem()
            ipitem['ip'] = texts[0]
            ipitem['port'] = texts[1]
            ipitem['anonymous'] = texts[2]
            ipitem['type'] = texts[3]
            ipitem['position'] = texts[4]
            ipitem['Responsespeed'] = texts[5]
            ipitem['Finalverificationtime'] = texts[6]
            print(ipitem)
            yield ipitem