# -*- coding: utf-8 -*-
import random

import scrapy
from NewsSpider.items import TestItem

class TestSpider(scrapy.Spider):
    """Diagnostic spider for verifying the proxy and user-agent middlewares.

    Fires several identical requests at http://httpbin.org/ip (which echoes
    the caller's origin IP) and yields one TestItem per response, so the
    rotating proxy / fake user-agent behaviour can be inspected downstream.
    """

    name = 'test'
    # httpbin.org is included so OffsiteMiddleware does not depend on
    # dont_filter=True to let the requests through.
    allowed_domains = ['exercise.kingname.info', 'httpbin.org']
    # Alternative test endpoints:
    #   http://exercise.kingname.info/exercise_middleware_ua  (echoes UA)
    #   http://exercise.kingname.info/exercise_middleware_ip  (echoes IP)
    start_urls = ['http://httpbin.org/ip']

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.FakeUserAgentMiddleware': 543,
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            # Disable the built-in UA middleware so the fake one takes effect.
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
    }

    def start_requests(self):
        """Yield four identical requests tagged with ids 1-4.

        dont_filter=True bypasses the duplicate filter, which would
        otherwise drop every request to the same URL after the first —
        multiple hits are exactly what this middleware test needs.
        """
        for request_id in range(1, 5):
            yield scrapy.Request(
                url=self.start_urls[0],
                callback=self.parse,
                meta={'id': request_id},
                dont_filter=True,
            )

    def parse(self, response):
        """Wrap each response body in a TestItem tagged with its request id."""
        item = TestItem()
        item['id'] = response.meta['id']
        # For httpbin.org/ip the body is JSON like {"origin": "1.2.3.4"};
        # stored raw so the rotating proxy IPs can be compared per id.
        item['text'] = response.text
        yield item



