#!/usr/bin/python
#coding=utf-8
'''
Created on 2016-10-25
@author: Trunks(GaoMing)
'''

from trunks.items import AutoItem
from scrapy.spiders import CrawlSpider


#class CarSpider(scrapy.spiders.Spider):
'''
DEBUG: Crawled (200) <GET http://mall.autohome.com.cn/> (referer: None)
INFO: Closing spider (finished)
The request gets blocked by the site's anti-crawling measures, so set the
Referer header. Open question: how to set the anti-hotlinking Referer
dynamically per request?
'''
class AutoSpider(CrawlSpider):
    """Spider that scrapes category link texts/hrefs from mall.autohome.com.cn.

    Yields one AutoItem per matching nav div, with:
      t1 -- list of anchor texts found under dl/dd/a
      t2 -- list of anchor hrefs found under dl/dd/a
    """
    # Per-spider settings: attach this spider's own item pipelines
    # (JSON file writer and MySQL writer) with their priorities.
    custom_settings = {
        'ITEM_PIPELINES': {
            'trunks.pipelines.JsonWriterAtuoPipeline': 334,
            'trunks.pipelines.MysqldbautoPipeline': 501,
        }
    }
    name = "auto"
    allowed_domains = ["autohome.com.cn"]
    start_urls = [
        "http://mall.autohome.com.cn/",
         ]

    def parse(self, response):
        """Parse the start page and yield an AutoItem per nav block."""
        # Select every div with class="nav-list-pop-main-half".
        for sel in response.xpath('//div[@class="nav-list-pop-main-half"]'):
            try:
                item = AutoItem()
                # Within that div, collect the text and href of every
                # a tag under dl/dd.  extract() returns a LIST of strings.
                gms_t1 = sel.xpath('dl/dd/a/text()').extract()
                gms_t2 = sel.xpath('dl/dd/a/@href').extract()
                # item['t1'] maps to the t1 field declared in items.py.
                item['t1'] = gms_t1
                item['t2'] = gms_t2
                # BUG FIX: the original used 'gms_t1: ' + gms_t1, which
                # raises TypeError (str + list); the broad except swallowed
                # it, so `yield item` was never reached and no items were
                # produced.  Format the list into the message instead.
                print('gms_t1: {}'.format(gms_t1))
                print('gms_t2: {}'.format(gms_t2))
                yield item
            except Exception as e:
                # Best-effort: log the failure for this div and continue
                # with the remaining divs.
                print(e)
