#!/usr/bin/python
#coding=utf-8
'''
Created on 2016-10-25
@author: Trunks(GaoMing)
'''

from trunks.items import AutoItem
from scrapy.spiders import CrawlSpider
import scrapy
from trunks.utils import HeaderUtil
#class CarSpider(scrapy.spiders.Spider):
'''
Observed before adding headers:
DEBUG: Crawled (200) <GET http://mall.autohome.com.cn/> (referer: None)
INFO: Closing spider (finished)
The request was blocked by the site's anti-scraping measures, so a
Referer header is set on each request.
Open question: how to set the anti-leech Referer header dynamically?
'''
class MyAutoSpider(CrawlSpider):
    """Spider for the navigation categories on mall.autohome.com.cn.

    The site rejects bare requests, so every start request carries a
    randomized User-Agent and a Referer header to get past its basic
    anti-scraping checks.
    """

    # Per-spider settings: route scraped items to the MySQL pipeline.
    # NOTE(review): the JSON writer pipeline reportedly misbehaved when
    # the spider was launched from a main script (but worked from the
    # `scrapy` CLI), so it stays disabled here.
    custom_settings = {
        'ITEM_PIPELINES': {
            # 'trunks.pipelines.JsonWriterAtuoPipeline': 334,
            'trunks.pipelines.MysqldbautoPipeline': 501,
        }
    }
    name = "myauto1"
    allowed_domains = ["autohome.com.cn"]

    def start_requests(self):
        """Yield the initial requests with dynamic anti-scraping headers.

        Overrides the default so each request gets a random User-Agent
        (via HeaderUtil) and a Referer; this hook could also be extended
        to rotate proxy IPs.
        """
        referer = "http://mall.autohome.com.cn/"
        start_urls = [
            'http://mall.autohome.com.cn/',
        ]
        for url in start_urls:
            # Random UA + Referer so the request does not look like a
            # headless crawler hitting the page directly.
            headers = {
                'User-Agent': HeaderUtil.getRandomHeaders(),
                'Referer': referer,
            }
            yield scrapy.Request(url, self.parse, headers=headers)

    def parse(self, response):
        """Extract category link texts and hrefs from the nav pop-up.

        Yields one AutoItem per div with class "nav-list-pop-main-half":
          t1 -- list of anchor texts found under dl/dd/a
          t2 -- list of anchor hrefs found under dl/dd/a
        """
        for sel in response.xpath('//div[@class="nav-list-pop-main-half"]'):
            try:
                item = AutoItem()
                # Anchor texts and hrefs under dl/dd/a within this div.
                gms_t1 = sel.xpath('dl/dd/a/text()').extract()
                gms_t2 = sel.xpath('dl/dd/a/@href').extract()
                # Keys must match the fields declared on AutoItem.
                item['t1'] = gms_t1
                item['t2'] = gms_t2
                print('gms_t1: ' + str(gms_t1))  # str() renders the list
                yield item
            except Exception as e:
                # Best-effort: report the failure and continue with the
                # next selector rather than aborting the whole crawl.
                print(e)
