#encoding=utf8
# Declares the source-file encoding so non-ASCII text in this file parses correctly.
import scrapy
from scrapy import selector
from lagou2.items import Lagou2Item

class LabListSpider(scrapy.Spider):
    """Crawl lab-equipment listings from www.migelab.com.

    ``parse`` walks each listing page, yielding one request per detail
    link plus a request for the next page; ``parse_content`` scrapes a
    detail page into a ``Lagou2Item``.
    """
    name = "lab_list"
    allowed_domains = ["www.migelab.com"]
    start_urls = [
        "http://www.migelab.com/index.php/index/index/p/"
    ]
    url = start_urls[0]

    def parse(self, response):
        """Parse a listing page: yield detail-page requests, then follow pagination."""
        rows = selector.Selector(response).xpath("//div[@class='fl list_cont']/table/tr")
        for row in rows:
            # Guard with extract() list-check: a row without a detail link
            # previously raised IndexError and aborted the whole page.
            href = row.xpath('td[2]/table/tbody/tr/td[1]/a/@href').extract()
            if not href:
                continue
            yield scrapy.Request('http://www.migelab.com' + href[0],
                                 callback=self.parse_content)

        # Pagination: the last <a> in the pager block is the "next page" link.
        page_nodes = response.xpath('//div[@class="page w_1080 green-black"]/a')
        if page_nodes:
            next_page = page_nodes[len(page_nodes) - 1].xpath("@href").extract_first()
            # The follow-request must be INSIDE this guard: yielding
            # scrapy.Request(None) crashes the spider at the final page.
            if next_page:
                print('next page:' + next_page)  # print() works in Py2 and Py3
                yield scrapy.Request(response.urljoin(next_page),
                                     callback=self.parse)

    def parse_content(self, response):
        """Scrape one detail page into a Lagou2Item.

        Field positions (p[1], p[2], ...) mirror the fixed layout of the
        site's detail template; org_addr is optional on some pages.
        """
        d = response.xpath("//section[@class='96wxDiy']")

        lab_detail = Lagou2Item()
        # The image src path is rewritten: drop the first 3 path segments
        # and re-root it under the site's kindeditor attachment directory.
        img_src = response.xpath('//*[@id="largeimage"]/@src').extract()[0]
        new_img_src = "/".join(img_src.split("/")[3:])
        lab_detail['imgurl'] = 'http://www.migelab.com/style/kindeditor/attached/image/' + new_img_src
        lab_detail['charge_desc'] = response.xpath("//div[@class='data_box fr']/div/ul/li[4]/h4/span/text()").extract()[0]

        lab_detail['title'] = d.xpath("p[1]/span[2]/span/text()").extract()[0]
        lab_detail['v_num'] = d.xpath("p[2]/span[2]/span/text()").extract()[0]

        lab_detail['manu'] = d.xpath("p[4]/span[2]/span/text()").extract()[0]
        lab_detail['manu_country'] = d.xpath("p[5]/span[2]/span/text()").extract()[0]

        lab_detail['contact_man'] = d.xpath("p[7]/span[2]/span/text()").extract()[0]
        lab_detail['phone'] = d.xpath("p[8]/span/span[2]/text()").extract()[0]

        lab_detail['org'] = d.xpath("p[10]/span/span[2]/text()").extract()[0]
        # org_addr is absent on some pages; IndexError is the only error
        # extract()[0] raises for a missing node, so catch exactly that
        # instead of a bare except that swallowed everything.
        try:
            lab_detail['org_addr'] = d.xpath("p[11]/span/span[2]/text()").extract()[0]
        except IndexError:
            lab_detail['org_addr'] = ''

        yield lab_detail