import scrapy

class QuotesSpider(scrapy.Spider):
    """Crawl instrument listings from migelab.com.

    For each listing page, yields one dict per table row (image, title,
    instrument number, contact, phone, organisation) and schedules a
    follow-up request to the row's detail page, which yields a second
    dict with manufacturer information.
    """

    name = "quotes"
    start_urls = [
        'http://www.migelab.com/index.php/index/index/p/'
    ]

    def parse(self, response):
        """Parse a listing page: yield row items and detail-page requests.

        :param response: listing-page HTML response.
        :yields: item dicts and ``scrapy.Request`` objects for detail pages.
        """
        # NOTE(review): the `tbody` steps in these XPaths match only if the
        # raw HTML really contains <tbody>; browsers insert it automatically,
        # so verify against the page source, not DevTools.
        for row in response.xpath("//div[@class='fl list_cont']/table/tr"):
            # extract_first() returns None for a missing cell instead of
            # raising IndexError, so one malformed row cannot abort the
            # whole callback the way .extract()[0] did.
            yield {
                'imgurl': row.xpath('td[1]/div/a/img/@src').extract_first(),
                'title': row.xpath('td[2]/table/tbody/tr/td[1]/a/text()').extract_first(),
                'instru_num': row.xpath('td[2]/table/tbody/tr/td[2]/span/text()').extract_first(),
                'contact_man': row.xpath('td[2]/table/tbody/tr/td[3]/span/text()').extract_first(),
                'phone': row.xpath('td[2]/table/tbody/tr/td[4]/span/text()').extract_first(),
                'org': row.xpath('td[2]/table/tbody/tr/td[5]//span/text()').extract_first(),
            }
            detail_href = row.xpath('td[2]/table/tbody/tr/td[1]/a/@href').extract_first()
            if detail_href:
                # urljoin handles absolute and relative hrefs alike; the old
                # 'http://www.migelab.com' + url concat assumed a
                # slash-prefixed path and silently built bad URLs otherwise.
                yield scrapy.Request(response.urljoin(detail_href),
                                     callback=self.parse_content)

        page_nodes = response.xpath('//div[@class="page w_1080 green-black"]/a')
        # print() (not the py2 `print` statement) works on both Python 2 and 3.
        print(len(page_nodes))
        # TODO: pagination is currently disabled. Before enabling, guard
        # against the last page linking to itself (infinite crawl loop):
        # next_page = page_nodes[-1].xpath('@href').extract_first()
        # if next_page:
        #     yield scrapy.Request(response.urljoin(next_page), callback=self.parse)

    def parse_content(self, response):
        """Parse an instrument detail page and yield manufacturer info.

        :param response: detail-page HTML response.
        :yields: one dict with manufacturer name, country, and address
            (values are None when the expected node is absent).
        """
        print(len(response.xpath("//section[@class='96wxDiy']/p")))
        # Positional p[4]/p[5]/p[11] indexing is fragile against layout
        # changes; extract_first() degrades a missing field to None
        # instead of crashing the spider.
        manu_info = {
            'manufactor': response.xpath("//section[@class='96wxDiy']/p[4]/span[2]/span/text()").extract_first(),
            'manufactor_country': response.xpath("//section[@class='96wxDiy']/p[5]/span[2]/span/text()").extract_first(),
            'org_add': response.xpath("//section[@class='96wxDiy']/p[11]/span/span[2]/text()").extract_first(),
        }
        print(manu_info)
        yield manu_info
