import scrapy
from auctioner.items import AuctionerItem

class AuctionSpider(scrapy.Spider):
    """Spider for auction.artron.net.

    Crawl flow: homepage -> catalog listing page (``pm/minglu.php``) ->
    one detail page per linked entry found in the listing tables.
    Yields ``AuctionerItem`` objects with ``srname`` (link text),
    ``fileurl`` (link href) and ``contact`` (flattened detail text).
    """

    name = 'auction'
    allowed_domains = ['auction.artron.net']
    start_urls = ['https://auction.artron.net/']

    def parse(self, response):
        """Entry point: jump from the homepage to the catalog listing page."""
        # urljoin is safer than string concatenation of start_urls[0].
        yield scrapy.Request(
            response.urljoin('pm/minglu.php'),
            callback=self.parse_title,
        )

    def parse_title(self, response):
        """Parse the listing page and request each entry's detail page."""
        # Tables that hold the catalog entries (site-specific layout path).
        tables = response.xpath('/html/body/div/div[5]/div[2]/table/tr/td/table')
        for table in tables:
            for row in table.xpath('.//tr'):
                # BUG FIX: .xpath() returns a SelectorList, never None, so the
                # original ``is None`` guard was always true and never filtered
                # anything. Test truthiness to actually skip rows without a div.
                if not row.xpath('.//td[1]//div'):
                    continue
                for cell in row.xpath('.//td'):
                    srname = cell.xpath('.//div/a//text()').extract_first()
                    if srname is None:
                        continue
                    fileurl = cell.xpath('.//div/a//@href').extract_first()
                    if not fileurl:
                        # No href on this cell's link; nothing to follow.
                        continue
                    # urljoin guards against relative hrefs, which would make
                    # scrapy.Request raise on a bare path.
                    yield scrapy.Request(
                        response.urljoin(fileurl),
                        callback=self.parse_detail,
                        meta={'srname': srname, 'fileurl': fileurl},
                    )

    def parse_detail(self, response):
        """Parse a detail page into an AuctionerItem."""
        item = AuctionerItem()
        item['fileurl'] = response.meta['fileurl']
        item['srname'] = response.meta['srname']
        # string(.) flattens every text node under the <ul> into one string
        # per matched node; join the results into the final contact blob.
        node = response.xpath('//*[@id="bigBg"]/div[4]/div/div[4]/div[1]/ul')
        item['contact'] = ''.join(node.xpath('string(.)').extract())
        yield item