import scrapy
from blanks.items import BlanksItem

class Blankspider(scrapy.Spider):
    """Scrape Bank of China's foreign-exchange quotation table.

    Crawls https://www.boc.cn/sourcedb/whpj/ and yields one
    ``BlanksItem`` per currency row, with buy/sell rates and the
    publication date/time split into separate fields.
    """

    name = 'mySpider'
    allowed_domains = ['boc.cn']

    def start_requests(self):
        """Issue the single request for the FX quotation page."""
        url = "https://www.boc.cn/sourcedb/whpj/"
        yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        """Extract one item per data row of the rates table.

        Rows are selected by requiring a non-empty first cell and a
        7th cell carrying the "pjrq" (publish-date) class, which skips
        the header row.
        """
        blank_rows = response.xpath('//tr[td[1] and td[7][contains(@class, "pjrq")]]')
        for row in blank_rows:
            item = BlanksItem()
            item['current'] = row.xpath('./td[1]/text()').get(default='').strip()
            item['spot_buy'] = row.xpath('./td[2]/text()').get(default='').strip()
            item['cash_buy'] = row.xpath('./td[3]/text()').get(default='').strip()
            item['spot_sell'] = row.xpath('./td[4]/text()').get(default='').strip()
            item['cash_sell'] = row.xpath('./td[5]/text()').get(default='').strip()
            item['bank_rate'] = row.xpath('./td[6]/text()').get(default='').strip()
            # td[7] holds "YYYY-MM-DD HH:MM:SS"; keep only the date part here,
            # the standalone time column (td[8]) supplies publish_time.
            full_date = row.xpath('./td[7]/text()').get(default='').strip()
            item['publish_date'] = full_date.split(' ')[0]
            item['publish_time'] = row.xpath('./td[8]/text()').get(default='').strip()
            yield item
