# -*- coding: utf-8 -*-
import scrapy

from SCIF.items import ScifItem

class LetputtpostionSpider(scrapy.Spider):
    """Crawl the letpub.com.cn journal index and yield one ScifItem per journal row.

    Walks pages 1..10 of the paginated journal-application table, extracting
    ISSN, title, impact factor, partition, subjects, SCI status, OA status,
    acceptance proportion, review time, recent articles and view count.
    """

    name = 'LetputtPostion'
    allowed_domains = ['www.letpub.com.cn']

    # Base listing URL; the page number is appended to build each request.
    url = "http://www.letpub.com.cn/index.php?page=journalapp&fieldtag=&firstletter=&currentpage="
    # Current page number (1-based); advanced once per parsed page, capped at 10.
    offset = 1

    start_urls = [url + str(offset)]

    def parse(self, response):
        """Extract journal rows from one listing page, then request the next page.

        :param response: scrapy Response for one listing page.
        :yields: ScifItem per data row, then one scrapy.Request for the next page
                 (while fewer than 10 pages have been fetched).
        """
        # The data table is the second <table> on the page; header/filler rows
        # lack the second-to-last cell, so that cell's presence marks a data row.
        for each in response.xpath("//table[2]/tbody/tr"):

            if each.xpath("./td[last()-1]").extract():

                item = ScifItem()
                # ISSN
                item['issn'] = each.xpath("./td[1]/text()").extract()
                # Journal title
                item['periodical'] = each.xpath("./td[2]/a/text()").extract()
                # Detail-page link
                item['src'] = each.xpath("./td[2]/a/@href").extract()
                # Impact factor
                item['factor'] = each.xpath("./td[3]/text()").extract()
                # Partition (CAS quartile)
                item['partition'] = each.xpath("./td[4]/text()").extract()
                # Major discipline
                item['bigsubject'] = each.xpath("./td[5]/text()").extract()
                # Sub-discipline
                item['smallsubject'] = each.xpath("./td[6]/text()").extract()
                # SCI indexing status
                item['sci'] = each.xpath("./td[7]/text()").extract()
                # Open-access status
                item['oa'] = each.xpath("./td[8]/text()").extract()
                # Acceptance proportion
                item['proportion'] = each.xpath("./td[9]/text()").extract()
                # Review turnaround time
                item['reviewtime'] = each.xpath("./td[10]/text()").extract()
                # Recent articles
                item['articles'] = each.xpath("./td[11]/a/text()").extract()
                # View count
                item['view'] = each.xpath("./td[12]/text()").extract()

                yield item

        # BUGFIX: pagination used to sit inside the row loop, so the offset was
        # bumped and a next-page Request yielded once PER ROW (and page 10 was
        # re-requested unconditionally, relying on Scrapy's dupe filter).
        # Advance the page exactly once, after all rows of this page are done,
        # and stop cleanly after page 10.
        if self.offset < 10:
            self.offset += 1
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
