import scrapy
from pullref.items import PullrefItem


class GetdoiSpider(scrapy.Spider):
    """Scrape the numbered reference list of a Springer article.

    For each entry in the article's "References" section, emit a
    ``PullrefItem`` with the reference number, first author segment,
    title segment, publication year, and the DOI / PubMed links when
    present.
    """

    name = 'getdoi'
    allowed_domains = ['link.springer.com']
    start_urls = [
        "https://link.springer.com/article/10.1007/s12264-017-0100-y"]

    def parse(self, response):
        """Yield one ``PullrefItem`` per reference on the article page.

        Parameters
        ----------
        response : scrapy.http.Response
            The downloaded article page.

        Yields
        ------
        PullrefItem
            Fields ``number``, ``author``, ``title``, ``year``, ``doi``,
            ``pubmed``; any field that cannot be located is ``None``.
        """
        for counter in response.xpath(
                "//span[@class='c-article-references__counter']"):
            raw_number = counter.xpath("text()").extract_first()
            if not raw_number:
                # A counter span with no text gives us no reference number
                # to key the remaining lookups on — skip it.
                continue

            item = PullrefItem()
            # Counter text looks like "12." — strip the trailing dot.
            item["number"] = raw_number[:-1]

            # Guard against a missing ref-CR node: extract_first() returns
            # None there, and None.split(...) would raise AttributeError.
            ref_text = response.xpath(
                f"//*[@id='ref-CR{item['number']}']/text()").extract_first()
            reference_context = ref_text.split(". ") if ref_text else []
            item["author"] = reference_context[0] if reference_context else None
            item["title"] = (reference_context[1]
                             if len(reference_context) > 1 else None)

            # All remaining fields live under the same <li> of the
            # bibliography list — hoist the shared XPath prefix.
            li = f"//*[@id='Bib1-content']/div/ol/li[{item['number']}]"
            item["year"] = response.xpath(
                f"{li}/meta[2]/@content").extract_first()
            item["doi"] = response.xpath(
                f"{li}/p[2]/a[contains(text(), 'Article')]/@href"
            ).extract_first()
            item["pubmed"] = response.xpath(
                f"{li}/p[2]/a[contains(text(), 'PubMed')]/@href"
            ).extract_first()

            # Use the spider logger instead of print() so output respects
            # Scrapy's LOG_LEVEL configuration.
            self.logger.debug("Scraped reference %s: %r",
                              item["number"], dict(item))
            yield item
