import scrapy


class SpringerSpider(scrapy.Spider):
    """Crawl SpringerLink search results for 'Lithium tetrafluoroborate'.

    Walks the paginated search listing, follows each article card to its
    detail page, and yields one item per article with title, authors,
    publication date, abstract and PDF URL.
    """

    name = "toscrape-springer"
    allowed_domains = ["link.springer.com"]
    start_urls = [
        "https://link.springer.com/search?query=Lithium+tetrafluoroborate&dateFrom=&dateTo=&sortBy=relevance"
    ]

    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        # Keep the crawl slow and polite: low concurrency plus a randomized
        # delay between downloads.
        'CONCURRENT_REQUESTS': 2,
        'DOWNLOAD_DELAY': 3,
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'COOKIES_ENABLED': True,
        'REDIRECT_ENABLED': True,
        'LOG_LEVEL': 'INFO',
        # JOBDIR persists crawl state (queue + dupefilter) so the crawl can
        # be paused and resumed.
        'JOBDIR': 'crawls/springer_search',
        "LOG_FILE": "logs/springer_search.log"
    }

    def start_requests(self):
        """Seed the crawl with the search-result listing page(s)."""
        for url in self.start_urls:
            self.logger.info(f"Starting crawl on {url}")
            # dont_filter: the start URL must always be fetched, even when
            # resuming a JOBDIR crawl whose dupefilter has already seen it.
            yield scrapy.Request(url, callback=self.parse_list, dont_filter=True)

    def parse_list(self, response):
        """Parse one search-result page.

        Schedules a detail request for every article card, then follows the
        'Next page' link until pagination runs out.
        """
        # Each article card in the Springer search results.
        articles = response.css('ol#results-list li[data-test="result-item"]')
        self.logger.info(f"Found {len(articles)} articles on page")

        for article in articles:
            title = article.css('h2 a::text').get(default='').strip()
            href = article.css('h2 a::attr(href)').get()
            if href:
                # FIX: `title` was previously extracted but discarded; pass it
                # along as a fallback for detail pages missing an <h1>.
                # FIX: dropped dont_filter=True so the built-in dupefilter
                # skips articles that appear on more than one result page.
                yield scrapy.Request(
                    response.urljoin(href),
                    callback=self.parse_detail,
                    cb_kwargs={'list_title': title},
                )

        # Pagination.
        next_page = response.css('a[title="Next page"]::attr(href)').get()
        if next_page:
            next_page_url = response.urljoin(next_page)
            self.logger.info(f"Scheduling next page: {next_page_url}")
            yield scrapy.Request(next_page_url, callback=self.parse_list, dont_filter=True)

    def parse_detail(self, response, list_title=''):
        """Extract article metadata from a SpringerLink article page.

        Args:
            list_title: Title captured on the search-result page; used as a
                fallback when the detail page has no h1.c-article-title.
        """
        # Title (fall back to the listing-page title if the h1 is absent).
        title = response.css('h1.c-article-title::text').get(default='').strip() or list_title

        # Authors.
        authors = response.css('ul.c-article-author-list a[data-test="author-name"]::text').getall()
        authors = [a.strip() for a in authors if a.strip()]

        # Publication date (machine-readable datetime attribute).
        pub_date = response.css('li.c-article-identifiers__item time::attr(datetime)').get(default='').strip()

        # Abstract paragraphs.
        abstract_paras = response.css('div#Abs1-section p font::text, div#Abs1-section p::text').getall()
        # FIX: join with a newline — ''.join() glued the last word of one
        # paragraph to the first word of the next.
        abstract = '\n'.join(p.strip() for p in abstract_paras if p.strip())

        # PDF download link (relative on the page; resolve to absolute).
        pdf_url = response.css('a[data-test="pdf-link"]::attr(href)').get()
        if pdf_url:
            pdf_url = response.urljoin(pdf_url)

        self.logger.info(f"Parsed detail: {title}")

        yield {
            'title': title,
            'authors': ', '.join(authors),
            'publication_date': pub_date,
            'abstract': abstract,
            'pdf_url': pdf_url,
            'url': response.url,
        }
