# -*- coding: utf-8 -*-
"""
Created on 2021-11-10 08:38:25
---------
@summary:
---------
@author: Administrator
"""

import feapder
import pandas as pd
import os
import re
from feapder.utils.log import log

# Directory where downloaded PDFs are stored (created on demand).
PaperFolder='papers'
# Text file that records one completed page URL per line, so reruns can skip them.
CompletedListFile='completed.txt'
# Input spreadsheet with one paper per row.
InputListFile='z:/pubmed.xlsx'

# 1-based column positions in the input Excel sheet.
TitleColIdx=1
FactorColIdx=5
UrlColIdx=13

def read_rows(file):
    """Return all rows of the first sheet of *file* as a 2-D numpy array.

    Raises:
        FileNotFoundError: if *file* does not exist.
    """
    # BUG FIX: the original did `raise 'Excel file not exists:' + file` —
    # raising a plain string is a TypeError in Python 3, masking the real error.
    if not os.path.exists(file):
        raise FileNotFoundError('Excel file not exists:' + file)
    return pd.read_excel(file).values


def log_completed(url: str):
    """Append *url* (whitespace-stripped) as one line to the completed-list file."""
    with open(CompletedListFile, 'a', encoding='utf8') as fp:
        fp.write(url.strip() + '\n')


def load_completed():
    """Return the set of already-completed page URLs recorded on disk.

    Returns an empty set when the completed-list file does not exist.
    """
    if not os.path.exists(CompletedListFile):
        return set()
    with open(CompletedListFile, 'r', encoding='utf8') as f:
        # BUG FIX: the original tested `if x` BEFORE stripping; lines read
        # from a file keep their trailing newline, so the guard was always
        # true and blank lines produced an empty-string entry in the set.
        return {line.strip() for line in f if line.strip()}


class PaperSpider(feapder.AirSpider):
    """Download paper PDFs listed in an Excel spreadsheet.

    Each row supplies a title, an impact factor, and a page URL on either
    sci-hub.se or www.ncbi.nlm.nih.gov. The spider visits the page, extracts
    the direct PDF link, and saves the file under ``save_folder``. Completed
    page URLs are persisted (via ``log_completed``) so reruns skip them.
    """

    def __init__(self, excel, save_folder=PaperFolder, thread_count=1):
        """
        Args:
            excel: path to the input .xlsx file (see module column constants).
            save_folder: directory for downloaded PDFs; created if missing.
            thread_count: number of spider worker threads.
        """
        super().__init__(thread_count=thread_count)
        self.excel = excel
        self.save_folder = save_folder
        os.makedirs(self.save_folder, exist_ok=True)
        self.total_count = 0
        # Successful downloads in this run; reported in end_callback.
        # BUG FIX: the original end_callback read self.failed_list, which was
        # never assigned anywhere, so finishing always raised AttributeError.
        self.download_count = 0
        self.completed_set = load_completed()

    def start_requests(self):
        """Yield one request per spreadsheet row, skipping completed papers."""
        rows = read_rows(self.excel)
        self.total_count = len(rows)
        for row in rows:
            item = {
                'title': row[TitleColIdx - 1],
                'factor': row[FactorColIdx - 1],
                'url': row[UrlColIdx - 1].strip(),
            }
            if item['url'] in self.completed_set:
                continue
            file_name = self._make_pdf_file_name(item)
            if os.path.exists(file_name):
                # Already on disk from an earlier run that didn't get logged.
                continue
            item['file_name'] = file_name
            yield feapder.Request(item['url'], item=item)

    def parse(self, request, response):
        """Extract the PDF link from a paper page and schedule its download."""
        pdf_url = self.extract_pdf_url(response)
        if not pdf_url or '.pdf' not in pdf_url:
            log.error('Extract pdf url failed from:' + response.url)
            return
        yield feapder.Request(pdf_url, callback=self.download_pdf, item=request.item)

    def extract_pdf_url(self, response):
        """Dispatch to the site-specific PDF-link extractor by page host.

        Returns the PDF URL string, or None if the host is unsupported or
        extraction fails.
        """
        req_url = response.url
        if 'sci-hub.se' in req_url:
            return self._extract_sic_hub(response)
        if 'www.ncbi.nlm.nih.gov' in req_url:
            return self._extract_ncbi(response)
        log.error('Unexpected pdf page url:' + req_url)
        return None

    def _extract_sic_hub(self, response):
        # The PDF link is embedded in the download button's onclick handler.
        url = response.xpath(
            '//*[@id="buttons"]/button/@onclick').re_first(r'sci-hub.*?\.pdf')
        return 'https://' + url if url else None

    def _extract_ncbi(self, response):
        # Fixed link position in the page's right-hand sidebar.
        return response.xpath('//*[@id="rightcolumn"]/div[2]/div/ul/li[3]/a/@href').extract_first()

    def download_pdf(self, request, response):
        """Write the PDF body to disk and record the page URL as completed."""
        item = request.item
        try:
            with open(item['file_name'], 'wb') as f:
                f.write(response.content)
        except OSError:
            # Narrowed from a bare `except:` so programming errors are not
            # silently swallowed; only file-write failures are expected here.
            log.error(
                f'Download pdf failed!\nPage url:{item["url"]}\nPdf url:{response.url}')
            return
        log.info(f'Download pdf successful!\nPage url:{item["url"]}')
        log_completed(item['url'])
        self.completed_set.add(item['url'])
        self.download_count += 1

    def _make_pdf_file_name(self, item):
        """Build a filesystem-safe path '[factor]title.pdf' under save_folder."""
        # Replace characters that are invalid in Windows file names; cap the
        # base name at 250 chars to stay under path-length limits.
        rstr = r"[\/\\\:\*\?\"\<\>\|]"
        name = f'[{item["factor"]}]{item["title"]}'[:250] + '.pdf'
        name = re.sub(rstr, "_", name)
        return os.path.join(self.save_folder, name)

    def end_callback(self):
        """Log a run summary (total rows vs. PDFs downloaded this run)."""
        log.info(
            f'Total Paper:{self.total_count}\nDownload Count:{self.download_count}')
        return super().end_callback()


if __name__ == "__main__":
    # Crawl every paper listed in the configured input spreadsheet.
    PaperSpider(InputListFile).start()
