import json
import os

import scrapy
from bs4 import BeautifulSoup
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from twisted.internet import reactor

# Scrapy settings: export scraped ScopusItem rows to an xlsx feed via scrapy-xlsx.
SETTINGS = {
    "FEEDS": {
        "products.xlsx": {"format": "xlsx", "overwrite": True},
    },
    # NOTE(review): placeholder User-Agent — replace with a realistic one before use.
    'USER_AGENT': 'xxx',
    'FEED_EXPORTERS': {
        'xlsx': 'scrapy_xlsx.XlsxItemExporter',
    },
    # BUG FIX: this previously listed product fields (title, desc, pic1..pic5,
    # price, category, ean) that no ScopusItem declares, so every exported row
    # would have been empty. Use the actual ScopusItem field names instead.
    'FEED_EXPORT_FIELDS': ['auth_id', 'full_name', 'institution', 'country',
                           'orc_id', 'h_index', 'email'],
}


class ScopusItem(scrapy.Item):
    """Container for one scraped Scopus author profile."""

    auth_id = scrapy.Field()      # Scopus author id (numeric part after '#')
    full_name = scrapy.Field()    # first entry of the profile's nameVariants
    institution = scrapy.Field()  # latest affiliated institution name
    country = scrapy.Field()      # country of that institution's address
    orc_id = scrapy.Field()       # ORCID identifier, if present
    h_index = scrapy.Field()      # author's h-index
    email = scrapy.Field()        # contact email address, if present


class FruugoSpider(scrapy.Spider):
    """Spider that scrapes Scopus author search pages.

    Reads a Cookie header from cookie.txt, start URLs from url.txt and an
    optional HTTP proxy from proxy.txt, then yields one ScopusItem per
    author found in each result page.
    """

    name = "scopus"

    def start_requests(self):
        """Load cookie / url / proxy config files and yield the initial requests."""
        cookie = None
        with open("cookie.txt", 'r', encoding='utf-8') as f:
            for line in f:
                if line.lower().startswith("cookie"):
                    # BUG FIX: split(":", 1) keeps cookie values that
                    # themselves contain ':' (the old split(":")[1] truncated them).
                    cookie = line.split(":", 1)[1].strip()
        if cookie is None:
            print("未填入必须的cookie到文件cookie.txt! 程序退出。")
            # BUG FIX: previously fell through and crawled without a cookie
            # despite announcing an exit.
            return
        headers = {
            "Cookie": cookie
        }
        with open('url.txt', 'r', encoding='utf-8') as f:
            urls = f.readlines()
        # Strip first so whitespace-only lines are also discarded.
        urls = [x.strip() for x in urls if x.strip()]

        if os.path.exists('proxy.txt'):
            print("只会读取proxy.txt内第一行作为http代理，默认为http代理。仍需其他协议的代理，请务必带协议头")
            with open('proxy.txt', 'r', encoding='utf-8') as f:
                proxy = f.readline().strip()
            proxy = proxy if proxy.startswith('http://') else 'http://' + proxy

            for url in urls:
                # BUG FIX: the proxy branch previously dropped both the Cookie
                # header and meta["headers"], which parse() requires — the
                # crawl would fail with KeyError on every response.
                yield scrapy.Request(url=url, headers=headers,
                                     meta={'proxy': proxy, 'headers': headers})
        else:
            for url in urls:
                yield scrapy.Request(url=url, meta={"headers": headers}, headers=headers)

    def parse(self, response):
        """Extract author ids from the page's embedded JSON and request each profile."""
        headers = response.meta["headers"]
        soup = BeautifulSoup(response.text, 'html.parser')
        # The search page embeds its data as JSON inside #jsonMainResponse.
        json_body = soup.select_one("#jsonMainResponse").text
        js = json.loads(json_body)
        authors = js['body']['AUTHOR_NAME_AND_ID']['data']
        # Each entry's id looks like "<prefix>#<numeric id>"; keep the numeric part.
        author_ids = [x['id'].split("#")[1] for x in authors]
        for authid in author_ids:
            # BUG FIX: scrapy.Request has no 'parser' kwarg (TypeError);
            # the handler must be passed as callback=.
            yield scrapy.Request(url="https://www.scopus.com/api/authors/{}".format(authid),
                                 callback=self.parse_json,
                                 headers=headers, meta={"authid": authid})

    def parse_json(self, response):
        """Parse one author-profile JSON response into a ScopusItem."""
        author_id = response.meta["authid"]
        js = json.loads(response.text)
        email_address = js["emailAddress"]
        h_index = js["hindex"]
        institution = js["latestAffiliatedInstitution"]["name"]
        country = js["latestAffiliatedInstitution"]["address"]["country"]
        orc_id = js["orcId"]
        full_name = js["nameVariants"][0]["full"]
        # BUG FIX: the item field is named auth_id; passing author_id=...
        # raised KeyError on every author.
        return ScopusItem(auth_id=author_id, full_name=full_name, institution=institution,
                          country=country, orc_id=orc_id, h_index=h_index,
                          email=email_address)


def start_spider():
    """Run the Scopus spider under a Twisted reactor, blocking until it finishes."""
    configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})
    crawler_runner = CrawlerRunner(SETTINGS)
    finished = crawler_runner.crawl(FruugoSpider)
    # Stop the reactor whether the crawl succeeds or errors out.
    finished.addBoth(lambda _unused: reactor.stop())
    reactor.run()  # blocks here until reactor.stop() fires


def html_parse_debug():
    """Offline helper: parse a saved debug.html and print the extracted author ids."""
    with open('debug.html', 'r', encoding='utf-8') as fh:
        page = fh.read()
    parsed = BeautifulSoup(page, 'html.parser')
    payload = json.loads(parsed.select_one("#jsonMainResponse").text)
    records = payload['body']['AUTHOR_NAME_AND_ID']['data']
    ids = [record['id'].split("#")[1] for record in records]
    print(ids)


if __name__ == '__main__':
    start_spider()

    # Swap in html_parse_debug() to test the HTML parsing against a saved
    # debug.html without launching the crawler.
    # html_parse_debug()
