import json
import re
from time import sleep
from urllib.parse import quote, unquote

import scrapy


class IfengSpider(scrapy.Spider):
    """Search-driven spider for ifeng.com.

    Queries the shankapi.ifeng.com search API for a keyword, follows every
    result that has a thumbnail, and appends each article's title, thumbnail
    URL, summary, and body to ``<keyword>.html``.
    """

    name = 'ifeng'
    allowed_domains = ['shankapi.ifeng.com', 'ishare.ifeng.com']

    def __init__(self, name=None, **kwargs):
        # Let scrapy.Spider do the standard name/kwargs bookkeeping instead
        # of duplicating it here (the original re-implemented it and then
        # called super() anyway, running it twice).
        super().__init__(name, **kwargs)
        self.count = 0  # number of articles written so far
        # Spider.__init__ may have set start_urls to an empty list, so test
        # for emptiness rather than attribute existence.
        if not getattr(self, 'start_urls', None):
            # Interactive fallback: ask for a search term and start at page 1
            # of the search-API pagination.
            self.query = quote(input("请输入搜索字符串: "))
            self.start_urls = [
                f"https://shankapi.ifeng.com//autumn/getSoFengData/all/{self.query}/1/"
            ]

    def parse(self, response):
        """Parse one page of search-API JSON.

        Yields a request per article that has a thumbnail, then a request
        for the next results page while pages remain.
        """
        data = json.loads(response.text)['data']
        current_url = response.request.url
        # The page number is the last path segment (".../{query}/{page}/").
        # The old single-char regex r"/(\w)/" broke for pages >= 10.
        current_page = int(current_url.rstrip('/').rsplit('/', 1)[-1])
        # NOTE(review): sleep() blocks the Twisted reactor; DOWNLOAD_DELAY in
        # settings is the idiomatic way to throttle. Kept to preserve pacing.
        sleep(0.5)
        for item in (data['items'] or []):
            if not item.get('thumbnails'):
                continue
            url = item['url']
            # Normalize protocol-relative URLs ("//ishare.ifeng.com/...").
            # The previous code skipped (continue) URLs already containing
            # "https", silently dropping those articles.
            if not url.startswith('http'):
                url = 'https:' + url
            imgurl = item['thumbnails']['image'][0]['url']
            yield scrapy.Request(url, callback=self.parse_info_url,
                                 meta={'imgurl': imgurl})

        if current_page < int(data['totalPage']):
            # Rebuild the URL with the incremented page number rather than
            # str.replace, which could touch an unrelated path segment.
            base = current_url.rstrip('/').rsplit('/', 1)[0]
            yield scrapy.Request(f"{base}/{current_page + 1}/",
                                 callback=self.parse)

    def parse_info_url(self, response):
        """Extract the article payload embedded as ``var allData = {...};``
        in the page and append it to ``<keyword>.html``."""
        # NOTE(review): blocks the reactor; prefer DOWNLOAD_DELAY.
        sleep(0.5)
        match = re.search(r'var allData = (.*?);\n', response.text)
        if match is None:
            # Page layout changed or no embedded data — skip instead of
            # raising IndexError as findall(...)[0] did.
            return
        doc = json.loads(match.group(1))['docData']
        if not doc.get('summary'):
            return
        title = doc['title']
        imgurl = response.meta['imgurl']
        summary = doc['summary']
        # Neutralize bare anchors so the saved body does not render as links.
        content = doc['contentData']['contentList'][0]['data'].replace(
            '<a>', '<a hidden>')
        # self.query only exists when start_urls was built interactively;
        # fall back to the spider name otherwise.
        filename = unquote(getattr(self, 'query', self.name))
        # utf-8 is explicit so the Chinese text survives on every platform;
        # previously the computed filename was never used.
        with open(f"{filename}.html", "a+", encoding="utf-8") as f:
            f.write("<h1>标题:</h1>" + title + "\n")
            f.write("<h1>图片:</h1>" + imgurl + "\n")
            f.write("<h1>简介:</h1>" + summary + "\n")
            f.write("<h1>内容:</h1>" + content + "\n")

        self.count += 1
        print(f"第{self.count}篇文章已完成")
