# -*- coding: utf-8 -*-
import ast
import hashlib
import html
import json
import re

import pymysql
import redis
import scrapy
from lxml import etree
from scrapy.spidermiddlewares.httperror import HttpError
from scrapy.utils.python import to_bytes
from twisted.internet.error import DNSLookupError, TCPTimedOutError, TimeoutError
from w3lib.url import canonicalize_url


class ToutiaoSearchSpider(scrapy.Spider):
    """Search spider for toutiao.com.

    For each (keyword, key_id) pair it queries the Toutiao search API,
    follows the article detail pages of text results (videos are skipped)
    and yields item dicts with title, abstract, content and image URLs.
    """

    name = "toutiao_search"
    allowed_domains = ["toutiao.com"]
    # Search API endpoint; {} is filled with the keyword.
    base_url = 'https://www.toutiao.com/api/search/content/?aid=24&offset=0&format=json&keyword={}&autoload=true&count=20&cur_tab=1&from=search_tab&pd=synthesis'
    base_detail_url = 'https://www.toutiao.com/a{}/'  # {} is the numeric article item_id

    def __init__(self, kws=None, *args, **kwargs):
        """
        :param kws: string literal of a sequence of (keyword, key_id)
                    tuples, e.g. "[('foo', 1), ('bar', 2)]" — typically
                    passed on the command line via ``-a kws=...``.
        """
        super(ToutiaoSearchSpider, self).__init__(*args, **kwargs)
        self.kws = kws

    def start_requests(self):
        """Open the Redis connection and seed one search request per keyword."""
        self.r = redis.StrictRedis(
            host=self.settings.get('REDIS_HOST'),
            port=self.settings.get('REDIS_PORT'),
            db=self.settings.get('REDIS_PARAMS').get('db'),
        )

        # BUGFIX: was eval(), which executes arbitrary code from a
        # command-line argument; literal_eval only parses Python literals
        # and accepts the same "[('kw', id), ...]" strings.
        kws = ast.literal_eval(self.kws)
        for kw in kws:  # kw is a (keyword, key_id) tuple
            url = self.base_url.format(kw[0])
            yield scrapy.Request(
                url,
                callback=self.parse,
                errback=self.parse_err,
                meta={'kw': kw},
                # Seed requests bypass the dupefilter so re-crawls work.
                dont_filter=True,
            )

    def parse(self, response):
        """Parse a search-API JSON response; follow text-article detail pages."""
        kw = response.meta.get('kw')
        ret = json.loads(response.text)
        data = ret.get('data')
        # BUGFIX (old TODO): 'data' may be missing or None (e.g. when the
        # API rejects us); iterating None raised TypeError before.
        if not data:
            self.logger.warning('no data in search response: %s', response.url)
            return
        for entry in data:
            abstract = entry.get('abstract')
            # Only entries with a non-empty abstract render article content;
            # entries carrying video_duration are videos — skip both kinds.
            if not abstract or entry.get('video_duration') is not None:
                continue
            item = {
                'abstract': abstract,
                'title': entry.get('title'),
                # item_id/id/item_source_url all carry this value,
                # with occasional differences between them.
                'item_id': entry.get('id'),
                'keyword': kw[0],
                'key_id': kw[1],
            }
            item['source_url'] = self.base_detail_url.format(item['item_id'])
            yield scrapy.Request(
                item['source_url'],
                callback=self.parse_detail,
                errback=self.parse_err,
                meta={'item': item},
            )

    def parse_detail(self, response):
        """Extract the article body from a detail page.

        The article HTML is embedded in the page as an escaped JS string
        literal (``content: '...'``); unescape it, strip quote blocks and
        collect image URLs.
        """
        item = response.meta.get('item')
        ret = re.search(r"content: '(.*)',", response.text)
        if not ret:
            return
        # Turn the escaped entities back into real markup.
        content = html.unescape(ret.group(1))
        content_html = etree.HTML(content)
        if content_html is None:
            # Most likely anti-crawling; drop the URL fingerprint from
            # Redis so a later run can retry it.
            self.del_fingerprint(response.url)
            self.logger.error('%s---获取内容为None，应当是反爬造成的' % response.url)
            # BUGFIX: previously fell through and crashed on
            # content_html.xpath() below with AttributeError.
            return
        # Non-greedy match: remove <blockquote> sections.
        content = re.sub(r'<blockquote>.*?</blockquote>', '', content)
        item['content'] = content
        img_urls = content_html.xpath('//img/@src')
        item['img_url'] = str(img_urls) if img_urls else ''
        # TODO: removing the images from the content itself is deferred.
        yield item

    def parse_err(self, failure):
        """Errback for failed requests.

        NOTE: raising CloseSpider here would only close the spider for the
        failing request; raising it inside parse() kills the running spider
        outright.
        """
        self.logger.error(repr(failure))
        if failure.check(HttpError):
            response = failure.value.response
            self.logger.error('HttpError on %s', response.url)
        elif failure.check(DNSLookupError):
            request = failure.request
            self.logger.error('DNSLookupError on %s', request.url)
        elif failure.check(TimeoutError, TCPTimedOutError):
            # TimeoutError is twisted.internet.error.TimeoutError (imported
            # at module level); the builtin of the same name never matched
            # twisted's download timeouts.
            request = failure.request
            self.logger.error('TimeoutError on %s', request.url)

    def del_fingerprint(self, url):
        """Remove the dupefilter fingerprint of *url* from Redis so that
        scrapy will request it again.

        Mirrors scrapy's request_fingerprint(): sha1 over the method, the
        canonicalized URL and the (empty) request body.
        """
        fp = hashlib.sha1()
        fp.update(to_bytes('GET'))
        fp.update(to_bytes(canonicalize_url(url)))
        fp.update(b'')  # empty request body
        # This set holds this spider's URL fingerprints.
        self.r.srem('toutiao_search:dupefilter', fp.hexdigest())
