# -*- coding: utf-8 -*-
"""
Created on 2021-10-13 00:50:17
---------
@summary: 分布式爬取公共管理学院
---------
@author: 大龙
"""
import datetime
import random
import re
import time

from items.scunews_item import SCUNewsDataItem
import feapder
import sys
from constants import PAGE_XPATH_CONTENT,PAGE_XPATH_TITLE,PAGE_XPATH_SUBDATE,LIST_XPATH_DIC,LIST_XPATH_URL_DIC
from lxml import etree
sys.path.append('..')
# mystr="faasd 时间： 123"
# print(mystr[mystr.rfind('时间：')+3:])
class GgglxySpider(feapder.BatchSpider):
    """Distributed batch spider for the School of Public Administration (公共管理学院) news pages.

    Each batch task supplies a list-page URL plus metadata; list pages are
    expanded into per-article requests, and each article is emitted as a
    SCUNewsDataItem.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-task metadata; actual values travel on each Request's kwargs
        # (see start_requests), these are just declared defaults.
        self.acadamyUrl = None  # key into the XPATH lookup dicts from constants
        self.type = None        # news category stored into D_TYPE
        self.source = None      # human-readable source name stored into D_SOURCE

    def start_requests(self, task):
        """Turn one task row (id, url, type, source, acadamyUrl) into a list-page request."""
        id, url, type, source, acadamyUrl = task
        yield feapder.Request(url, task_id=id, type=type, source=source,
                              acadamyUrl=acadamyUrl)

    def parse(self, request, response):
        """Parse a list page: follow every article link, then mark the task done."""
        for li in response.xpath(LIST_XPATH_DIC[request.acadamyUrl]):
            url = li.xpath(LIST_XPATH_URL_DIC[request.acadamyUrl]).extract_first()
            yield feapder.Request(url,
                                  callback=self.parse_dir_content,
                                  task_id=request.task_id,
                                  type=request.type,
                                  source=request.source,
                                  acadamyUrl=request.acadamyUrl)
        # Mark this batch task finished (state 1).
        yield self.update_task_batch(request.task_id, 1)

    def parse_dir_content(self, request, response):
        """Parse one article page into a SCUNewsDataItem and yield it."""
        item = SCUNewsDataItem()
        item['D_SOURCE'] = request.source
        item['D_SUBDATE'] = self.get_date(str(response.xpath(
            PAGE_XPATH_SUBDATE[request.acadamyUrl]).extract_first()))
        item['D_URL'] = response.url
        # NOTE(review): the title XPath may return None for multi-paragraph
        # titles on some layouts — previously handled by now-removed dead code.
        item['D_TITLE'] = response.xpath(
            PAGE_XPATH_TITLE[request.acadamyUrl]).extract_first()
        item['D_TYPE'] = request.type

        # Article body HTML.
        content_html = response.xpath(
            PAGE_XPATH_CONTENT[request.acadamyUrl]).getall()
        if content_html:
            # Strip both the literal two-character escape sequences "\n"/"\r"
            # and real newline/CR characters, plus stray backslashes.
            # (The old pattern r'\\n|\\r|\\' missed actual line-break chars.)
            content_html = re.sub(r'\\n|\\r|[\\\r\n]', "", content_html[0])
        item['D_CONTENT'] = content_html

        # First image in the article body, if any (XPath evaluated once).
        photo = response.xpath(
            PAGE_XPATH_CONTENT[request.acadamyUrl] + "//img/@src").extract_first()
        if photo is not None:
            item['D_PHOTO'] = photo
        yield item

    def get_date(self, mystr):
        """
        @summary: Extract a normalized YYYY-MM-DD date from a string such as
                  "发布日期：2021-10-13" (also accepts "/" and 年月日 forms).
        --------
        @mystr: string possibly containing a date
        --------
        @result: str "YYYY-MM-DD"; falls back to today's date when none found
        """
        match = re.search(
            r"(\d{4})-(\d{1,2})-(\d{1,2})|(\d{4})/(\d{1,2})/(\d{1,2})|(\d{4})年(\d{1,2})月(\d{1,2})",
            mystr)
        if match is None:
            # No recognizable date: default to the crawl date.
            return datetime.datetime.now().strftime('%Y-%m-%d')
        # Normalize separators (/, 年, 月, 日) to "-".
        return re.sub(r'[/年月日]', "-", match.group())

    def exception_request(self, request, response):
        """
        @summary: Hook for requests whose download/parse raised an exception.
        ---------
        @param request:
        @param response:
        ---------
        @result: request / callback / None (return value must be iterable)
        """

        pass

    def failed_request(self, request, response):
        """
        @summary: Hook for requests that exceeded the max retry count.
        ---------
        @param request:
        ---------
        @result: request / item / callback / None (return value must be iterable)
        """

        yield request
        yield self.update_task_batch(request.task_id, -1)  # mark task failed (-1)
