# -*- coding: utf-8 -*-
import scrapy
import random
import re
from heimao.settings import DEFAULT_REQUEST_HEADERS
from heimao.items import HeimaoItem
from heimao.loggings_settings import logger
import time
import datetime


def unix_time(dt):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string into a Unix timestamp.

    :param dt: datetime string formatted as "%Y-%m-%d %H:%M:%S",
               interpreted in the local timezone (via time.mktime).
    :return: integer Unix timestamp.
    """
    parsed = time.strptime(dt, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed))


def timestamp():
    """Return the Unix timestamps for now and for 24 hours ago.

    Used to build Baidu's time-range (gpc=stf) search filter.

    :return: (yes_stamp, today_stamp) — yes_stamp is the timestamp of the
             same wall-clock moment yesterday, today_stamp is the current
             timestamp; both ints, yes_stamp < today_stamp.
    """
    today_stamp = int(time.time())
    # Fix: compute yesterday's timestamp with plain timedelta arithmetic.
    # The original round-tripped through str(datetime) -> split('.') ->
    # strptime -> mktime, which silently depended on datetime's default
    # string format and broke if that representation ever changed.
    yes_stamp = int((datetime.datetime.now() - datetime.timedelta(days=1)).timestamp())
    return yes_stamp, today_stamp


class XinlangSpider(scrapy.Spider):
    """Crawl Baidu news search results for 黑猫投诉 (Black Cat complaints)
    limited to news.sina.com.cn, then scrape each article's detail page
    into a HeimaoItem.
    """
    name = 'xinlang'
    header = DEFAULT_REQUEST_HEADERS
    allowed_domains = ['www.baidu.com', 'news.sina.com.cn']
    # Running count of article links harvested from the result pages.
    data_number = 0

    def __init__(self, *args, **kwargs):
        """Accept an optional ``-a domains=a.com,b.com`` override."""
        domain = kwargs.pop("domains", "")
        domains = [d for d in domain.split(',') if d]
        if domains:
            # BUGFIX: was assigned to the misspelled attribute
            # 'alllowed_domains' (and left as a lazy filter object), so the
            # -a domains= override never took effect. Only override when a
            # non-empty list was supplied, to keep the class default otherwise.
            self.allowed_domains = domains
        super(XinlangSpider, self).__init__(*args, **kwargs)

    def start_requests(self):
        """Issue the initial Baidu search request for the last-24h window."""
        # Search window: from this time yesterday until now (Baidu's stf filter).
        yes_stamp, today_stamp = timestamp()
        start_url = 'https://www.baidu.com/s?wd=黑猫投诉&rsv_btype=t&si=news.sina.com.cn&ct=2097152&gpc=stf={},' \
                    '{}|stftype=1&tfflag=1'.format(yes_stamp, today_stamp)

        yield scrapy.Request(
            url=start_url,
            callback=self.parse,
            headers=self.header
        )

    def parse(self, response):
        """Extract article URLs from one result page and follow pagination."""
        # Throttle: requesting too fast makes Baidu serve a captcha page.
        time.sleep(random.uniform(0, 1))
        try:
            news_url = re.findall(r'''data.txt-tools='{.*?"url":"(.*?)"}''', response.text, re.S)
            self.data_number += len(news_url)
            for new in news_url:
                # The target URL redirects; the request needs a Referer header.
                # Use a per-request copy instead of mutating the shared
                # class-level dict, so queued requests keep their own Referer.
                headers = dict(self.header)
                headers['Referer'] = new
                yield scrapy.Request(
                    url=new,
                    callback=self.detail_news,
                    headers=headers,
                    dont_filter=False
                )
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and scrapy's control-flow exceptions.
            logger.error('起始或翻页url响应中未能获取到url列表！')

        next_url = response.xpath('//*[@id="page"]/a[contains(text(),"下一页")]/@href').extract_first()
        if next_url:
            yield scrapy.Request(
                url='https://www.baidu.com' + next_url,
                callback=self.parse,
                dont_filter=False
            )
        else:
            print('当前程序采集数据量为{}'.format(self.data_number))

    def detail_news(self, response):
        """Parse one Sina news article page into a HeimaoItem."""
        print(response.request.url)
        try:
            url = response.request.url
            title = response.xpath('//*[@class="main-title"]/text()').extract_first().replace('黑猫投诉：', '')
            # The second span of the first paragraph carries the brand/author;
            # strip the trailing "，..."/"投诉..." text and CJK quotes.
            author = response.xpath('//*[@id="article"]/p[1]/span[2]/text()').extract_first().split('，')[0].split('投诉')[
                0].strip().replace('“', '').replace('”', '')
            # Normalize "2021年01月02日 03:04" -> "202101020400" style string:
            # drop the CJK date markers, space and colon, then append "00" seconds.
            # Renamed from 'time' — the original shadowed the stdlib time module.
            raw_date = response.xpath('//*[@class="date"]/text()').extract_first()
            pub_time = re.sub('[年月日 :]', '', raw_date) + '00'
            content_list = response.xpath('//*[@class="article"]/p[position()>2]/text()').extract()
            content = ''.join(content_list).replace('\u3000', '').replace('\n', '')
            item = HeimaoItem()
            item['url'] = url
            item['title'] = title
            item['author'] = author
            item['utime'] = pub_time
            item['content'] = content
            yield item
        except Exception:
            logger.error('当前页面响应解析数据不正确！')
