from scrapy import Request, Spider
import time
import datetime
from urllib import request


def dateHourRange(beginDateHour, endDateHour):
    """Build consecutive one-hour [start, end] windows between two "%Y-%m-%d %H" strings.

    Each window is a 2-item list of "%Y-%m-%d %H:00:00" timestamps; the end of
    one window is the start of the next. Returns [] when the range holds fewer
    than two hour marks (e.g. begin > end, or begin == end).
    """
    cursor = datetime.datetime.strptime(beginDateHour, "%Y-%m-%d %H")
    label = beginDateHour
    stamps = []
    while label <= endDateHour:
        stamps.append(label + ':00:00')
        cursor += datetime.timedelta(hours=1)
        label = cursor.strftime("%Y-%m-%d %H")
    # Pair each hour mark with its successor: [a, b], [b, c], ...
    return [[first, second] for first, second in zip(stamps, stamps[1:])]


def backDate(date_str):
    """Normalize a Baidu date phrase to "%Y-%m-%d %H:%M:%S".

    Handles relative Chinese phrases ("N秒前", "N分钟前", "N小时前", "N天前")
    by subtracting from the current time, absolute "%Y年%m月%d日 %H:%M" dates,
    and otherwise returns the stripped input unchanged.
    """
    date_str = date_str.strip()
    # Relative phrases: suffix -> timedelta keyword.
    relative_units = (
        ('秒前', 'seconds'),
        ('分钟前', 'minutes'),
        ('小时前', 'hours'),
        ('天前', 'days'),
    )
    for suffix, unit in relative_units:
        if suffix in date_str:
            amount = int(date_str.replace(suffix, ''))
            moment = datetime.datetime.now() - datetime.timedelta(**{unit: amount})
            return moment.strftime("%Y-%m-%d %H:%M:%S")
    if '年' in date_str:
        parsed = datetime.datetime.strptime(date_str, '%Y年%m月%d日 %H:%M')
        return parsed.strftime('%Y-%m-%d %H:%M:%S')
    # Already normalized (or unrecognized) — pass through as-is.
    return date_str


def optimize_str(text):
    """Clean a scraped text fragment: trim surrounding whitespace and drop embedded newlines.

    Fixed: the parameter was named ``str``, shadowing the builtin. Positional
    call sites (the only kind in this file) are unaffected by the rename.
    """
    return text.strip().replace('\n', '').replace('\r', '').strip()


class BaiduNewsSearchSpider(Spider):
    """Scrapy spider that searches Baidu news for one keyword, hour by hour.

    ``start_requests`` splits a fixed date range into one-hour windows (via
    ``dateHourRange``) and issues one time-filtered search request per window;
    ``parse`` extracts result items and follows "下一页" (next page) links.
    """
    name = "baidunews_pg"

    def start_requests(self):
        """Yield one time-filtered search Request per one-hour window."""
        # Fixed crawl window for a reproducible run; derive from
        # datetime.datetime.now() instead to crawl a rolling window.
        daysAgo = '2019-09-01 00'
        today = '2019-10-12 10'
        keyword = "三星8K电视"
        for window in dateHourRange(daysAgo, today):
            print(window)
            startTimestamp = int(time.mktime(time.strptime(window[0], "%Y-%m-%d %H:%M:%S")))
            endTimestamp = int(time.mktime(time.strptime(window[1], "%Y-%m-%d %H:%M:%S")))
            # gpc restricts Baidu results to [start, end] epoch seconds.
            gpc = "stf={},{}|stftype=2".format(str(startTimestamp), str(endTimestamp))
            yield Request(
                url='https://www.baidu.com/s?tn=news&rtt=4&bsst=1&cl=2&wd={}&gpc={}'.format(
                    keyword, request.quote(gpc)
                ),
                callback=self.parse,
                meta={
                    'keyword': keyword,
                    # Window start date as YYYYMMDD.
                    'postdate': window[0][:10].replace('-', '')
                }
            )

    def parse(self, response):
        """Parse one results page: yield a dict per hit, then follow the next page."""
        keyword = response.meta['keyword']
        postdate = response.meta['postdate']
        page = ''.join(response.xpath('//*[@id="page"]/strong/span[@class="pc"]/text()').extract())
        print(keyword, postdate, page)
        for item in response.xpath("//div[@class='result']"):
            datas = {
                'keyword': keyword,
                'postdate': postdate,
                'page': page,
                'title': optimize_str(''.join(item.xpath(".//h3[@class='c-title']/a//text()").extract())),
                'url': item.xpath(".//h3[@class='c-title']/a/@href").extract()[0]
            }
            # c-author holds "source\xa0\xa0date" when a source is present,
            # otherwise just the date.
            c_author = ''.join(item.xpath(".//p[@class='c-author']//text()").extract())
            c_author = c_author.split('\xa0\xa0')
            if len(c_author) == 2:
                datas['srcsys'] = c_author[0].replace('\n', '').strip()
            else:
                datas['srcsys'] = ''
            # BUG FIX: use a distinct local instead of reassigning `postdate`,
            # which clobbered the request-level value forwarded to the
            # next-page request below.
            item_date = c_author[-1].replace('\t', '').replace('\n', '').strip()
            try:
                # BUG FIX: the original called datetime.strptime on the
                # *module* (AttributeError every time), so this branch never
                # ran and a bare except always routed through backDate().
                datas['postdatetime'] = datetime.datetime.strptime(
                    item_date, '%Y年%m月%d日 %H:%M').strftime('%Y-%m-%d %H:%M:%S')
            except ValueError:
                # Not an absolute date — interpret relative phrases ("5分钟前", ...).
                datas['postdatetime'] = backDate(item_date)
            datas['postdate'] = datas['postdatetime'].split()[0].replace('-', '')
            yield datas
        next_page = ''.join(response.xpath('.//a[contains(text(),"下一页")]/@href').extract())
        if len(next_page):
            yield Request(
                url="https://www.baidu.com" + next_page,
                callback=self.parse,
                meta={
                    'keyword': keyword,
                    'postdate': postdate
                }
            )
