from scrapy import Request, Spider
import datetime
from urllib import request


def dateHourRange(beginDateHour, endDateHour):
    """Build consecutive hourly [start, end] windows between two date-hours.

    Both arguments are "%Y-%m-%d %H" strings. Returns a list of
    [start, end] pairs of "%Y-%m-%d %H:00:00" strings, one per hour step
    (empty when the range spans fewer than two hour marks).
    """
    stamps = []
    cursor = datetime.datetime.strptime(beginDateHour, "%Y-%m-%d %H")
    label = beginDateHour[:]
    # Lexicographic comparison is safe for zero-padded "%Y-%m-%d %H" strings.
    while label <= endDateHour:
        stamps.append(label + ':00:00')
        cursor += datetime.timedelta(hours=1)
        label = cursor.strftime("%Y-%m-%d %H")
    # Pair each timestamp with its successor to form sliding windows.
    return [[left, right] for left, right in zip(stamps, stamps[1:])]


def backDate(date_str):
    """Normalize a Baidu-style date string to "%Y-%m-%d %H:%M:%S".

    Relative Chinese forms ("N秒前", "N分钟前", "N小时前", "N天前") are
    resolved by subtracting from the current time; absolute
    "%Y年%m月%d日 %H:%M" dates are reformatted; anything else is
    returned unchanged.
    """
    date_str = date_str.strip()
    # Relative suffixes mapped to the matching timedelta keyword.
    relative_units = (
        ('秒前', 'seconds'),
        ('分钟前', 'minutes'),
        ('小时前', 'hours'),
        ('天前', 'days'),
    )
    for suffix, unit in relative_units:
        if suffix in date_str:
            amount = int(date_str.replace(suffix, ''))
            moment = datetime.datetime.now() - datetime.timedelta(**{unit: amount})
            return moment.strftime("%Y-%m-%d %H:%M:%S")
    if '年' in date_str:
        return datetime.datetime.strptime(date_str, '%Y年%m月%d日 %H:%M').strftime('%Y-%m-%d %H:%M:%S')
    return date_str


def optimize_str(text):
    """Collapse a scraped string: drop CR/LF characters and trim whitespace.

    The parameter was previously named ``str``, shadowing the builtin;
    the leading ``strip()`` was redundant (the trailing one subsumes it).
    Behavior is unchanged.
    """
    return text.replace('\n', '').replace('\r', '').strip()


class BaiduNewsSearchSpider(Spider):
    """Scrape Baidu News search results for a keyword, newest first.

    Follows "下一页" (next page) links until a result older than
    ``cutoff_date`` appears, yielding one dict per news item with
    keyword, page, title, url, source, and normalized post date.
    """
    name = "baidunews"
    # Search keyword and the oldest acceptable post date (YYYYMMDD as int);
    # class attributes so runs/subclasses can override without editing code.
    keyword = "三星8K电视"
    cutoff_date = 20190831

    def start_requests(self):
        # Baidu News search endpoint; the keyword must be URL-quoted.
        yield Request(
            url='http://www.baidu.com/s?ie=utf-8&cl=2&medium=0&rtt=4&bsst=1&rsv_dl=news_t_sk&tn=news&word={}'.format(request.quote(self.keyword)),
            callback=self.parse,
            meta={'keyword': self.keyword}
        )

    def parse(self, response):
        keyword = response.meta['keyword']
        page = ''.join(response.xpath('//*[@id="page"]/strong/span[@class="pc"]/text()').extract())
        print(keyword, page)
        items = response.xpath("//div[@class='result']")
        keep_paging = True  # cleared once a result older than the cutoff is seen
        for item in items:
            datas = {
                'keyword': keyword,
                'page': page,
                'title': optimize_str(''.join(item.xpath(".//h3[@class='c-title']/a//text()").extract())),
                # extract_first avoids an IndexError when the link is missing.
                'url': item.xpath(".//h3[@class='c-title']/a/@href").extract_first(default='')
            }
            # The author cell is "source\xa0\xa0date"; with no source only the date remains.
            c_author = ''.join(item.xpath(".//p[@class='c-author']//text()").extract())
            c_author = c_author.split('\xa0\xa0')
            datas['srcsys'] = c_author[0].replace('\n', '').strip() if len(c_author) == 2 else ''
            postdate = c_author[-1].replace('\t', '').replace('\n', '').strip()
            try:
                # BUG FIX: was `datetime.strptime` (the module has no strptime),
                # which always raised AttributeError and a bare except rerouted
                # every date through backDate. Parse absolute dates directly.
                parsed = datetime.datetime.strptime(postdate, '%Y年%m月%d日 %H:%M')
                datas['postdatetime'] = parsed.strftime('%Y-%m-%d %H:%M:%S')
            except ValueError:
                # Relative forms such as "3小时前" are normalized by backDate.
                datas['postdatetime'] = backDate(postdate)
            datas['postdate'] = datas['postdatetime'].split()[0].replace('-', '')
            try:
                recent = int(datas['postdate']) > self.cutoff_date
            except ValueError:
                # backDate returned unrecognized raw text; treat it as stale
                # instead of crashing the whole parse on int().
                recent = False
            if recent:
                yield datas
            else:
                keep_paging = False
        next_page = ''.join(response.xpath('.//a[contains(text(),"下一页")]/@href').extract())
        if next_page and keep_paging:
            yield Request(url="https://www.baidu.com" + next_page, callback=self.parse, meta={'keyword': keyword})
