# -*- coding: utf-8 -*-
import scrapy
from readability.readability import Document
from lxml import etree
import time
import datetime
from urllib.parse import quote
import re

class NewsSpider(scrapy.Spider):
    """Crawl Baidu News search results for each stock keyword, one day at a
    time, then fetch every hit and extract its readable article text."""
    name = 'news'
    allowed_domains = []
    start_urls = ['http://www.baidu.com/']

    def start_requests(self):
        """Yield one Baidu News search request per stock per day.

        Reads ``stock_id.txt`` lines of the form ``<stock_name>-<stock_id>``
        and walks the date range [2018-10-01, 2019-02-26) day by day.
        """
        with open('stock_id.txt', 'r', encoding='utf-8') as fp:
            words = [line.strip() for line in fp]
        date = '2018-10-01'
        stop = '2019-02-26'
        # Lexicographic comparison is safe for zero-padded ISO dates.
        while date < stop:
            bt, et = get_bt_et(date, date)
            for word in words:
                parts = word.split('-')  # split once, not per field
                stock_name = parts[0]
                stock_id = parts[1]
                url = 'http://news.baidu.com/ns?word={word}&cl=2&ct=0&tn=newsdy&rn=50&ie=utf-8&bt={bt}&et={et}'.format(word=quote(stock_name), bt=bt, et=et)
                yield scrapy.Request(url=url, meta={'stock_id': stock_id, 'stock_name': stock_name})
            # get_before_date(-1, date) returns the day AFTER `date`.
            date = get_before_date(-1, date)

    def parse(self, response):
        """Parse one result page: yield a content request per hit, then
        follow the "next page" link if present."""
        print(response.url)
        stock_id = response.meta['stock_id']
        stock_name = response.meta['stock_name']
        for div in response.xpath('//*[@class="result"]'):
            title = div.xpath('string(./*[@class="c-title"]/a)').extract_first()
            news_url = div.xpath('./*[@class="c-title"]/a/@href').extract_first()
            if title is None or not news_url:
                # Malformed result block: nothing to follow, skip it.
                continue
            item = {}
            item['stock_id'] = stock_id
            item['keyword'] = stock_name
            item['title'] = title.replace(' ', '').replace('\n', '')
            item['url'] = news_url
            # "source\xa0\xa0time"; sometimes only the timestamp is present.
            author_text = div.xpath('string(.//*[@class="c-author"])').extract_first() or ''
            author = author_text.split('\xa0\xa0')
            if len(author) == 2:
                item['source_website'] = author[0].replace(' ', '').strip()
                raw_time = author[1]
            else:
                item['source_website'] = ''
                raw_time = author[0]
            item['timestamp'] = trans_time(raw_time.replace('\t', '').replace('\n', ''))
            item['datetime'] = time.strftime('%Y-%m-%d', time.localtime(item['timestamp']))
            item['summary'] = ''.join(div.xpath(
                './/*[@class="c-author"]/../text() | .//*[@class="c-author"]/../em/text()').extract()).replace(' ', '').replace('\r', '').replace('\n', '')
            yield scrapy.Request(url=news_url, callback=self.parse_content, meta={'item': item})
        next_page = response.xpath('//a[text()="下一页>"]/@href')
        if next_page:
            url = 'http://news.baidu.com' + next_page.extract_first()
            yield scrapy.Request(url=url, callback=self.parse, meta={'stock_id': stock_id, 'stock_name': stock_name})

    def parse_content(self, response):
        """Extract the readable article body; fall back to the search-result
        summary when extraction fails or comes back empty."""
        item = response.meta['item']
        try:
            readable_article = Document(response.text).summary()
            html = etree.HTML(readable_article)
            item['content'] = ''.join(html.xpath('//*//text()')
                      ).replace('\n', '').strip().replace('\t', '').replace('\r', '').replace('&nbsp', '').replace('\u3000', '')
        except Exception:
            # readability/lxml can fail on odd markup; don't kill the crawl,
            # but don't use a bare except that also hides interrupts.
            item['content'] = item['summary']
        if not item['content']:
            item['content'] = item['summary']
        yield item

def get_bt_et(start_time, end_time):
    """Convert 'YYYY-MM-DD' bounds to Baidu News ``bt``/``et`` timestamps.

    ``bt`` is 00:00:00 of ``start_time`` and ``et`` is 23:59:59 of
    ``end_time``, both as Unix timestamps shifted by the fixed 28800-second
    (8-hour, UTC+8) offset the original code applied.

    Returns:
        (bt, et) tuple of integers.
    """
    epoch = datetime.datetime(1970, 1, 1)
    start_days = (datetime.datetime.strptime(start_time, '%Y-%m-%d') - epoch).days
    end_days = (datetime.datetime.strptime(end_time, '%Y-%m-%d') - epoch).days
    bt = start_days * 86400 - 28800
    et = end_days * 86400 - 28800 + 86399
    return bt, et

def Caltime(date1, date2):
    """Return the number of days from ``date1`` to ``date2``.

    Both arguments are 'YYYY-MM-DD' strings (unpadded month/day accepted).
    Positive when ``date2`` is later than ``date1``, negative otherwise.
    """
    # datetime.strptime builds the datetime directly -- no need for the
    # time.strptime -> tuple-index -> datetime() round-trip.
    d1 = datetime.datetime.strptime(date1, "%Y-%m-%d")
    d2 = datetime.datetime.strptime(date2, "%Y-%m-%d")
    return (d2 - d1).days



def trans_time(date):
    """Convert a Baidu News time string to an integer Unix timestamp.

    Handles relative forms ('N分钟前', 'N小时前', 'N天前') against the
    current clock, and absolute dates such as '2018年10月1日' or
    '2018-10-01 12:30' (time-of-day is ignored, matching the original).
    """
    date = date.replace('年', '-').replace('月', '-').replace('日', '')
    # Relative suffix -> seconds per unit. dict order fixes match priority.
    relative_units = {'分钟前': 60, '小时前': 3600, '天前': 86400}
    for suffix, seconds in relative_units.items():
        if suffix in date:
            # Pull out the leading count with a regex: str.rstrip strips a
            # character *set*, not a suffix, so it is unsafe here.
            amount = int(re.search(r'\d+', date).group())
            return int(time.time() - amount * seconds)
    datetime_re = re.findall(r'\d+-\d+-\d+', date)[0]
    timeArray = time.strptime(datetime_re, "%Y-%m-%d")
    return int(time.mktime(timeArray))

def get_before_date(date_cha, date=None):
    """Return the date ``date_cha`` days before ``date`` as 'YYYY-MM-DD'.

    ``date`` is a 'YYYY-MM-DD' string; when omitted (or empty) today's date
    is used. A negative ``date_cha`` moves forward instead: e.g. with
    date='2018-07-07', date_cha=1 gives '2018-07-06', date_cha=-1 gives
    '2018-07-08'.
    """
    if date:
        base = datetime.datetime.strptime(date, '%Y-%m-%d')
    else:
        base = datetime.date.today()
    shifted = base - datetime.timedelta(days=date_cha)
    # Slice keeps only the date part when `base` is a datetime.
    return str(shifted)[:10]