# -*- coding: utf-8 -*-
import sys
import scrapy
import json
import time
import re
import datetime
from lxml import etree
from weibo_scrapy.items import MblogItem, UserItem
from weibo_scrapy.settings import LIMIT_DATE,COOKIES_ENABLED
from weibo_scrapy.login import login

# Mobile Safari user-agent header set.
# NOTE(review): HEADERS is not referenced anywhere in this file's visible
# code — presumably consumed by downloader middleware or settings; confirm.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
}

class SwbSpider(scrapy.Spider):
    """Search-result spider for s.weibo.com.

    Reads keywords from ``key_words.txt`` and issues one search request per
    (keyword, day) pair over a fixed date window, following the "next page"
    link and yielding one dict per microblog card.
    """
    name = 'swb'
    allowed_domains = ['s.weibo.com']
    # start_urls = ['http://m.weibo.cn/']

    def start_requests(self):
        """Generate day-scoped search requests for every keyword."""
        with open('key_words.txt', 'r', encoding='utf-8') as fp:
            # Skip blank lines so an empty keyword cannot produce a bogus URL.
            words = [line.strip() for line in fp if line.strip()]
        start_date = '2018-11-01'
        end_date = '2019-02-26'
        # ISO dates compare correctly as strings, so '<' walks day by day.
        while start_date < end_date:
            # get_before_date(-1, d) returns the day AFTER d.
            tomorrow = get_before_date(-1, start_date)
            for word in words:
                url = 'https://s.weibo.com/weibo?q={word}&typeall=1&suball=1&timescope=custom:{start_date}-0:{tomorrow}-0&Refer=g&page={page}'.format(
                    word=word, page=1, start_date=start_date, tomorrow=tomorrow)
                yield scrapy.Request(url=url, callback=self.parse_item)
            start_date = tomorrow

    @staticmethod
    def _to_int(text, label=''):
        """Parse an action-bar counter cell such as '转发 12'.

        Strips the Chinese label, tolerates a missing cell (None) and an
        empty value (no interactions yet), returning 0 in those cases.
        """
        value = (text or '').replace(label, '').strip()
        return int(value) if value else 0

    def parse_item(self, response):
        """Parse one search-result page and yield one dict per mblog card."""
        if response.xpath('//*[contains(@class,"card-no-result")]'):
            return  # no results for this keyword/day — nothing to paginate
        next_page = response.xpath('//a[text()="下一页"]/@href')
        for mblog in response.xpath('//div[@mid]'):
            item = {}
            item['mid'] = mblog.xpath('./@mid').extract_first()
            user = mblog.xpath('.//a[@class="name"]')
            item['screen_name'] = user.xpath('./@nick-name').extract_first()
            item['scheme'] = user.xpath('./@href').extract_first()
            # BUG FIX: the original indexed re.findall(...)[0] and crashed
            # (IndexError / TypeError) when the profile link was missing or
            # did not match the numeric-uid pattern.
            uid_match = re.search(r'//weibo\.com/(\d+)\?', item['scheme'] or '')
            item['uid'] = uid_match.group(1) if uid_match else ''
            content = mblog.xpath('.//*[@node-type="feed_list_content"]')
            full_content = mblog.xpath('.//*[@node-type="feed_list_content_full"]')
            # Prefer the expanded text when the post was truncated.
            content = full_content if full_content else content
            text = content.xpath('string(.)').extract_first() or ''
            item['text'] = text.replace('收起全文d\n', '').strip()
            from_tag = mblog.xpath('string(.//*[@class="from"])').extract_first() or ''
            # BUG FIX: partition() always yields a (date, sep, source) triple,
            # so date_str can no longer be left unbound/stale — the original's
            # split('来自') set neither variable when it produced >2 parts.
            date_str, _, source = from_tag.partition('来自')
            item['source'] = source.strip()
            item['created_at'], item['timestamp'] = trans_date_ts(date_str.strip())
            item['reposts_count'] = self._to_int(
                mblog.xpath('string(.//div[@class="card-act"]/ul/li[2])').extract_first(), '转发')
            item['comments_count'] = self._to_int(
                mblog.xpath('string(.//div[@class="card-act"]/ul/li[3])').extract_first(), '评论')
            item['attitudes_count'] = self._to_int(
                mblog.xpath('string(.//div[@class="card-act"]/ul/li[4])').extract_first())
            # BUG FIX: the original only print()ed the item, so nothing ever
            # reached the item pipeline. Yield it to the engine instead.
            yield item
        if next_page:
            yield scrapy.Request(
                url='https://s.weibo.com' + next_page.extract_first(),
                callback=self.parse_item)

def trans_date_ts(date):
    """Normalize a weibo timestamp string to (``YYYY-MM-DD``, unix ts).

    Handles relative forms ('N分钟前', 'N小时前', '今天 HH:MM',
    '昨天 HH:MM', '刚刚'), a month-day-only form (assumed current year),
    full Chinese dates ('YYYY年MM月DD日 ...') and the
    '%a %b %d %H:%M:%S +0800 %Y' form.
    """
    date = date.replace('年', '-').replace('月', '-').replace('日', '')
    if '分钟前' in date:
        # BUG FIX: the original used rstrip('分钟前'), which strips a
        # character SET, not a suffix; replace() removes the literal label.
        minutes = int(date.replace('分钟前', '').strip())
        date = ts2date(time.time() - minutes * 60)
    elif '小时前' in date:
        hours = int(date.replace('小时前', '').strip())
        date = ts2date(time.time() - hours * 3600)
    elif '昨天' in date:
        # Remaining text is ' HH:MM' (leading space preserved on purpose).
        date = get_before_date(1) + date.replace('昨天', '') + ":00"
    elif '今天' in date:
        date = get_before_date(0) + date.replace('今天', '') + ":00"
    elif date.count('-') == 1:
        # Month-day only: assume the current year.
        date = time.strftime("%Y-", time.localtime()) + date + ":00"
    elif '刚刚' in date:
        date = ts2date(time.time())
    elif '+0800' in date:
        format_time = time.strptime(date, '%a %b %d %H:%M:%S +0800 %Y')
        date = time.strftime('%Y-%m-%d %H:%M:%S', format_time)
    return date[:10], date2ts(date)


def date2ts(date):
    """Convert 'YYYY-MM-DD[ HH:MM[:SS]]' to a unix timestamp (local time).

    Tries each supported format in turn and raises ValueError when none
    matches. The original's nested bare ``except:`` clauses also swallowed
    unrelated errors such as TypeError on non-string input.
    """
    for fmt in ('%Y-%m-%d', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S'):
        try:
            return int(time.mktime(time.strptime(date, fmt)))
        except ValueError:
            continue
    raise ValueError('unsupported date format: %r' % (date,))


def ts2date(ts):
    """Format a unix timestamp as 'YYYY-MM-DD HH:MM:SS' in local time."""
    local_time = time.localtime(ts)
    return time.strftime('%Y-%m-%d %H:%M:%S', local_time)

def get_today():
    """Return today's local date as a 'YYYY-MM-DD' string."""
    return datetime.date.today().isoformat()
def get_before_date(date_cha, date=None):
    """Return the date ``date_cha`` days BEFORE ``date`` as 'YYYY-MM-DD'.

    ``date`` is a 'YYYY-MM-DD' string, defaulting to today. A negative
    ``date_cha`` therefore moves forward in time.
    E.g. date_cha=1 with date='2018-07-07' returns '2018-07-06'.
    """
    if date:
        base = datetime.datetime.strptime(date, '%Y-%m-%d')
    else:
        base = datetime.date.today()
    shifted = base - datetime.timedelta(days=date_cha)
    return str(shifted)[:10]


# if __name__ == '__main__':
#     print trans_date_ts('刚刚')
