# -*- coding: utf-8 -*-
import sys
import scrapy
import json
import time
import re
import datetime
from lxml import etree
from weibo_scrapy.items import MblogItem, UserItem
from weibo_scrapy.settings import LIMIT_DATE,COOKIES_ENABLED,COOKIES
from weibo_scrapy.login import login

# Mobile-Safari User-Agent for weibo requests.  Not referenced anywhere in
# this file — presumably consumed elsewhere (settings / middleware); TODO confirm.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
}

class SwbSpider(scrapy.Spider):
    """Crawl one weibo.cn user's profile search results for given keywords.

    Walks ``https://weibo.cn/<uid>/profile?keyword=...`` result pages, emits
    one MblogItem per post, follows "全文" (full-text) links for truncated
    posts, and paginates via the "下页" (next page) link.
    """
    name = 'swb_1'
    allowed_domains = ['weibo.cn']
    # start_urls = ['http://m.weibo.cn/']

    # Counters are rendered as e.g. "评论[12]" — capture the whole number.
    # (The previous pattern '[(\d+)]' was a character class and only ever
    # matched a single digit, truncating multi-digit counts.)
    COUNT_RE = re.compile(r'\[(\d+)\]')

    def start_requests(self):
        """Seed one search request (page 1) per keyword for the target uid."""
        # with open('key_words.txt','r',encoding='utf-8') as fp:
        #     words = [line.strip() for line in fp]
        words = ['落马']
        uid = '2615417307'
        for word in words:
            url = 'https://weibo.cn/{uid}/profile?keyword={word}&hasori=0&haspic=0&endtime=20190227&advancedfilter=1&page={page}'.format(
                word=word, page=1, uid=uid)
            yield scrapy.Request(url=url, cookies=COOKIES, callback=self.parse_item)

    def _count(self, mblog, kind):
        """Return the integer counter for a post's repost/comment/attitude link.

        *kind* is the substring identifying the link's href ("comment",
        "attitude" or "repost").  Returns 0 when the link or the bracketed
        number is missing instead of raising.
        """
        text = mblog.xpath(
            './/a[contains(@href,"%s") and contains(@href,"uid")]/text()' % kind
        ).extract_first() or ''
        m = self.COUNT_RE.search(text)
        return int(m.group(1)) if m else 0

    def parse_item(self, response):
        """Parse one result page into MblogItems and follow pagination."""
        print(response.url)
        if '没有找到符合条件的微博,你可以尝试:' in response.text:
            return
        # Page title reads "<name>的微博"; strip the suffix and NBSPs.
        title = response.xpath('//div[@class="ut"]//text()').extract_first() or ''
        screen_name = title.replace('的微博', '').replace('\xa0', '')
        print('*****************', screen_name)
        uid, page = re.findall(r'/(\d+)/profile.*page=(.*)', response.url)[0]
        mblog_list = response.xpath('//div[@class="c" and starts-with(@id,"M_")]')
        for mblog in mblog_list:
            item = MblogItem()
            item['screen_name'] = screen_name
            # ids look like "M_<mid>"; slice the fixed 2-char prefix off.
            # lstrip('M_') would also eat any leading M/_ chars of the mid.
            item['mid'] = mblog.xpath('./@id').extract_first()[2:]
            item['uid'] = uid
            item['text'] = ''.join(mblog.xpath('.//span[@class="ctt"]//text() | ./div/text() | .//img/@alt').extract())
            item['comments_count'] = self._count(mblog, 'comment')
            item['attitudes_count'] = self._count(mblog, 'attitude')
            item['reposts_count'] = self._count(mblog, 'repost')
            # "span.ct" holds "<date>\xa0<source>"; default both fields so a
            # post without it can't raise NameError / leak the previous
            # iteration's date.
            date_str = ''
            item['source'] = ''
            ct = mblog.xpath('.//span[@class="ct"]/text()')
            if ct:
                date_and_source = ct.extract_first().split('\xa0')
                date_str = date_and_source[0]
                if len(date_and_source) >= 2:
                    item['source'] = date_and_source[1]
            if date_str:
                item['created_at'], item['timestamp'] = trans_date_ts(date_str)
            else:
                item['created_at'], item['timestamp'] = '', 0
            # if item['created_at'] < '2018-07-01':
            #     return
            # Truncated posts carry a "全文" link to the full-text page.
            ckAll = mblog.xpath('.//a[text()="全文"]/@href')
            if ckAll:
                ckAll_url = 'https://weibo.cn' + ckAll.extract_first()
                yield scrapy.Request(url=ckAll_url, callback=self.parse_detail,
                                     meta={'item': item})
            else:
                yield item
        next_page = response.xpath('//a[text()="下页"]/@href')
        if next_page:
            next_url = 'https://weibo.cn' + next_page.extract_first()
            yield scrapy.Request(url=next_url, callback=self.parse_item,
                                 meta={'url': next_url})

    def parse_detail(self, response):
        """Replace a truncated item's text with the full-text page body."""
        print(response.url)
        item = response.meta['item']
        mblog = response.xpath('//div[@class="c" and starts-with(@id,"M_")]')
        item['text'] = ''.join(mblog.xpath('.//span[@class="ctt"]//text()').extract())
        print(item)
        yield item
def trans_date_ts(date):
    """Normalise a weibo.cn timestamp string and return (date_str, unix_ts).

    weibo.cn renders post times in several shapes: "X分钟前" (minutes ago),
    "X小时前" (hours ago), "刚刚" (just now), "今天 HH:MM", "昨天 HH:MM",
    "MM月DD日 HH:MM" (current year implied), "YYYY年MM月DD日 HH:MM[:SS]",
    and the API-style "Wed Feb 27 10:00:00 +0800 2019".

    Returns:
        tuple: ("YYYY-MM-DD", int unix timestamp) — via date2ts/ts2date,
        both in local time.
    """
    date = date.replace('年', '-').replace('月', '-').replace('日', '')
    if '分钟前' in date:
        # Parse the leading number explicitly; the old rstrip('分钟前')
        # stripped a character *set* and only worked by accident.
        minutes = int(re.match(r'\d+', date).group())
        date = ts2date(time.time() - minutes * 60)
    elif '小时前' in date:
        hours = int(re.match(r'\d+', date).group())
        date = ts2date(time.time() - hours * 3600)
    elif '昨天' in date:
        # Remove only the literal prefix, keeping the " HH:MM" remainder.
        date = get_before_date(1) + date.replace('昨天', '', 1) + ':00'
    elif '今天' in date:
        date = get_before_date(0) + date.replace('今天', '', 1) + ':00'
    elif '刚刚' in date:
        date = ts2date(time.time())
    elif '+0800' in date:
        fields = time.strptime(date, '%a %b %d %H:%M:%S +0800 %Y')
        date = time.strftime('%Y-%m-%d %H:%M:%S', fields)
    elif date.count('-') == 1:
        # "MM-DD HH:MM" — prepend the current year.
        date = time.strftime('%Y-', time.localtime()) + date + ':00'
    return date[:10], date2ts(date)


def date2ts(date):
    """Convert a local-time date string to an int unix timestamp.

    Accepts 'YYYY-MM-DD', 'YYYY-MM-DD HH:MM' and 'YYYY-MM-DD HH:MM:SS'.

    Raises:
        ValueError: if *date* matches none of the accepted formats.
    """
    # Narrow except clause: the old bare `except:` swallowed every error
    # (including KeyboardInterrupt), masking real bugs.
    for fmt in ('%Y-%m-%d', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M'):
        try:
            return int(time.mktime(time.strptime(date, fmt)))
        except ValueError:
            continue
    raise ValueError('unrecognised date string: %r' % date)


def ts2date(ts):
    """Format a unix timestamp as 'YYYY-MM-DD HH:MM:SS' in local time."""
    local_fields = time.localtime(ts)
    return time.strftime('%Y-%m-%d %H:%M:%S', local_fields)


def get_before_date(date_cha):
    """Return the ISO date string *date_cha* days before today.

    E.g. if today is 2018-07-07, get_before_date(1) -> '2018-07-06'.
    """
    target = datetime.date.today() - datetime.timedelta(days=date_cha)
    return str(target)