# -*- coding: utf-8 -*-
import sys
import scrapy
import json
import time
import re
import datetime
from lxml import etree
from weibo_scrapy.items import MblogItem, UserItem
from weibo_scrapy.settings import LIMIT_DATE,COOKIES_ENABLED,COOKIES
from weibo_scrapy.login import login

# Mobile-Safari User-Agent sent with every request from this module.
# NOTE: start_requests() mutates this same dict to add a per-uid Referer.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
}

class Mwb2Spider(scrapy.Spider):
    """Crawl weibo.cn mobile timelines for a fixed list of user ids.

    Walks each user's paginated timeline, yields one MblogItem per post,
    follows "全文" (full text) links to fetch truncated posts, and stops
    paging a user once posts fall before the CUTOFF_DATE.
    """
    name = 'mwb_2'
    allowed_domains = ['weibo.cn']
    # start_urls = ['http://m.weibo.cn/']

    # Posts with created_at strictly before this ISO date are discarded and
    # paging stops (ISO strings compare correctly lexicographically).
    CUTOFF_DATE = '2018-07-01'

    def start_requests(self):
        """Issue the page-1 timeline request for every target uid."""
        # uids = ['2286908003']
        uids = ["1784473157", "2286908003", "1314608344", "1644114654","1686546714", "1656737654", "2028810631", "1677991972", "3881380517", "1847582585","2615417307", "1191965271", "1643971635", "1778758223", "1977460817", "1656831930", "1699432410", "1722628512", "1267454277","2443459455", "3921730119", "1867571077", "1718493627", "1653460650", "1737737970","3271121353", "1326410461", "1645705403","1653944045", "5977555696", "1992613670","1724367710", "1974808274", "3164957712", "3266943013","2127460165", "2083844833", "5305757517", "2803301701", "2656274875", "1618051664", "1974576991","1642512402", "1402977920","1893801487", "2810373291", "1749990115","1652484947", "1265998927", "1698857957", "1698233740", "5044281310"]
        for uid in uids:
            url = 'https://weibo.cn/u/{uid}?page={page}'.format(
                uid=uid, page=1)
            # NOTE(review): HEADERS is mutated but never attached to the
            # request below, and requests are scheduled asynchronously, so
            # only the last Referer survives anyway — kept for compatibility.
            HEADERS.update({
                'Referer': 'https://weibo.cn/u/{uid}'.format(uid=uid),
            })
            yield scrapy.Request(url=url, cookies=COOKIES,
                                 callback=self.parse_item, meta={'url': url})

    @staticmethod
    def _action_count(mblog, action):
        """Return the int inside "[...]" of an action link (e.g. 赞[12]).

        `action` is the href fragment: 'comment', 'attitude' or 'repost'.
        Returns 0 when the link or the bracketed number is missing.
        (The original pattern was a character class, so it returned only
        the first digit of multi-digit counts.)
        """
        text = mblog.xpath(
            './/a[contains(@href,"%s") and contains(@href,"uid")]/text()' % action
        ).extract_first() or ''
        match = re.search(r'\[(\d+)\]', text)
        return int(match.group(1)) if match else 0

    def parse_item(self, response):
        """Parse one timeline page: yield post items and follow pagination."""
        print(response.url)
        if 'login' in response.url:
            # Redirected to the login page: cookies expired; re-login and
            # retry the originally requested url.
            cookies = login()
            url = response.meta['url']
            yield scrapy.Request(url=url, cookies=cookies,
                                 callback=self.parse_item, meta={'url': url})
            return
        screen_name = response.xpath(
            '//div[@class="ut"]//text()').extract_first().replace('的微博', '')
        print('*****************', screen_name)
        uid, page = re.findall(r'/u/(.*)\?page=(.*)', response.url)[0]
        mblog_list = response.xpath('//div[@class="c" and starts-with(@id,"M_")]')
        for mblog in mblog_list:
            item = MblogItem()
            item['screen_name'] = screen_name
            # Element id looks like "M_<mid>"; slice the prefix exactly —
            # lstrip('M_') would also eat a leading 'M' of the mid itself.
            node_id = mblog.xpath('./@id').extract_first()
            item['mid'] = node_id[2:] if node_id.startswith('M_') else node_id
            item['uid'] = uid
            item['text'] = ''.join(mblog.xpath(
                './/span[@class="ctt"]//text() | ./div/text() | .//img/@alt').extract())
            item['comments_count'] = self._action_count(mblog, 'comment')
            item['attitudes_count'] = self._action_count(mblog, 'attitude')
            item['reposts_count'] = self._action_count(mblog, 'repost')
            # The "ct" span holds "<date>\xa0<source app>".
            date_str = ''
            item['source'] = ''
            ct = mblog.xpath('.//span[@class="ct"]/text()')
            if ct:
                date_and_source = ct.extract_first().split('\xa0')
                date_str = date_and_source[0]
                if len(date_and_source) >= 2:
                    item['source'] = date_and_source[1]
            if not date_str:
                # Without a date we can neither timestamp the post nor apply
                # the cutoff; skip it (original raised NameError here).
                continue
            item['created_at'], item['timestamp'] = trans_date_ts(date_str)
            if item['created_at'] < self.CUTOFF_DATE:
                # Timeline is newest-first: everything after this is older.
                return
            ckAll = mblog.xpath('.//a[text()="全文"]/@href')
            if ckAll:
                # Truncated post: fetch full text before yielding.
                ckAll_url = 'https://weibo.cn' + ckAll.extract_first()
                yield scrapy.Request(url=ckAll_url, callback=self.parse_detail,
                                     meta={'item': item})
            else:
                yield item
        next_page = response.xpath('//a[text()="下页"]/@href')
        if next_page:
            next_page = 'https://weibo.cn' + next_page.extract_first()
            yield scrapy.Request(url=next_page, callback=self.parse_item,
                                 meta={'url': next_page})

    def parse_detail(self, response):
        """Replace a truncated item's text with the full post text, then yield."""
        print(response.url)
        item = response.meta['item']
        mblog = response.xpath('//div[@class="c" and starts-with(@id,"M_")]')
        item['text'] = ''.join(mblog.xpath('.//span[@class="ctt"]//text()').extract())
        yield item
def trans_date_ts(date):
    """Normalize a weibo.cn date string; return (ISO date, unix timestamp).

    Handles the relative forms weibo.cn emits ("N分钟前", "N小时前",
    "昨天 HH:MM", "今天 HH:MM", "刚刚"), year-less "MM-DD HH:MM" dates,
    full "YYYY年MM月DD日 ..." dates and API-style "... +0800 YYYY" strings.

    Returns a tuple (date[:10] as 'YYYY-MM-DD', int unix timestamp).
    Raises ValueError (via date2ts) for unrecognized formats.
    """
    # Normalize CJK date markers to '-' separators.
    date = date.replace('年', '-').replace('月', '-').replace('日', '')
    if '分钟前' in date:
        # "N分钟前" = N minutes ago (rstrip char-set is safe: no digits in it).
        minute = int(date.rstrip('分钟前'))
        date = ts2date(time.time() - minute * 60)
    elif '小时前' in date:
        # "N小时前" = N hours ago.
        hour = int(date.rstrip('小时前'))
        date = ts2date(time.time() - hour * 3600)
    elif '昨天' in date:
        # "昨天 HH:MM" -> yesterday's date plus the given clock time.
        date = get_before_date(1) + date.lstrip('昨天') + ":00"
    elif '今天' in date:
        # "今天 HH:MM" -> today's date plus the given clock time.
        date = get_before_date(0) + date.lstrip('今天') + ":00"
    elif date.count('-') == 1:
        # "MM-DD HH:MM" without a year: assume the current year.
        date = time.strftime("%Y-", time.localtime()) + date + ":00"
    elif '刚刚' in date:
        # "刚刚" ("just now") -> current time.
        date = ts2date(time.time())
    elif '+0800' in date:
        # API style, e.g. "Tue Jul 03 12:00:00 +0800 2018".
        format_time = time.strptime(date, '%a %b %d %H:%M:%S +0800 %Y')
        date = time.strftime('%Y-%m-%d %H:%M:%S', format_time)
    return date[:10], date2ts(date)


def date2ts(date):
    """Convert 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM:SS' to an int unix timestamp.

    Interprets the date in local time (time.mktime). Raises ValueError when
    the string matches neither format.
    """
    try:
        return int(time.mktime(time.strptime(date, '%Y-%m-%d')))
    except ValueError:
        # Fall back to the full datetime format. The original bare `except`
        # also swallowed unrelated errors (e.g. KeyboardInterrupt).
        return int(time.mktime(time.strptime(date, '%Y-%m-%d %H:%M:%S')))


def ts2date(ts):
    """Format a unix timestamp as a local-time 'YYYY-MM-DD HH:MM:SS' string."""
    local_dt = datetime.datetime.fromtimestamp(ts)
    return local_dt.strftime('%Y-%m-%d %H:%M:%S')


def get_before_date(date_cha):
    """Return the date `date_cha` days before today as 'YYYY-MM-DD'.

    E.g. if today is 2018-07-07 and date_cha=1, returns '2018-07-06'.
    """
    target = datetime.date.today() - datetime.timedelta(days=date_cha)
    return target.isoformat()


# if __name__ == '__main__':
#     print trans_date_ts('刚刚')
