import scrapy
from scrapy.cmdline import execute
import re
from datetime import datetime, timedelta
from spiderx.tools.client import redisClientx
from spiderx.item.items import SpiderxItem
from spiderx.middlewares.http import ProxyRequest
import requests
import json
# Splash Lua script: navigate to the requested URL, wait 10 s for the page's
# JavaScript to settle, then return the browser session cookies.
lua_script = '''
assert(splash:go(args.url))
assert(splash:wait(10.0))
return splash:get_cookies()
'''
class SearchSpider(scrapy.Spider):
    """Crawl s.weibo.com keyword-search results from the last 24 hours.

    Keywords and the login cookie are read from Redis in ``start_requests``;
    each result page is parsed into ``SpiderxItem`` objects by
    ``parse_weibo``.
    """
    name = 'weiboSearch'
    allowed_domains = ['weibo.com']

    base_url = 'https://s.weibo.com'
    # Search window: the 24 hours preceding spider start-up, day granularity.
    start_date = (datetime.now() - timedelta(hours=24)).strftime("%Y-%m-%d")
    end_date = datetime.now().strftime("%Y-%m-%d")
    # Error flags — presumably inspected by pipelines/extensions elsewhere
    # in the project; kept for backward compatibility. TODO confirm usage.
    mongo_error = False
    pymongo_error = False
    mysql_error = False
    pymysql_error = False
    REGION = []
    custom_settings = {
        'SCHEDULER': 'scrapy.core.scheduler.Scheduler',
        'DOWNLOAD_DELAY': 5,
        'COOKIES_ENABLED': True,
        'DEPTH_LIMIT': 0
    }
    # NOTE(review): this Splash call runs at import time and will raise if the
    # Splash service is unreachable — consider moving it into start_requests().
    # Kept here to preserve existing behaviour.
    splash_url = 'http://172.17.0.1:8050/run?url=https://s.weibo.com'
    headers = {'content-type':'application/json'}
    data = json.dumps({'lua_source':lua_script})
    response = requests.post(splash_url, headers=headers, data=data)

    # Assemble a "name=value; " cookie header from the Splash-returned
    # cookies scoped to .weibo.com.
    ck = response.json()
    cookie = ''
    for c in ck:
        if c['domain'] == '.weibo.com':
            cookie = cookie + c['name'] + '=' + c['value'] + '; '

    keyword_list = ''

    def start_requests(self):
        """Yield one search request per keyword stored in Redis.

        Reads the space-separated keyword list and the login cookie from
        Redis, builds a timescope-filtered search URL for each non-empty
        keyword, and yields a ProxyRequest for it.
        """
        self.keyword_list = redisClientx.getClient().get('keywords').decode('utf-8', 'ignore').split(' ')
        # Overrides the Splash-derived class-level cookie with the one
        # maintained in Redis.
        self.cookie = redisClientx.getClient().get('WEIBO_COOKIES').decode('utf-8', 'ignore')
        start_date = datetime.strptime(self.start_date, '%Y-%m-%d')
        # end date is exclusive on s.weibo.com, hence the +1 day.
        end_date = datetime.strptime(self.end_date,
                                     '%Y-%m-%d') + timedelta(days=1)
        start_str = start_date.strftime('%Y-%m-%d') + '-0'
        end_str = end_date.strftime('%Y-%m-%d') + '-0'
        for keyword in self.keyword_list:
            if keyword != '':
                base_url = 'https://s.weibo.com/weibo?q=%s' % keyword
                url = base_url + '&vip=2'
                url += '&suball=1'
                url += '&timescope=custom:{}:{}'.format(start_str, end_str)
                # BUG FIX: the original passed self.cookies, which is never
                # defined anywhere (the attribute set above is self.cookie)
                # and raised AttributeError on the first request.
                yield ProxyRequest(url=url,
                                   callback=self.parse_weibo,
                                   headers={'cookie': self.cookie},
                                   meta={
                                       'base_url': base_url,
                                       'keyword': keyword,
                                   })

    def parse_weibo(self, response):
        """Parse one search-result page into SpiderxItem objects.

        Walks every result card, picks the expanded ("full") text node when
        the post is a long weibo, normalises the text and timestamp, and
        yields one item per post.
        """
        keyword = response.meta.get('keyword')
        for sel in response.xpath("//div[@class='card-wrap']"):
            info = sel.xpath(
                "div[@class='card']/div[@class='card-feed']/div[@class='content']/div[@class='info']"
            )
            if info:
                weibo = SpiderxItem()
                weibo['description'] = ''
                weibo['platform'] = 'weibo'
                # @mid may be absent on some cards; default to '' so the
                # URL concatenation below cannot raise TypeError.
                weibo['pId'] = sel.xpath('@mid').extract_first() or ''
                weibo['url'] = 'https://m.weibo.cn/detail/' + weibo['pId']

                txt_sel = sel.xpath('.//p[@class="txt"]')[0]
                retweet_sel = sel.xpath('.//div[@class="card-comment"]')

                content_full = sel.xpath(
                    './/p[@node-type="feed_list_content_full"]')
                is_long_weibo = False

                # Prefer the expanded text node unless the only "full" node
                # belongs to the quoted (retweeted) post inside card-comment.
                if content_full:
                    if not retweet_sel:
                        txt_sel = content_full[0]
                        is_long_weibo = True
                    elif len(content_full) == 2:
                        # Both the outer post and the quoted post are long:
                        # the first full node is the outer post's.
                        txt_sel = content_full[0]
                        is_long_weibo = True
                    elif retweet_sel[0].xpath(
                            './/p[@node-type="feed_list_content_full"]'):
                        # Only the quoted post is long — keep the short text.
                        pass
                    else:
                        txt_sel = content_full[0]
                        is_long_weibo = True
                # Guard against a missing text node (extract_first() -> None).
                weibo_text = (txt_sel.xpath('string(.)').extract_first() or '') \
                    .replace('\u200b', '') \
                    .replace('\n', '') \
                    .replace('\u3000', '') \
                    .replace('收起全文d', '') \
                    .replace('\ue627', '') \
                    .strip()

                weibo['content'] = weibo_text.replace(' ', '')
                if is_long_weibo:
                    # Drop the trailing "collapse" suffix left on long posts.
                    weibo['content'] = weibo_text[:-6]

                weibo['title'] = self.get_topics(txt_sel, weibo['content'])

                # Guard against a missing timestamp node as well.
                created_at = (sel.xpath(
                    '(.//div[@class="from"])[last()]/a[1]/text()'
                ).extract_first() or '').replace(' ', '').replace('\n', '').split('前')[0]

                weibo['pubtime'] = self.standardize_date(created_at)

                source = sel.xpath('.//a[@class="name"]/text()').get()
                weibo['source'] = source if source else ''
                weibo['keyword'] = keyword

                yield weibo

    def get_topics(self, txt_sel, content):
        """Return a title for the post: the first #topic# hashtag found in
        the text, falling back to the plain content.

        NOTE(review): this method was referenced by parse_weibo but missing
        from the file (calling it raised AttributeError). Implementation
        reconstructed from the call site and the otherwise-unused `re`
        import — confirm against the original intent.
        """
        topics = re.findall(r'#([^#]+)#', content or '')
        if topics:
            return topics[0]
        return content

    def standardize_date(self, created_at):
        """Normalise Weibo's relative/short timestamps to 'YYYY-MM-DD HH:MM'.

        Handles "刚刚" (just now), "N秒" / "N分钟" / "N小时" ago, "今天HH:MM",
        "MM月DD日 HH:MM" (current year implied), and full "YYYY-MM-DD HH:MM".
        """
        if "刚刚" in created_at:
            created_at = datetime.now().strftime("%Y-%m-%d %H:%M")
        elif "秒" in created_at:
            second = created_at[:created_at.find(u"秒")]
            second = timedelta(seconds=int(second))
            created_at = (datetime.now() - second).strftime("%Y-%m-%d %H:%M")
        elif "分钟" in created_at:
            minute = created_at[:created_at.find(u"分钟")]
            minute = timedelta(minutes=int(minute))
            created_at = (datetime.now() - minute).strftime("%Y-%m-%d %H:%M")
        elif "小时" in created_at:
            hour = created_at[:created_at.find(u"小时")]
            hour = timedelta(hours=int(hour))
            created_at = (datetime.now() - hour).strftime("%Y-%m-%d %H:%M")
        elif "今天" in created_at:
            today = datetime.now().strftime('%Y-%m-%d')
            created_at = today + ' ' + created_at[2:]
        elif '年' not in created_at:
            # "MM月DD日HH:MM" style — fixed character positions.
            year = datetime.now().strftime("%Y")
            month = created_at[:2]
            day = created_at[3:5]
            time = created_at[6:]
            created_at = year + '-' + month + '-' + day + ' ' + time
        else:
            # Full "YYYY-MM-DD HH:MM"-like string — re-slice to normalise.
            year = created_at[:4]
            month = created_at[5:7]
            day = created_at[8:10]
            time = created_at[11:]
            created_at = year + '-' + month + '-' + day + ' ' + time
        return created_at


if __name__ == '__main__':
    # Allow running this file directly for local debugging;
    # equivalent to the CLI command `scrapy crawl weiboSearch`.
    execute(['scrapy', 'crawl', 'weiboSearch'])
