from scrapy import Request, Spider, FormRequest
from urllib import request
import re
import datetime
import json


def beautify_str(txt):
    """Strip surrounding whitespace and delete commas, tabs and newline
    characters from *txt*, returning the cleaned string.

    The original chained five .replace() calls (removing '\\r\\n' twice,
    the second being dead code once '\\r' and '\\n' were gone); a single
    str.translate() pass deletes the same character set in one scan.
    """
    # maketrans('', '', chars): every character in the third argument maps
    # to None, i.e. is removed by translate().
    return txt.strip().translate(str.maketrans('', '', ',\t\r\n'))


def beautify_date(txt):
    """Normalise a free-form Weibo timestamp string to 'YYYYMMDD'.

    Handles three shapes:
      * relative times containing '今天' / '分钟' / '小时' -> today's date
      * 'M月D日'  -> that month/day in the current year
      * 'Y-M-D'   -> that exact date
    Anything else is returned unchanged.
    """
    # Take a single "now" snapshot; calling datetime.now() twice (as the
    # original did) could yield an inconsistent year/day across midnight.
    now = datetime.datetime.now()
    if any(word in txt for word in ('今天', '分钟', '小时')):
        return now.strftime('%Y%m%d')
    # Raw strings: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning; SyntaxWarning on Python 3.12+).
    date_match = re.search(r'\d+月\d+日', txt)
    if date_match:
        # Month-day only: assume the current year.
        return datetime.datetime.strptime(
            '{}年{}'.format(now.strftime('%Y'), date_match.group()),
            '%Y年%m月%d日'
        ).strftime('%Y%m%d')
    datetime_match = re.search(r'\d+-\d+-\d+', txt)
    if datetime_match:
        return datetime.datetime.strptime(
            datetime_match.group(), '%Y-%m-%d'
        ).strftime('%Y%m%d')
    # Unrecognised format: pass the text through untouched.
    return txt


def get_cookies():
    """Load the saved Weibo login cookies from disk and return them as a
    name -> value dict usable by scrapy Requests.

    Reads 'login/cookies/weibo(mb)_cookies.txt', which holds a JSON list
    of objects each carrying at least 'name' and 'value' keys.
    """
    with open('login/cookies/weibo(mb)_cookies.txt', encoding="utf-8") as fp:
        stored = json.load(fp)
    return {entry['name']: entry['value'] for entry in stored}


class WBSearchMSpider(Spider):
    """Search-result spider for weibo.cn.

    For every keyword listed in keywords.txt it fetches the first three
    pages of hot-sorted mblog search results and yields one dict per post
    (keyword, nickname, body text, like/repost/comment counts, post date,
    client source).
    """
    name = 'weibo_search_mb'
    # Loaded once at class-definition (import) time from the saved login file.
    cookies = get_cookies()
    # cookies = {
    #     "_T_WM": "86ec90418c3fa3efc3f9cfc6d0cd265b",
    #     "SUB": "_2A25xvf2hDeRhGeNL7lQT9yrIzT2IHXVTQYPprDV6PUJbkdAKLUGikW1NSO9BHV3J2K4xIVOgZ-Hv7-XTs8yMSpi3",
    #     "SUHB": "0102xqZNLcQlx8",
    #     "SCF": "AjSP-W5ifiECVaHSAJHJCwEyeGdlpJ3FHm0s6b13oONMyeiU7cxmoC6RaKUBuLe7WLCmFigsy0M7WJb6Zp4OBiU.",
    #     "SSOLoginState": "1555664369"
    # }
    headers = {
        # NOTE(review): ':authority'/':method'/':path'/':scheme' are HTTP/2
        # pseudo-headers, not regular request headers, and the hard-coded
        # 'cookie' entry duplicates (and may conflict with) the `cookies`
        # attribute above — confirm the target accepts this; kept as-is to
        # preserve behaviour.
        ":authority": "weibo.cn",
        ":method": "GET",
        ":path": "/",
        ":scheme": "https",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "cookie": "_T_WM=86ec90418c3fa3efc3f9cfc6d0cd265b; SUB=_2A25xvf2hDeRhGeNL7lQT9yrIzT2IHXVTQYPprDV6PUJbkdAKLUGikW1NSO9BHV3J2K4xIVOgZ-Hv7-XTs8yMSpi3; SUHB=0102xqZNLcQlx8; SCF=AjSP-W5ifiECVaHSAJHJCwEyeGdlpJ3FHm0s6b13oONMyeiU7cxmoC6RaKUBuLe7WLCmFigsy0M7WJb6Zp4OBiU.; SSOLoginState=1555664369",
        "pragma": "no-cache",
        "referer": "https://weibo.cn/",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
    }

    def start_requests(self):
        """Yield one search Request per (keyword, page) pair, pages 1-3."""
        print(self.cookies)
        with open("keywords.txt", encoding="utf-8") as f:
            keywords = f.read().split('\n')
        for sub_keyword in keywords:
            # URL-encode the (typically Chinese) keyword for the query string.
            keyword = request.quote(sub_keyword)
            for page in range(1, 4):
                yield Request(
                    url="https://weibo.cn/search/mblog?hideSearchFrame=&keyword={}&sort=hot&page={}".format(keyword, page),
                    cookies=self.cookies,
                    headers=self.headers,
                    callback=self.parse,
                    # Carry the raw (un-encoded) keyword through to parse().
                    meta={'keyword': sub_keyword}
                )

    def parse(self, response):
        """Extract every post on one search-result page and yield it as a dict."""
        # Each post is a <div class="c" id="..."> element.
        items = response.xpath('//div[@class="c" and @id]')
        for item in items:
            nickname = item.xpath(".//a[@class='nk']/text()").extract()[0]
            content = beautify_str(''.join(item.xpath(".//span[@class='ctt']//text()").extract()))
            zan = item.xpath('.//a[contains(text(),"赞")]/text()').extract()[0]
            zf = item.xpath('.//a[contains(text(),"转发")]/text()').extract()[0]
            comment = item.xpath('.//a[contains(text(),"评论")]/text()').extract()[0]
            # BUG FIX: was response.xpath(...), which always grabbed the first
            # ".ct" span of the whole page, stamping every post with the same
            # date/source; scope the lookup to this item.
            post_content = item.xpath('.//span[@class="ct"]/text()').extract()[0]
            # The date and the client ("来自 ...") are separated by a
            # non-breaking space; the client part may be absent.
            parts = post_content.split('\xa0')
            postdate = parts[0]
            # BUG FIX: the original tested len(split-result), which is always
            # >= 1, so its else branch was dead and parts[1] could raise
            # IndexError when no '\xa0' is present.
            srcsys = parts[1] if len(parts) > 1 else ''
            data = {
                '关键词': response.meta['keyword'],
                '用户名': nickname,
                '正文': content,
                # Raw strings: '\d' in a plain literal is an invalid escape.
                '赞': re.findall(r'\d+', zan)[0],
                '转发': re.findall(r'\d+', zf)[0],
                '评论': re.findall(r'\d+', comment)[0],
                '发表日期': beautify_date(postdate),
                '来自': srcsys
            }
            print(data)
            yield data
