from scrapy import Request, Spider
import json
from lxml import etree
import re


# Translation table that deletes every CR and LF character in one pass.
_CRLF_KILLER = str.maketrans('', '', '\r\n')


def prettify_str(line):
    """Return *line* with all CR/LF characters removed and outer whitespace stripped."""
    return line.translate(_CRLF_KILLER).strip()


class WeiBoSpider(Spider):
    """Crawl the comments of a Weibo post together with each commenter's profile.

    The spider reads previously exported login cookies from
    ``login/cookies/weibo_cookies.txt`` (a JSON list of ``{name, value, ...}``
    dicts), pages through the AJAX comment endpoint of one post, and for every
    comment issues a follow-up request for the author's profile card
    (follow/fan/post counts, sex, location).
    """
    name = "weibo_cmt"
    # Be polite to the site: one second between requests.
    download_delay = 1
    custom_settings = {
        'COOKIES_ENABLED': True
    }

    def __init__(self, *args, **kwargs):
        # Scrapy's Spider.__init__ binds the name and any -a command-line
        # arguments; the original code skipped it, which silently breaks
        # spider-argument passing. Forwarding *args/**kwargs keeps the
        # constructor backward-compatible.
        super().__init__(*args, **kwargs)

        # The cookie file is a JSON export (e.g. from a browser extension or
        # a login helper); flatten it into the {name: value} mapping that
        # scrapy.Request's `cookies` parameter expects.
        with open('login/cookies/weibo_cookies.txt', encoding='utf-8') as f:
            cookies_txt = json.load(f)
        self.cookies = {item['name']: item['value'] for item in cookies_txt}

        # Browser-like headers; Referer mimics arriving from the hot-topic page.
        self.headers = {
            "Host": "weibo.com",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Referer": "https://d.weibo.com/623751_10?from=faxian_hot&mod=fenlei"
        }

    def start_requests(self):
        """Kick off crawling at page 1 of the hard-coded post's comment feed."""
        w_id = '4361604247868158'  # the Weibo post (mid) whose comments we scrape
        page = 1
        yield Request(
            url="https://weibo.com/aj/v6/comment/big?id={}&page={}".format(w_id, page),
            callback=self.parse,
            cookies=self.cookies,
            headers=self.headers,
            meta={'page': page, 'w_id': w_id}
        )

    def parse(self, response):
        """Extract one page of comments and schedule the next page plus user lookups.

        The endpoint returns JSON whose ``data.html`` field contains an HTML
        fragment; each comment lives in a ``<div comment_id=...>`` element.
        """
        self.logger.info('parse %s', response.url)
        json_data = json.loads(response.text)
        html = json_data['data']['html']
        resp = etree.HTML(html)
        items = resp.xpath('//div[@comment_id]')
        for item in items:
            postdate_str = item.xpath('.//div[@class="WB_from S_txt2"]/text()')[0]
            data = {
                'user_name': item.xpath('.//a[@usercard]/text()')[0],
                'user_id': item.xpath('.//a[@usercard]/@usercard')[0],
                'user_url': item.xpath('.//a[@usercard]/@href')[0],
                'content': prettify_str(''.join(item.xpath('.//div[@class="WB_text"]/text()'))),
                # The like counter shows the literal character 赞 ("like")
                # when the count is zero; normalise that to '0'.
                'like': item.xpath('.//span[@node-type="like_status"]/em[2]/text()')[0].replace('赞', '0'),
                'postdate_str': postdate_str,
            }
            # `usercard` already holds a ready-made query string (e.g. "id=...").
            # dont_filter: many comments can share one author, and we want the
            # profile fetched for each comment item.
            yield Request(
                url="https://weibo.com/aj/v6/user/newcard?{}".format(data['user_id']),
                callback=self.parse_user,
                meta={'data': data},
                cookies=self.cookies,
                headers=self.headers,
                dont_filter=True
            )
        # Paginate until the server-reported current page reaches the total.
        totalpage = json_data['data']['page']['totalpage']
        pagenum = json_data['data']['page']['pagenum']
        page = response.meta['page']
        if pagenum < totalpage:
            next_page = page + 1
            w_id = response.meta['w_id']
            yield Request(
                url="https://weibo.com/aj/v6/comment/big?id={}&page={}".format(w_id, next_page),
                callback=self.parse,
                cookies=self.cookies,
                headers=self.headers,
                meta={'page': next_page, 'w_id': w_id}
            )

    def parse_user(self, response):
        """Enrich the comment dict from meta with the author's profile card and yield it."""
        self.logger.info('parse_user %s', response.url)
        data = response.meta['data']
        # The endpoint answers with a JSONP-like wrapper:
        #   try{(<json>)}catch(e){};
        # Raw string avoids invalid-escape warnings for \( and \).
        json_str = re.findall(r'try{\((.*)\)}catch\(e\){};', response.text)[0]
        html = json.loads(json_str)['data']
        resp = etree.HTML(html)
        data['user_name'] = resp.xpath('//a[@class="W_f14"]/@title')[0]
        data['follow_num'] = resp.xpath('//span[@class="c_follow W_fb"]//em[contains(@class,"num")]/text()')[0]
        data['fans_num'] = resp.xpath('//span[@class="c_fans W_fb"]//em[contains(@class,"num")]/text()')[0]
        data['weibo_num'] = resp.xpath('//span[@class="c_weibo W_fb"]//em[contains(@class,"num")]/text()')[0]
        data['sex'] = resp.xpath('//div[@class="name"]/em/@title')[0]
        # A user may list zero or several areas; join whatever is present.
        data['location'] = ''.join(resp.xpath('//a[contains(@suda-uatrack,"chick_area")]/@title'))
        yield data
