# -*- coding: utf-8 -*-
# 爬取指定用户微博信息

import scrapy, re
from scrapy.loader import ItemLoader
from scrapy.crawler import CrawlerProcess

from sina_crawl.items import WeiBoContentItem


class SinaWeiboSpider(scrapy.Spider):
    """Crawl the mobile-web (weibo.cn) profile timelines of the configured
    Weibo accounts and yield one :class:`WeiBoContentItem` per post.

    Login is skipped by replaying a fixed cookie jar; requests carry a
    browser-like header set. Pagination is followed until the profile's
    last page is reached.
    """

    accounts = ['1260427471','210926262']
    name = 'sina_weibo'
    allowed_domains = ['weibo.cn']
    start_urls = ['https://weibo.cn/1260427471/profile','https://weibo.cn/210926262/profile']

    domain = 'https://weibo.cn'

    def __init__(self, *args, **kwargs):
        # FIX: the original override dropped Scrapy's constructor arguments
        # and never called super().__init__(), skipping the base Spider
        # initialisation (name/kwargs handling used by from_crawler()).
        super().__init__(*args, **kwargs)
        # Session cookies replayed on every request so no login is needed.
        self.cookies = {
            '_T_WM': 'f38d77eebf98b95c31423ac305ada352',
            'TMPTOKEN': 'PWDbPGO1BnYAyk86DT9BlPs4S18ncI7aqKzjgecWYf7chRiQkqcGtVq5p9uEvCFM',
            'SUB': '_2A25x81XIDeRhGeVG7FcZ9yvPzT-IHXVTHHuArDV6PUJbkdAKLWvAkW1NT5gs2F8UZNJ-Bd9MV8SelMC4vqwL3gn2',
            'SUHB': '0mxUQkSnacRLcw',
            'SCF': 'Al6oYEOIHG6uNyht6TbHJp7SLcrD339k-BxY3pGt5lGPyEyKlivwIop0lNiHDm61d7BD84odH17oCPuY3HjrXhI.',
            'SSOLoginState': '1559700888'
        }
        # Browser-like request headers shared by every request.
        self.headers = {
            'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'referer': 'https://weibo.cn/',
            'upgrade-insecure-requests': '1',
        }

    def start_requests(self):
        """Seed the crawl: one cookie-carrying request per profile URL."""
        return [scrapy.Request(url=url, callback=self.parse, headers=self.headers, cookies=self.cookies) for url in
                self.start_urls]

    def parse(self, response):
        """Parse one profile page.

        Yields a populated item per post; follows "view full text" and
        picture-gallery links with the partially filled item in ``meta``;
        finally follows the next-page link, if any.
        """
        selector = scrapy.Selector(response)
        posts = selector.xpath('//div[@class="c"]')
        user_id = re.compile(r'https://weibo.cn/(\d+)/').findall(response.url)
        user_id = user_id[0] if user_id else ''
        for each in posts:
            # Skip non-post containers (no inner <div>).
            if not each.xpath('div'):
                continue
            item = WeiBoContentItem()
            item['user_id'] = user_id
            contents = each.xpath('div').xpath('span[@class="ctt"]').extract()
            if not contents:
                # Fall back to the alt text of emoticon/sticker images.
                contents = each.xpath('div').xpath('span[@class="ctt"]/img/@alt').extract()
            # Raw-string patterns: avoids invalid-escape warnings for \[ \d.
            like_count = each.xpath('div/a[starts-with(@href,"https://weibo.cn/attitude/")]/text()').re(
                r'赞\[(\d+)\]')
            retweet_count = each.xpath('div/a[starts-with(@href,"https://weibo.cn/repost/")]/text()').re(
                r'转发\[(\d+)\]')
            comment_count = each.xpath('div/a[starts-with(@href,"https://weibo.cn/comment/")]/text()').re(
                r'评论\[(\d+)\]')
            weibo_id = each.xpath('div/a[starts-with(@href,"https://weibo.cn/attitude/")]/@href').re(
                self.domain + '/attitude/(.*)/add.*')
            res = each.xpath('div/span[@class="ct"]/text()').re(r'(.*)来自(.*)')
            post_time = res[0] if res else ''
            terminal = res[1] if res and len(res) >= 2 else ''
            item['weibo_type'] = '转发' if each.xpath('div/span[@class="cmt"]').extract() else '原创'
            reason = ''
            for div in each.xpath('div'):
                txt = div.extract()
                # FIX: membership test replaces ".find(...) > 0", which
                # missed a marker at index 0.
                if u'转发理由' in txt:
                    # FIX: guard the regex result — the original indexed
                    # [0] unconditionally and could raise IndexError.
                    matched = re.compile(u'</span>(.*)<br>').findall(txt)
                    if matched:
                        reason = matched[0]
            item['weibo_id'] = weibo_id[0] if weibo_id else ''
            item['like_count'] = int(like_count[0]) if like_count else 0
            item['retweet_count'] = int(retweet_count[0]) if retweet_count else 0
            item['comment_count'] = int(comment_count[0]) if comment_count else 0
            item['post_time'] = post_time
            item['terminal'] = terminal
            item['weibo_content'] = reason
            full_text_link = each.xpath('div')[0].xpath('span/a/@href').extract()
            if full_text_link:
                # "View full text" — finish the item in parse_context.
                yield scrapy.Request(url=self.domain + full_text_link[0], callback=self.parse_context,
                                     headers=self.headers, cookies=self.cookies, meta={'item': item})
            else:
                if '转发' == item['weibo_type']:
                    item['weibo_content'] = reason
                else:
                    if len(contents) > 1:
                        item['weibo_content'] = contents[1]
                    else:
                        if contents:
                            ctt = re.compile('<span class="ctt">(.*)</span>').findall(contents[0])
                            item['weibo_content'] = ctt[0] if ctt else ''
                        else:
                            item['weibo_content'] = ''
                # Multi-image gallery page for this post.
                multiple_images = each.xpath('div')[0].xpath(
                    'a[starts-with(@href,"https://weibo.cn/mblog/picAll/")]/@href').extract()
                if multiple_images:
                    yield scrapy.Request(url=multiple_images[0], callback=self.parse_images,
                                         headers=self.headers,
                                         cookies=self.cookies, meta={'item': item})
                images = each.xpath('div/a[starts-with(@href,"https://weibo.cn/mblog/oripic")]/@href').re(
                    'u=(.*)')
                if images:
                    # image_url is always a non-empty formatted string, so
                    # the original "if image_url else []" branch was dead.
                    item['weibo_images'] = ['http://wx3.sinaimg.cn/large/%s.jpg' % images[0]]
            yield item

        # Next page of the profile timeline.
        next_link = selector.xpath('//div[@class="pa"]/form/div/a/@href').extract()
        if next_link:
            url = self.domain + next_link[0]
            # Use the spider logger instead of bare print().
            self.logger.info(url)
            yield scrapy.Request(url=url, callback=self.parse, cookies=self.cookies,
                                 headers=self.headers)
        else:
            user_id = re.compile(self.domain + r'/(\d+)/.*').findall(response.url)
            user_id = user_id[0] if user_id else ''
            self.logger.info(u'[%s] 微博已爬取完毕！' % user_id)

    def parse_context(self, response):
        """Fill in the full post text (and images) of a "view full text" page."""
        item = response.meta['item']
        selector = scrapy.Selector(response=response)
        load = ItemLoader(item=item, selector=selector, response=response)
        # FIX: the original "not x and '' == x" reduces to "x == ''" —
        # only fill the content when it is present but empty.
        if item.get('weibo_content') == '':
            load.add_xpath('weibo_content', '//*[@id="M_"]/div[1]/span/text()')
        load.add_xpath('post_time', '//*[@id="M_"]/div[2]/span/text()')
        images = selector.xpath('//*[@id="M_"]/div[1]/a[2]/@href').extract()
        if images:
            yield scrapy.Request(url=self.domain + images[0], callback=self.parse_images, headers=self.headers,
                                 cookies=self.cookies, meta={'item': load.load_item()})
        else:
            image = selector.xpath('//*[@id="M_"]/div[2]/a[2]/@href').re(r'u=(.*)\&')
            if image:
                load.add_value('weibo_images', 'http://wx3.sinaimg.cn/large/%s.jpg' % image[0])
        yield load.load_item()

    def parse_images(self, response):
        """Collect every original-size image URL from a picAll gallery page."""
        item = response.meta['item']
        selector = scrapy.Selector(response)
        load = ItemLoader(item=item, selector=selector, response=response)
        for each in selector.xpath('//div[@class="c"]'):
            origin = each.xpath('a[2]/@href')
            hrefs = origin.extract()
            if hrefs and hrefs[0].find('/mblog/oripic') >= 0:
                self.logger.debug('%s', hrefs)
                # FIX: guard the regex result before indexing [0].
                image_id = origin.re(r'u=(.*)\&')
                if image_id:
                    load.add_value('weibo_images', 'http://wx3.sinaimg.cn/large/%s.jpg' % image_id[0])
        yield load.load_item()


if __name__ == '__main__':
    # Standalone entry point: run the spider without the "scrapy crawl" CLI.
    settings = {
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
    }
    crawler_process = CrawlerProcess(settings)
    crawler_process.crawl(SinaWeiboSpider)
    crawler_process.start()
