# -*- coding: utf-8 -*-
import scrapy
import datetime
from meizhuang.items import *
from urllib.parse import quote
from scrapy import Request
import random
from copy import deepcopy
import re
import json

class LargeSpider(scrapy.Spider):
    """Crawl Weibo topic searches into WeiboItem records.

    Pipeline: start_requests -> parse_page (discover pagination) ->
    parse_index (extract post links) -> parse_detail (post content and
    engagement counts) -> parse_user (author profile), yielding one
    fully-populated WeiboItem per post.
    """

    name = 'large'
    allowed_domains = ['weibo.cn', 'm.weibo.cn',]
    # Hashtag topics to search (Weibo topic syntax: #...#).
    topic_list = [
        '#好物分享#',
        '#美妆爱用品#',
        '#空瓶记#',
        '#香水#',
        '#美妆生活#',
        '#彩妆#',
    ]

    # Topic search-result URL template (hasori=1 restricts to original
    # posts, sort=time orders newest first).
    url_format = "https://weibo.cn/search/mblog?hideSearchFrame=&keyword={keyword}&advancedfilter=1&hasori=1&starttime={starttime}&endtime={endtime}&sort=time"

    # Earliest date to search; edit as needed. Weibo launched 2009-08-16,
    # so do not use anything earlier than that.
    date_start = datetime.datetime.strptime("2019-01-20", '%Y-%m-%d')
    # Latest date to search; edit as needed.
    date_end = datetime.datetime.strptime("2020-01-20", '%Y-%m-%d')

    # Crawl in 3-day windows (fewer requests, and sparse topics are more
    # likely to have at least one hit per window): a 2-day spread yields
    # an inclusive [start, start+2] date range.
    time_spread = datetime.timedelta(days=2)
    # Step between consecutive windows so ranges do not overlap.
    delta_time = datetime.timedelta(days=1)

    # Maximum number of result pages fetched per window.
    designated_page = 10

    # User-profile API (the summary shown at the top of a user's page
    # after clicking their avatar or nickname).
    user_url = 'https://m.weibo.cn/api/container/getIndex?uid={uid}&type=uid&value={uid}&containerid=100505{uid}'

    def start_requests(self):
        """Yield one search request per topic per date window, newest window first."""
        for topic in self.topic_list:
            item = WeiboItem()
            item["topic"] = topic
            date_start = self.date_start
            end_time = self.date_end
            while end_time >= date_start:
                next_time = end_time - self.time_spread
                # Clamp the window start so it never precedes date_start.
                next_time = next_time if next_time >= date_start else date_start
                url = self.url_format.format(keyword=quote(topic),
                                             starttime=next_time.strftime("%Y%m%d"),
                                             endtime=end_time.strftime("%Y%m%d"))
                end_time = next_time - self.delta_time
                # Dedup filtering must stay ON here: re-requesting this URL
                # wastes time, and the follow-up requests append "&page=N",
                # which produces distinct fingerprints, so they are never
                # mistakenly filtered against this one.
                yield Request(url, callback=self.parse_page, meta={"item": item}, dont_filter=False)

    # 1. Discover how many result pages exist and fan out over them.
    def parse_page(self, response):
        """Read the hit count/page count of a search window and request pages."""
        item = response.meta.get("item")
        # Total hit count. Weibo inflates this number, but a non-empty match
        # reliably means the window has results.
        weibo_count = response.xpath('//span[@class="cmt"]/text()').re_first(r'共(\d*?)条')
        if weibo_count:
            total_page = response.xpath('//div[@id="pagelist"]/form/div/text()').re_first(r'\/(\d*?)页')
            if total_page:
                total_page = int(total_page)
                # Cap the per-window request volume at designated_page pages
                # (was a hard-coded 10, which duplicated the class constant).
                if total_page <= self.designated_page:
                    page_list = list(range(1, total_page + 1))
                else:
                    # Sample a random subset of pages instead of all of them.
                    page_list = random.sample(range(1, total_page + 1), self.designated_page)
                for page in page_list:
                    page_url = response.url + "&page=" + str(page)
                    yield Request(page_url, callback=self.parse_index, meta={"item": item})
            else:
                # No pagination widget: there is only a single result page.
                page_url = response.url + "&page=1"
                yield Request(page_url, callback=self.parse_index, meta={"item": item})

    # 2. Extract post detail-page links from one page of search results.
    def parse_index(self, response):
        """Request the comment/detail page of every post listed on this page."""
        item = response.meta.get("item")
        weibo_list = response.xpath('//div[@class="c" and contains(@id, "M_")]')
        if len(weibo_list) > 0:
            for weibo in weibo_list:
                # '.' selects the element's own text content ("评论[" is the
                # "comments[" link label).
                detail_url = weibo.xpath('.//a[contains(., "评论[")]/@href').extract_first()
                # Guard: a post without a comment link yields None, and
                # Request(None) raises ValueError, aborting the whole callback.
                if detail_url:
                    # Default dedup applies: each post is fetched at most once.
                    yield Request(detail_url, callback=self.parse_detail, meta={"item": deepcopy(item)})

    # 3. Parse one post's content and engagement statistics.
    def parse_detail(self, response):
        """Fill the item with post-level fields, then request the author profile."""
        item = response.meta.get("item")
        url = response.url
        # Some requests get redirected off the comment page; detect that and
        # re-enter the pipeline below. (2020-03-07 fix)
        if "comment" in url:
            wid = re.search(r'comment\/(.*?)\?', url).group(1)  # post id
            uid = re.search(r'uid=(.*?)&', url).group(1)  # author id
            # Posts containing emoji were truncated by narrower XPaths, so
            # take every text node under div[1] and cut at the first
            # timestamp-looking token — when the post has no picture, the
            # post date leaks into div[1] and must be dropped.
            # (2020-04-06 final fix)
            raw_content = response.xpath('//div[@id="M_"]/div[1]//text()').extract()
            raw_content = [i.strip() for i in raw_content if len(i.strip()) > 0]
            content = []
            for element in raw_content:
                if re.search(r"\d+:\d+", element):
                    break
                content.append(element)
            content = ' '.join(content)
            # div[2] exists only when the post carries at least one picture.
            pic_num = 1 if response.xpath('//div[@id="M_"]/div[2]') else 0
            if pic_num:
                # "组图共N张" means "album of N pictures" — use N when present.
                if response.xpath('//div[@id="M_"]//a[contains(.,"组图共")]'):
                    pic_num = int(response.xpath('//div[@id="M_"]//a[contains(.,"组图共")]').re_first(r'组图共(\d*?)张'))
            # '[' must be escaped inside the character-count regexes below.
            comment_count = response.xpath('//span[@class="pms"]/text()').re_first(r'评论\[(\d*?)\]')
            forward_count = response.xpath('//a[contains(., "转发[")]/text()').re_first(r'转发\[(\d*?)\]')
            like_count = response.xpath('//a[contains(., "赞[")]/text()').re_first(r'赞\[(.*?)\]')
            # Normalize counts to ints (missing -> 0). like_count's capture
            # group is not digit-restricted, so also guard against a
            # non-numeric capture that would make int() raise ValueError.
            comment_count = int(comment_count) if comment_count is not None else 0
            forward_count = int(forward_count) if forward_count is not None else 0
            like_count = int(like_count) if like_count is not None and like_count.isdigit() else 0
            posted_at = response.xpath('//span[@class="ct"]/text()').extract_first()
            user = response.xpath('//div[@id="M_"]/div[1]/a/text()').extract_first(default=None)
            item["wid"] = wid
            item["user"] = user
            item["uid"] = uid
            item["content"] = content
            item["comment_count"] = comment_count
            item["forward_count"] = forward_count
            item["like_count"] = like_count
            item["pic_num"] = pic_num
            item["posted_at"] = posted_at

            # 4. Fetch the author's profile next. dont_filter defaults to
            # False, so each author is requested at most once (set it to True
            # to re-fetch the profile for every one of their posts).
            yield Request(self.user_url.format(uid=uid), callback=self.parse_user, meta={"item": item}, )

        else:
            # Redirected/unexpected URL: feed it back through parse_page.
            # (2020-03-07 fix)
            yield Request(url, callback=self.parse_page, meta={"item": deepcopy(item)})

    # 5. Parse the author's profile from the JSON API response.
    def parse_user(self, response):
        """Fill the item with the author's profile fields and yield it.

        :param response: Response object from the m.weibo.cn getIndex API.
        :yield: the completed WeiboItem.
        """
        result = json.loads(response.text)
        # .get('data', {}) guards against error payloads lacking a 'data'
        # key — the original chained .get() raised AttributeError on None.
        user_info = result.get('data', {}).get('userInfo')
        if user_info:
            item = response.meta.get("item")
            # Map item field name -> JSON attribute name (they differ for
            # the follower/post counters).
            field_map = {
                'gender': 'gender',
                'description': 'description',
                'fans_count': 'followers_count',
                'follows_count': 'follow_count',
                'weibos_count': 'statuses_count',
                'verified': 'verified',
                'verified_reason': 'verified_reason',
                'verified_type': 'verified_type',
                'verified_type_ext': 'verified_type_ext',
            }
            for field, attr in field_map.items():
                item[field] = user_info.get(attr)
            yield item