# -*- coding: utf-8 -*-
import scrapy
import json
import time
import math
from kaola.items import KaolaArticleItem,KaolaItem
import re
from kaola.redisfilter import BloomFilter
import itertools

class KlSpider(scrapy.Spider):
    """Crawl the Kaola community: feed pages -> author profiles -> each author's articles.

    Pipeline of callbacks:
        start_requests -> parse_list   (feed pages, dedup authors via redis BloomFilter)
        parse_list     -> parse_info   (author profile, yields KaolaItem)
        parse_info     -> article_info (per-author article pages, yields KaolaArticleItem)
    """
    name = 'kltj'
    custom_settings = {
        "DEFAULT_REQUEST_HEADERS": {
            'User-Agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.2; MI 6  Build/NMF26X)',
            'Cookie': 'current_env=online;NTESKL-COMMUNITYSI=01B12BA3578D7F12FA0E4981C3126E81.kaola-community-web-onlinejd002.v1.kaola.jd1.vpc-8081;',
        },
    }

    # Page size of the per-author feed endpoint; used to derive the page count.
    # NOTE(review): 19 was hard-coded in the original — confirm against the API.
    ARTICLES_PER_PAGE = 19

    @staticmethod
    def _remove_emoji(desstr, restr=''):
        """Strip astral-plane characters (emoji) from *desstr*, replacing them with *restr*.

        Accepts None (missing API field) and returns *restr* in that case —
        the original nested helper crashed with TypeError on None input.
        The surrogate-pair fallback covers narrow builds where the re module
        rejects \\U ranges.
        """
        if desstr is None:
            return restr
        try:
            co = re.compile(u'[\U00010000-\U0010ffff]')
        except re.error:
            co = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
        return co.sub(restr, desstr)

    def start_requests(self):
        """Yield POST requests for pages 1..999 of the community navbar feed."""
        base_url = 'https://community.kaola.com/api/navbar/feed'
        # Headers mimic the Android app; invariant, so built once outside the loop.
        headers = {
            'Content-type': "application/json",
            'apiVersion': '207',
            'platform': '1',
            'appVersion': '4.2.5',
            'deviceModel': 'MI 6',
            'uiUpdateSwitch': '{"appCartAbtestSwitch":1,"appGoodsDetailAbtestSwitch":1,"appHomeAbtestSwitch":1,"appSearchBarAbtestSwitch":1,"appSearchListAbtestSwitch":1,"appSearchNavAbtestSwitch":1,"cart420AbTest":1}',
            'deviceUdID': '8c6d38c73e96c25fed0f88f16c4bd390834d9e4a',
            'appSystemVersion': '4.4.2',
            'appServerConfig': '{"cartAbTest":true,"warehouseAbtest":true}',
            'version': '40020510',
        }
        for page in range(1, 1000):
            print('正在抓取的页数是%s' % page)
            formdata = {
                "id": -1,          # -1 = the "all" feed tab (other tab ids kept in old comments)
                "pageNo": page,
                "refresh": '0',
            }
            yield scrapy.Request(base_url, method='POST', body=json.dumps(formdata),
                                 headers=headers, callback=self.parse_list)

    def parse_list(self, response):
        """Extract author openids from one feed page; request each unseen author's profile.

        Deduplication is done through a redis-backed BloomFilter keyed on the
        author's openid, so an author is fetched at most once across pages.
        """
        bf = BloomFilter()
        html = json.loads(response.text)
        feeds = (html.get('body') or {}).get('feeds')
        if not feeds:
            return
        for feed in feeds:
            # 'idea' entries are notes, 'novel' entries are videos;
            # both carry the same userInfo structure.
            for kind in ('idea', 'novel'):
                if kind not in feed:
                    continue
                user_info = feed.get(kind).get('userInfo') or {}
                idea_url = user_info.get('jumpUrl')
                openid = user_info.get('openid')  # unique identifier per user
                if bf.isContains(openid):  # already seen this author
                    print('exists!')
                    continue
                print('not exists!')
                bf.insert(openid)
                headers = {
                    "referer": idea_url,
                }
                author_url = 'https://community.kaola.com/api/user/home/{openid}?t={times}'.format(
                    openid=openid, times=str(int(time.time() * 1000)))
                yield scrapy.Request(author_url, headers=headers, callback=self.parse_info)

    def parse_info(self, response):
        """Parse an author's profile, yield a KaolaItem, then request all their article pages.

        The article endpoint is paged; a fresh POST per page obtains the
        listing (the API hands back a paging context/lastTime internally).
        """
        useritem = KaolaItem()
        info = json.loads(response.text)

        counters = info.get('body').get('homepageCounter')
        useritem["article"] = counters.get('article', 0)      # number of articles
        useritem["follower"] = counters.get('follower', 0)    # followers
        useritem["following"] = counters.get('following', 0)  # accounts followed
        useritem["liked"] = counters.get('liked', 0)          # likes received

        user_info = info.get('body').get('userInfo')
        useritem["blackcard"] = '否' if user_info.get('blackcard') == 0 else '是'  # black-card member?
        useritem["gender"] = '女' if user_info.get('gender') == 2 else '男'
        # Bios/nicknames may contain emoji the downstream storage cannot take;
        # default '' + None-guard in the helper prevent the TypeError the
        # original hit when a field was absent.
        useritem["personalStatus"] = self._remove_emoji(user_info.get('personalStatus', ''))
        useritem["nickName"] = self._remove_emoji(user_info.get('nickName', ''))
        useritem["profilePhoto"] = user_info.get('profilePhoto')  # avatar URL
        useritem["openid"] = user_info.get('openid')
        useritem["jumpUrl"] = user_info.get('jumpUrl')
        useritem['verifyDesc'] = user_info.get('verifyDesc', '')  # verification badge text, if any
        yield useritem

        if useritem["article"] != 0:
            article_url = 'https://community.kaola.com/api/user/home/{openid}/feed'.format(
                openid=useritem["openid"])
            pages = int(math.ceil(useritem["article"] / self.ARTICLES_PER_PAGE))
            # Invariant across pages — built once.
            headers = {
                'Content-type': 'application/json',
                'origin': 'https://community.kaola.com',
                'referer': 'https://community.kaola.com/user/{openid}.html'.format(openid=useritem["openid"]),
                'accept': 'application/json',
            }
            for page in range(1, pages + 1):
                print('本up主为'+useritem["nickName"]+'共' + str(pages) + '页文章','正在抓取第'+str(page)+'页文章')
                data = {
                    'context': {
                        'hasMore': 1,
                        'page': page,
                    },
                    'topId': None,
                    't': int(time.time() * 1000),
                }
                yield scrapy.Request(article_url, method='POST', body=json.dumps(data),
                                     headers=headers, callback=self.article_info,
                                     meta={'useritem': useritem})

    def article_info(self, response):
        """Parse one page of an author's article list; yield a KaolaArticleItem per entry."""
        useritem = response.meta['useritem']
        payload = json.loads(response.text)
        bodies = payload.get('body').get('body')
        for body in bodies:
            # 'idea' = note posts, 'entity' = video posts; same field layout.
            # A fresh item per branch fixes the original's shared-item bug,
            # where a body holding both keys yielded the same mutable item
            # twice with the second branch clobbering the first's fields.
            for kind in ('idea', 'entity'):
                if kind not in body:
                    continue
                content = body.get(kind)
                articleitem = KaolaArticleItem()
                articleitem["openid_userid"] = useritem["openid"]
                articleitem["article_id"] = body.get('id')
                articleitem["article_info_url"] = body.get('wapJumpUrl')
                articleitem["commentNum"] = content.get('commentNum')
                articleitem["descs"] = self._remove_emoji(content.get('desc'))    # summary text
                articleitem["favorNum"] = content.get('favorNum', 0)              # like count
                articleitem["title"] = self._remove_emoji(content.get('title'))
                publish = content.get('publishTime', 0)  # epoch milliseconds
                articleitem["publishTime"] = time.strftime('%Y-%m-%d %H:%M:%S',
                                                           time.localtime(publish / 1000))
                yield articleitem


