import time
import json
import math
import requests
requests.packages.urllib3.disable_warnings()
import itertools
import re
import pymysql
from kaola.redisfilter import BloomFilter
import sys
from retry import retry

sys.setrecursionlimit(100000)  # raise the maximum recursion depth to 100000 (default ~1000) for the recursive feed pagination

# Database helper for persisting author and article rows.
class MysqlSave:
    """Thin wrapper around a pymysql connection to the ``kaola`` database.

    NOTE(review): credentials are hard-coded; consider moving them to config.
    """

    def __init__(self):
        # Keyword arguments: the old positional connect() form was
        # removed in PyMySQL 1.0.
        self.coon = pymysql.connect(
            host='127.0.0.1',
            user='root',
            password='123456',
            database='kaola',
            charset='utf8',
        )
        self.cursor = self.coon.cursor()

    def _execute(self, sql, data, label):
        """Run *sql* with *data*; commit on success, roll back on failure.

        Rolling back keeps the connection usable after a failed statement
        instead of leaving it stuck inside a broken transaction.
        """
        try:
            self.cursor.execute(sql, data)
            self.coon.commit()
            print('---------%s写入成功---------' % label)
        except Exception as e:
            self.coon.rollback()
            print('***********写入失败%s***********' % e)

    def save_to_author(self, sql, data):
        """Insert one author row; *sql* is a parameterized INSERT."""
        self._execute(sql, data, '作者信息')

    def save_to_article(self, sql, data):
        """Insert one article row; *sql* is a parameterized INSERT."""
        self._execute(sql, data, '文章信息')

    def close_mysql(self):
        """Release the cursor and the underlying connection."""
        self.cursor.close()
        self.coon.close()

# Parse one page of a user's feed and persist every article in it.
@retry()
def parse_article(headers, url, data, openid):
    """Fetch one feed page for author *openid* and store each entry.

    ``headers``/``url``/``data`` describe the POST request; ``openid`` is
    written into every article row as the author key. Entries arrive
    under either the 'novel' (note) or 'idea' key of a feed body.
    """
    article_html = requests.post(url, data=json.dumps(data), headers=headers, verify=False)
    article_info = json.loads(article_html.text)

    def remove_emoji(desstr, restr=''):
        """Strip astral-plane characters (emoji) that MySQL utf8 cannot store."""
        # Guard against a missing field: the original crashed on None input.
        if not desstr:
            return ''
        try:
            co = re.compile(u'[\U00010000-\U0010ffff]')
        except re.error:
            # Narrow unicode build: match surrogate pairs instead.
            co = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
        return co.sub(restr, desstr)

    def _save(body, payload):
        """Build the row dict for one feed entry and insert it.

        The two original branches ('novel' and 'idea') were identical
        except for the payload key; they are merged here.
        """
        articleitem = {}
        articleitem["openid_userid"] = openid
        articleitem["article_id"] = body.get('id')
        articleitem["article_info_url"] = body.get('wapJumpUrl')
        articleitem["commentNum"] = payload.get('commentNum')
        articleitem["descs"] = remove_emoji(payload.get('desc'))  # short description
        articleitem["favorNum"] = payload.get('favorNum', 0)  # like count
        articleitem["title"] = remove_emoji(payload.get('title'))
        publish = payload.get('publishTime', 0)  # epoch milliseconds
        articleitem["publishTime"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(publish / 1000))
        print(articleitem)

        sqls = 'insert into kaola_article_info(openid_userid,article_id,article_info_url,commentNum,descs,favorNum,title,publishTime)' \
               'values (%s,%s,%s,%s,%s,%s,%s,%s)'
        datas = (articleitem["openid_userid"], articleitem["article_id"], articleitem["article_info_url"],
                 articleitem["commentNum"], articleitem["descs"], articleitem["favorNum"],
                 articleitem["title"], articleitem["publishTime"])
        coon = MysqlSave()
        try:
            coon.save_to_article(sqls, datas)
        finally:
            # Always release the connection, even if the insert raises.
            coon.close_mysql()

    bodys = article_info.get('body').get('body')
    for body in bodys or []:
        # An entry carries its payload under 'novel' or 'idea' (checked
        # in that order, matching the original branch order).
        for kind in ('novel', 'idea'):
            if kind in body:
                _save(body, body.get(kind))

# Parse one author's profile, persist it, then crawl all their articles.
@retry()
def parse_info(url, header):
    """Fetch the author profile at *url*, store it, then walk every feed
    page of that author through parse_article().

    ``header`` is the request header dict built by the caller (referer,
    UA, cookie).
    """
    info_html = requests.get(url, headers=header, verify=False)
    info = json.loads(info_html.text)

    def remove_emoji(desstr, restr=''):
        """Strip astral-plane characters (emoji) that MySQL utf8 cannot store."""
        # Guard against a missing field: the original crashed on None input.
        if not desstr:
            return ''
        try:
            co = re.compile(u'[\U00010000-\U0010ffff]')
        except re.error:
            # Narrow unicode build: match surrogate pairs instead.
            co = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
        return co.sub(restr, desstr)

    info_homepageCounter = info.get('body').get('homepageCounter')
    info_userInfo = info.get('body').get('userInfo')

    useritem = {
        "article": info_homepageCounter.get('article', 0),      # article count
        "follower": info_homepageCounter.get('follower', 0),    # fans
        "following": info_homepageCounter.get('following', 0),  # followed accounts
        "liked": info_homepageCounter.get('liked', 0),          # likes received
        "blackcard": '否' if info_userInfo.get('blackcard') == 0 else '是',  # black-card member?
        "gender": '女' if info_userInfo.get('gender') == 2 else '男',
        # Bio and nickname may contain emoji that utf8 cannot store.
        "personalStatus": remove_emoji(info_userInfo.get('personalStatus', '')),
        "nickName": remove_emoji(info_userInfo.get('nickName')),
        "profilePhoto": info_userInfo.get('profilePhoto'),  # avatar URL
        "openid": info_userInfo.get('openid'),              # unique user id
        "jumpUrl": info_userInfo.get('jumpUrl'),
        "verifyDesc": info_userInfo.get('verifyDesc', ''),
    }
    print(useritem)

    sql = 'insert into kaola_user_info(openid,article,follower,following,liked,blackcard,gender,personalStatus,nickName,profilePhoto,jumpUrl,verifyDesc)' \
          'values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
    datas = (useritem["openid"], useritem["article"], useritem["follower"], useritem["following"],
             useritem["liked"], useritem["blackcard"], useritem["gender"], useritem["personalStatus"],
             useritem["nickName"], useritem["profilePhoto"], useritem["jumpUrl"], useritem['verifyDesc'])
    coon = MysqlSave()
    try:
        coon.save_to_author(sql, datas)
    finally:
        # Always release the connection, even if the insert raises.
        coon.close_mysql()

    # Nothing more to do for authors with no articles.
    if useritem["article"] == 0:
        return

    # Each feed page holds up to 19 articles; request page 1..N. The
    # first request also yields the lastTime cursor for deeper pages.
    per_page = 19
    article_url = 'https://community.kaola.com/api/user/home/{openid}/feed'.format(openid=useritem["openid"])
    pages = int(math.ceil(useritem["article"] / per_page))
    for page in range(1, pages + 1):
        print('本up主为' + useritem["nickName"] + '共' + str(pages) + '页文章', '正在抓取第' + str(page) + '页文章')
        data = {
            'context': {
                'hasMore': 1,
                'page': page,
            },
            'topId': None,
            't': int(time.time() * 1000),
        }
        headers = {
            'Content-type': 'application/json',
            'origin': 'https://community.kaola.com',
            'referer': 'https://community.kaola.com/user/{openid}.html'.format(openid=useritem["openid"]),
            'accept': 'application/json',
        }
        time.sleep(1)  # throttle requests
        parse_article(headers, article_url, data, openid=useritem["openid"])

# Walk the navbar feed page by page, discovering authors. Originally
# recursive (the reason for the huge sys.setrecursionlimit); rewritten
# as an iterative loop so deep crawls cannot exhaust the stack.
@retry()
def parse_next(lastTime, lastId, headers, pages):
    """Iterate feed pages starting at *pages*, visiting each new author.

    ``lastTime``/``lastId`` are the pagination cursor returned by the
    previous request; they are refreshed from every response. Stops
    after page 499, mirroring the original recursion cap.
    """
    article_url = 'https://community.kaola.com/api/navbar/feed'

    def _visit_author(user_info):
        """De-duplicate by openid via the bloom filter, then parse the profile."""
        idea_url = user_info.get('jumpUrl')
        openid = user_info.get('openid')  # unique user id
        header = {
            "referer": idea_url,
            'User-Agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.2; MI 6  Build/NMF26X)',
            'Cookie': 'current_env=online;NTESKL-COMMUNITYSI=01B12BA3578D7F12FA0E4981C3126E81.kaola-community-web-onlinejd002.v1.kaola.jd1.vpc-8081;',
        }
        author_url = 'https://community.kaola.com/api/user/home/{openid}?t={times}'.format(
            openid=openid, times=str(int(time.time() * 1000)))
        time.sleep(1)
        bf = BloomFilter()
        if bf.isContains(openid):  # already crawled this author?
            print('exists!')
        else:
            print('not exists!')
            bf.insert(openid)
            parse_info(author_url, header)

    while pages <= 499:
        data = {
            "id": 4,  # feed category ("赶时髦" / trending)
            "pageNo": pages,
            "lastId": lastId,
            "lastTime": lastTime,
            "refresh": 0,
        }
        print(data)
        html = requests.post(article_url, data=json.dumps(data), headers=headers, verify=False)
        html_response = json.loads(html.text)
        # Refresh the pagination cursor for the next request.
        context = html_response.get('body').get('context')
        lastTime = context.get('lastTime')
        lastId = context.get('lastId')
        # Each feed entry carries the author's info under 'idea' or
        # 'novel' (checked in that order, matching the original).
        feeds = html_response.get('body').get('feeds')
        if feeds:
            for feed in feeds:
                for kind in ('idea', 'novel'):
                    if kind in feed:
                        _visit_author(feed.get(kind).get('userInfo'))
        time.sleep(1)  # throttle requests
        pages += 1
@retry()
def parse():
    """Entry point: fetch page one of the navbar feed to obtain the
    pagination cursor (lastTime/lastId), then hand off to parse_next
    for the remaining pages.
    """
    headers = {
        # BUG FIX: the original value was 'User-Agent: Dalvik/...' — the
        # header name was duplicated inside the header value.
        'User-Agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.2; MI 6  Build/NMF26X)',
        'Content-type': 'application/json',
        'Host': 'community.kaola.com',
        'Origin': 'https://community.kaola.com',
        'apiVersion': '207',
        'platform': '1',
        'appVersion': '4.2.5',
        'deviceModel': 'MI 6',
        'uiUpdateSwitch': '{"appCartAbtestSwitch":1,"appGoodsDetailAbtestSwitch":1,"appHomeAbtestSwitch":1,"appSearchBarAbtestSwitch":1,"appSearchListAbtestSwitch":1,"appSearchNavAbtestSwitch":1,"cart420AbTest":1}',
        'deviceUdID': '8c6d38c73e96c25fed0f88f16c4bd390834d9e4a',
        'appSystemVersion': '4.4.2',
        'appServerConfig': '{"cartAbTest":true,"warehouseAbtest":true}',
        'version': '40020510',
    }
    data = {
        "id": 4,  # feed category ("赶时髦" / trending)
    }
    time.sleep(1)
    article_url = 'https://community.kaola.com/api/navbar/feed'
    html = requests.post(article_url, data=json.dumps(data), headers=headers, verify=False)
    html_response = json.loads(html.text)
    # Pagination cursor for the following pages.
    context = html_response.get('body').get('context')
    lastTime = context.get('lastTime')
    lastId = context.get('lastId')

    # Continue from page 2 with the cursor obtained above.
    time.sleep(1)
    parse_next(lastTime, lastId, headers, 2)
if __name__ == '__main__':
    # Script entry point: start the crawl from the first feed page.
    parse()





