import base64
import json
import os
import time
import random
from collections import OrderedDict
from urllib.parse import quote_plus

import redis
import requests
from select import select
from sqlalchemy import create_engine
import pandas as pd
from DrissionPage import ChromiumPage
from DrissionPage import ChromiumOptions
from DrissionPage.common import Settings
from DrissionPage.common import Keys
from DrissionPage.common import Actions
import pymysql
from DrissionPage.errors import *

import datetime

# import #logging
# logging.basicConfig(
#     level=#logging.DEBUG,  # 设置日志级别
#     format='%(asctime)s - %(levelname)s - %(message)s',  # 日志格式
#     handlers=[
#         #logging.FileHandler('xhs_by_day.log'),  # 将日志输出到文件
#         #logging.StreamHandler()  # 同时将日志输出到控制台
#     ]
# )
# Module-level Redis connection used for QR-code hand-off (db 22).
# NOTE(review): host/password are hard-coded in source — move them to
# environment variables or a config file and rotate the credential.
redis_conn = redis.StrictRedis(host='r-bp162522qrwjh5e7jhpd.redis.rds.aliyuncs.com', port=6379,
                               password='123sdmmh568%$#vs*!', db=22)

"""
https://customer.xiaohongshu.com/api/cas/customer/web/qr-code?service=https:%2F%2Fcreator.xiaohongshu.com&qr_code_id=68c517395516719772176435"""


class XHS_XPIDER:
    def __init__(self, user_id):
        """Launch (or attach to) a per-user Chromium instance.

        :param user_id: identifier used for the port lookup in MySQL, the
            per-user Chromium profile directory, the download directory and
            the Redis QR-code key.
        """
        self.CO = ChromiumOptions()  # Chromium launch options
        self.user_id = str(user_id)

        self.PORT = self.port_input()  # debugging port (from DB or random)
        # BUG FIX: the original path string contained a pasted-in
        # "browser_path='" prefix, so set_paths pointed at a non-existent
        # binary. Use the real Chrome executable path.
        self.CO.set_paths(
            browser_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
        self.CO.set_local_port(self.PORT)  # bind the chosen debugging port
        # Per-user Chromium profile directory keeps sessions isolated.
        self.CO.set_user_data_path(os.getcwd() + '/user_info_dir/{}'.format(self.user_id))
        # self.CO.arguments.append('--headless')
        self.ch = ChromiumPage(self.CO)
        self.ch.set.download_path(os.getcwd() + ('/data_folder_dir/{}'.format(user_id)))

        self.ch.set.window.max()  # maximize the browser window
        self.ac = Actions(self.ch)
        # NOTE(review): Redis credentials are hard-coded (duplicated from the
        # module level) — move to configuration.
        self.redis_conn = redis.StrictRedis(host='r-bp162522qrwjh5e7jhpd.redis.rds.aliyuncs.com', port=6379,
                                            password='123sdmmh568%$#vs*!', db=22)

    """从数据库判断用户是否存在"""

    def user_state(self):
        """Check whether self.user_id is already registered in xhs_spider_config.

        :return: "用户存在" if the id is present, otherwise "用户不存在".
        """
        sql = "select spider_user_id,spider_port,spider_login_code from live_data.xhs_spider_config"
        known_ids = [row[0] for row in self.pymysql_fetchall(sql)]
        if self.user_id in known_ids:
            print("存在")
            return "用户存在"
        return "用户不存在"

    """查询数据库"""

    def pymysql_fetchall(self, sql):
        """Run *sql* against the live_data MySQL database and return all rows.

        :param sql: SQL text to execute (no bind parameters).
        :return: tuple of row tuples from ``cursor.fetchall()``.

        NOTE(review): credentials are hard-coded — move to configuration.
        """
        con = pymysql.connect(host='rm-bp17r99475xuauk63yo.mysql.rds.aliyuncs.com', port=3306, user='live_data',
                              passwd='CJMg2tsHg7sj6bo0UTKq0a4T!fPDpJ', db='live_data', charset='utf8')
        try:
            cursor = con.cursor()
            try:
                cursor.execute(sql)
                sql_result = cursor.fetchall()
                con.commit()
            finally:
                cursor.close()
        finally:
            # Close the connection even if the query raises; the original
            # leaked the connection (and cursor) on error.
            con.close()

        return sql_result

    """自动随机生成端口 并进行检测"""

    def port_jiance(self):
        """Draw a random candidate port and report its availability.

        :return: "用户存在" when self.user_id is already configured,
                 "端口不存在" when the random candidate is not in use,
                 "端口存在" when it collides with a configured port.

        NOTE(review): the candidate port is local to this call, so the
        caller (port_input) cannot know which port was actually checked —
        consider passing the candidate in as an argument.
        """
        candidate = random.randint(5000, 9000)

        sql = "select spider_user_id,spider_port,spider_login_code from live_data.xhs_spider_config"
        used_ports = [row[1] for row in self.pymysql_fetchall(sql)]
        if self.user_state() != "用户不存在":
            print('当前用户存在')
            return '用户存在'
        return "端口不存在" if candidate not in used_ports else "端口存在"

    """根据用户ID查出端口号"""

    def port_fe(self):
        """Look up the stored Chromium port for self.user_id.

        :return: the port as a string ('' when no row matches).

        NOTE(review): user_id is interpolated into the SQL text — switch to
        bind parameters if it can ever come from untrusted input.
        """
        sql = ("select spider_user_id,spider_port,spider_login_code "
               "from live_data.xhs_spider_config where spider_user_id='{}'").format(self.user_id)
        ports = [row[1] for row in self.pymysql_fetchall(sql)]
        return ''.join(ports)

    """自动输入端口号"""

    def port_input(self):
        """Pick the Chromium debugging port for this user.

        :return: the port stored in MySQL (string) when the user already
            exists, otherwise a fresh random port in [5000, 9000] that
            port_jiance() reported as free.

        NOTE(review): port_jiance() draws its own random candidate, so the
        value returned here is not literally the one that was checked —
        confirm whether port_jiance should accept the candidate as an
        argument.
        """
        while True:
            state = self.port_jiance()
            if state == "用户存在":
                print("当前用户存在")
                return self.port_fe()
            if state == "端口不存在":
                port1 = random.randint(5000, 9000)
                print(port1)
                return port1
            # Port collision ("端口存在"): retry with a fresh random check.
            # BUG FIX: the original returned self.PORT here, which raised
            # AttributeError because port_input() is what initializes PORT.

    def mysql_config(self):
        """Build a SQLAlchemy engine for the live_data MySQL schema.

        :return: a SQLAlchemy Engine (utf8mb4, pymysql driver).
        """
        db_user = 'live_data'
        db_password = quote_plus('CJMg2tsHg7sj6bo0UTKq0a4T!fPDpJ')
        db_host = 'rm-bp17r99475xuauk63yo.mysql.rds.aliyuncs.com'
        db_name = 'live_data'
        url = f"mysql+pymysql://{db_user}:{db_password}@{db_host}:3306/{db_name}?charset=utf8mb4"
        return create_engine(url)

    def pymysql_update(self, sql, num):
        """Execute a parameterized UPDATE bound with (num, self.user_id).

        :param sql: SQL text containing two %s placeholders.
        :param num: new state value bound to the first placeholder.
        :return: human-readable confirmation string.
        """
        con = pymysql.connect(host='rm-bp17r99475xuauk63yo.mysql.rds.aliyuncs.com', port=3306, user='live_data',
                              passwd='CJMg2tsHg7sj6bo0UTKq0a4T!fPDpJ', db='live_data', charset='utf8')
        try:
            cursor = con.cursor()
            try:
                cursor.execute(sql, (num, self.user_id))
                con.commit()
            finally:
                cursor.close()
        finally:
            # Close the connection even on error; the original leaked it
            # when execute() raised.
            con.close()

        return "用户{}状态修改成功,当前状态值为{}".format(self.user_id, num)

    """二维码获取并保存"""

    def qrcode_save(self):
        """Grab the login QR code, cache it in Redis and save it as a PNG.

        The QR <img> element carries a base64 data URI in its src; the full
        src string is pushed to Redis (key ``xhs_qrcode:<user_id>``, 300 s
        TTL) for the frontend, and the decoded bytes are written to
        ``qrcode_img_dir/<user_id>.png``.

        :return: confirmation string.
        """
        print("二维码获取中........")
        time.sleep(2)
        xhs_qrcode_text = self.ch.ele(
            'xpath://*[@id="page"]/div/div[2]/div[1]/div[2]/div/div/div/div/div/div[2]/img[1]')
        # print(xhs_qrcode_text)
        # Both values are parsed out of the element's repr() — fragile: this
        # depends on DrissionPage's exact formatting and on the page's
        # css-1lhmg90 class name staying stable.
        xhs_qrcode_redis = \
        str(xhs_qrcode_text).split("<ChromiumElement img src=")[1].split(" class='css-1lhmg90'>")[0].split('\n')[0]
        xhs_qrcode_save_png = str(xhs_qrcode_text).split("<ChromiumElement img src='data:image/png;base64,")[1].split(
            "' class='css-1lhmg90'>")[0].split('\n')[0]

        img_base = xhs_qrcode_redis.encode("utf-8")
        img_base1 = xhs_qrcode_save_png.encode("utf-8")

        # Decode the base64 payload (data-URI prefix already stripped above).
        image_bytes = base64.b64decode(img_base1)

        # Hand the QR code to the consumer via Redis; expires in 5 minutes.
        self.redis_conn.set('xhs_qrcode:{}'.format(self.user_id), img_base)
        print("写入成功")
        self.redis_conn.expire('xhs_qrcode:{}'.format(self.user_id), 300)
        with open(os.getcwd() + "/qrcode_img_dir/" + "{}.png".format(self.user_id), "wb") as f:
            f.write(image_bytes)

        return "用户{} 二维码获取成功".format(self.user_id)

    """获取小红书用户主页信息"""

    # def xhs_user_info_requests(self):
    #     time.sleep(1)
    #     print("***开始获取用户主页***")
    #     """昵称"""
    #     nickname = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[1]/div[2]/h4')
    #     # nickname = self.ch.ele('xpath://*[@id="content-area"]/main/div[3]/div/div[1]/div[1]/div/div[1]/div[2]/div[1]/div')
    #     nickname = nickname.text
    #     print(nickname,"********")
    #     """关注"""
    #     follow = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[1]/div[2]/p[1]/span[1]/label')
    #     # follow = self.ch.ele('xpath://*[@id="content-area"]/main/div[3]/div/div[1]/div[1]/div/div[1]/div[2]/div[2]/div[1]/span[1]')
    #     follow = follow.text
    #     print(follow, "********")
    #     """粉丝"""
    #     fans = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[1]/div[2]/p[1]/span[2]/label')
    #     # fans = self.ch.ele('xpath://*[@id="content-area"]/main/div[3]/div/div[1]/div[1]/div/div[1]/div[2]/div[2]/div[2]/span[1]')
    #     fans = fans.text
    #     print(fans, "********")
    #
    #     """获赞与收藏"""
    #     receive_likes_collect = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[1]/div[2]/p[1]/span[3]/label')
    #     # receive_likes_collect = self.ch.ele('xpath://*[@id="content-area"]/main/div[3]/div/div[1]/div[1]/div/div[1]/div[2]/div[2]/div[3]/span[1]')
    #     receive_likes_collect = receive_likes_collect.text
    #     print(receive_likes_collect, "********")
    #
    #     """小红书账号"""
    #     xhs_account = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[1]/div[2]/p[1]/span[5]')
    #     # xhs_account = self.ch.ele('xpath://*[@id="content-area"]/main/div[3]/div/div[1]/div[1]/div/div[1]/div[2]/div[3]/div[1]')
    #     print(xhs_account.text)
    #     xhs_account = xhs_account.text.split("小红书号：")[1]
    #     print(xhs_account, "********")
    #     """个人简介"""
    #     user_introduction = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[1]/div[2]/p[2]')
    #     # print(user_introduction.text,"***********")
    #     # user_introduction = self.ch.ele('xpath://*[@id="content-area"]/main/div[3]/div/div[1]/div[1]/div/div[1]/div[2]/div[3]/div[3]')
    #     user_introduction = user_introduction.text
    #     print(user_introduction, "********")
    #
    #     """头像链接"""
    #     head_portrait_link = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[1]/div[1]/img')
    #     print(head_portrait_link)
    #     # head_portrait_link = self.ch.ele('xpath://*[@id="content-area"]/main/div[3]/div/div[1]/div[1]/div/div[1]/div[1]/img')
    #     head_portrait_link = str(head_portrait_link).split("src='")[1].split("'")[0]
    #     print(head_portrait_link, "********")
    #     """获取主页数据总览 查看近30天内的数据 只能查看近三天内的数据"""
    #     print("***获取主页数据总览中***")
    #     time.sleep(1)
    #     self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[2]/div[1]/div[2]/div/div/div/button/span').click()
    #     """点击近30天的数据总览按钮"""
    #     self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[2]/div[1]/div[2]/div/div/div[2]/div/div[2]/div').click()
    #
    # """获取近30天内数据总览数据"""
    # added_fans = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[2]/div[2]/div[1]/span[2]')
    # added_fans = added_fans.text
    # homepage_visitor = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[2]/div[2]/div[2]/span[2]')
    # homepage_visitor = homepage_visitor.text
    # homepage_view = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[2]/div[2]/div[3]/span[2]')
    # homepage_view = homepage_view.text
    # interaction = self.ch.ele('xpath://*[@id="app"]/div/div[1]/div[2]/div[2]/div[4]/span[2]')
    # interaction = interaction.text
    #
    #     """用户信息爬取时间"""
    #     current_time = datetime.datetime.now()
    #     current_time = str(current_time)[:-7]
    #
    #     user_info_json = {"spider_user_id": self.user_id, "spider_port": self.PORT, "nickname": nickname,
    #                       "follow": follow, "fans": fans, "receive_likes_collect": receive_likes_collect,
    #                       "xhs_account": xhs_account, "user_introduction": user_introduction,
    #                       "head_portrait_linkstr": head_portrait_link,
    #                       "added_fans": added_fans, "homepage_visitor": homepage_visitor,
    #                       "homepage_view": homepage_view, "interaction": interaction, "spider_time": current_time}

    # print(user_info_json)
    # xhs_user_info_frame = pd.DataFrame([user_info_json])
    # print("***用户信息数据正在写入***")
    # pd.io.sql.to_sql(xhs_user_info_frame, 'xhs_user', self.mysql_config(), schema='live_data', if_exists='append',
    #                  index=False)
    # print("***用户信息数据写入完成***")
    #
    # delete_user_sql = 'DELETE from live_data.xhs_user where spider_user_id=%s'
    # print(self.delete_prod_user(delete_user_sql))
    # return fans,xhs_user_info_frame
    """获取小红书的唯一ID"""

    def listen_user_id(self, xhs_account):
        """Listen for the creator user-info API and return its userId.

        :param xhs_account: the account's "red number", merged into the
            captured payload alongside spider_user_id.
        :return: the ``userId`` field of the first captured packet.
            Raises IndexError if no packet arrived within the timeout.
        """
        # Start listening for packets whose URL contains this endpoint.
        self.ch.listen.start('https://creator.xiaohongshu.com/api/galaxy/user/info')
        print("正在监听用户信息")
        self.ch.refresh()
        time.sleep(2)
        user_info_list = []
        for packet in self.ch.listen.steps(timeout=20):
            print("******************************用户信息获取中******************************")
            print(packet.url)

            user_info = packet.response.body
            user_info = user_info["data"]
            user_info.update({"spider_user_id": self.user_id, "xhs_account": xhs_account})
            user_info_list.append(user_info)

            # Stop after the first matching user-info packet.
            if "/user/info" in packet.url:
                break

        # NOTE(review): this query's result is only printed and otherwise
        # unused — dead-code candidate.
        sql = "select userID from live_data.xhs_user_info"
        user_info_data = self.pymysql_fetchall(sql)
        xhs_user_info_list = []
        for i in user_info_data:
            xhs_user_info_list.append(i[0])
        print(xhs_user_info_list)
        refer_userID = user_info_list[0]["userId"]
        return refer_userID

    def xhs_user_info_listen(self):
        """Capture the creator personal_info packet and build the profile frame.

        Refreshes the creator home page, waits (up to 10 s) for the
        ``creator/home/personal_info`` response, extracts the profile
        fields, and joins them column-wise with a stub 30-day summary
        (those metrics are currently hard-coded to '0' because the
        note_detail_new listener below is disabled).

        :return: tuple ``(fans, xhs_user_info_frame, xhs_account, userID)``.
        """
        self.ch.refresh()
        self.ch.listen.set_targets('https://creator.xiaohongshu.com/api/galaxy/creator/home/personal_info')
        self.ch.listen.start()

        # Block until the profile packet arrives; raises on timeout.
        packege = self.ch.listen.wait(timeout=10, raise_err=True)
        self.ch.listen.stop()
        user_info_json = packege.response.body
        print("2322323232323232323232323232", user_info_json)
        # self.ch.refresh()
        # self.ch.listen.start(
        #     'https://creator.xiaohongshu.com/api/galaxy/creator/home/personal_info')  # start listening for this endpoint

        # user_info_xhs_json_list = []
        # fans_list = []
        # xhs_account_list = []
        # for packet in self.ch.listen.steps(timeout=3):
        #     user_info_json = packet.response.body
        # Pull the profile fields out of the response payload.
        xhs_account = user_info_json["data"]["red_num"]
        # xhs_account_list.append(xhs_account)
        fans = user_info_json["data"]["fans_count"]
        # fans_list.append(fans)
        follow = user_info_json["data"]["follow_count"]
        receive_likes_collect = user_info_json["data"]["faved_count"]
        head_portrait_linkstr = user_info_json["data"]["avatar"]
        user_introduction = user_info_json["data"]["personal_desc"]
        nickname = user_info_json["data"]["name"]
        xhs_new_json = {"xhs_account": xhs_account, "fans": fans, "follow": follow,
                        "receive_likes_collect": receive_likes_collect,
                        "head_portrait_linkstr": head_portrait_linkstr, "user_introduction": user_introduction,
                        'nickname': nickname}
        print(xhs_new_json)
        # user_info_xhs_json_list.append(xhs_new_json)

        # self.ch.refresh()
        # """30-day data"""
        # self.ch.listen.start(
        #     'https://creator.xiaohongshu.com/api/galaxy/creator/data/note_detail_new')
        #
        # thirty_json_list = []
        # for packet_thirty in self.ch.listen.steps(timeout=3):
        #     thirty_json = packet_thirty.response.body
        #     added_fans = thirty_json["data"]["thirty"]["rise_fans_count"]
        #     homepage_view = thirty_json["data"]["thirty"]["view_time_avg"]
        #     homepage_visitor = thirty_json["data"]["thirty"]["view_count"]
        #     interaction = '0'
        # 30-day metrics stubbed to '0' while the listener above is disabled.
        homepage_view = '0'
        added_fans = '0'
        interaction = '0'
        homepage_visitor = '0'
        current_time = datetime.datetime.now()
        current_time = str(current_time)[:-7]
        thirty_json_list = [{"spider_user_id": self.user_id, "spider_port": self.PORT, "added_fans": added_fans,
                             "homepage_view": homepage_view, "homepage_visitor": homepage_visitor,
                             "interaction": interaction, "spider_time": current_time}]
        # thirty_json_list.append(thirty_json)

        user_thirty_frame = pd.DataFrame(thirty_json_list)

        xhs_user_info_frame_ = pd.DataFrame([xhs_new_json])

        # Column-wise join: one row of profile columns + 30-day stub columns.
        xhs_user_info_frame = pd.concat([xhs_user_info_frame_, user_thirty_frame], axis=1)
        xhs_user_info_frame = xhs_user_info_frame.astype("str")
        # Remove any previous row for this user before re-insertion.
        # NOTE(review): delete_prod_user is not defined in this chunk —
        # confirm it exists elsewhere in the file.
        delete_user_sql = 'DELETE from live_data.xhs_user where spider_user_id=%s'
        print(self.delete_prod_user(delete_user_sql))
        userID = self.listen_user_id(xhs_account)
        # print(fans,"()()()()()")
        return fans, xhs_user_info_frame, xhs_account, userID

    """获取汇总笔记模块 切换近30天的数据"""

    def notes_switch_thirdy(self):
        """Open the notes page and verify the 30-day note_detail_new API fires.

        Clicks into the notes dashboard, dismisses an optional overlay,
        refreshes, and listens for the note_detail_new packet.

        :return: "笔记页面数据点击完成" when at least one packet was
            captured, "没有监控到接口" otherwise.
        """
        print("***开始点击笔记数据***")

        # Click the notes tab on the dashboard home page.
        self.ch.ele('xpath://*[@id="content-area"]/main/div[1]/div/div[2]/div/div[4]/div/div').click()
        print("ahahhhhhhhhhhhhhh")
        time.sleep(0.5)
        try:
            # Optional overlay/close button — not always present; best-effort.
            self.ch.ele('xpath://*[@id="content-area"]/main/div[3]/div/div/div/div/div[2]/span').click()
        except Exception:
            pass

        self.ch.refresh()
        self.ch.listen.start(
            'https://creator.xiaohongshu.com/api/galaxy/creator/data/note_detail_new')

        captured = []
        for packet in self.ch.listen.steps(timeout=5):
            captured.append(packet.response.body)
            # BUG FIX: the original compared the counter to 0 AFTER
            # incrementing it, so the break never fired and the loop always
            # ran until the 5 s timeout. Stop after the first packet.
            break

        if not captured:
            return "没有监控到接口"

        return "笔记页面数据点击完成"

    """获取汇总笔记模块 获取近30天的汇总数据  并插入MYSQL数据库"""

    def notes_data_thirdy(self):
        """Collect the 30-day daily trend series for the notes dashboard.

        Navigates via notes_switch_thirdy(), then re-listens on the
        note_detail_new endpoint, takes the first packet's
        ``data["thirty"]`` block, and wraps the per-day series into a
        one-row, stringified DataFrame.

        :return: DataFrame with column ``notes_data_thirty``; when the API
            packet was never observed, the same shape with empty values.
        """
        current_time = datetime.datetime.now()
        current_time = str(current_time)[:-7]
        cc = self.notes_switch_thirdy()
        # print(cc)
        if cc == "没有监控到接口":
            # Fallback: emit an all-empty record so downstream writes still
            # have a row for this user.
            data_result_list = {"spider_user_id": self.user_id, "spider_port": self.PORT, "view_list": '',
                                "view_time_list": '', "like_list": '', "collect_list": '',
                                "comment_list": '', "danmaku_list": '',
                                "home_view_list": '', "rise_fans_list": '',
                                "share_list": '', "spider_time": current_time}

            data_none = pd.DataFrame([{"notes_data_thirty": [data_result_list]}])
            data_none = data_none.astype("str")
            # print(data_none)
            # pd.io.sql.to_sql(data_none, 'xhs_notes_thirty', self.mysql_config(), schema='live_data',
            #                  if_exists='append', index=False)
            return data_none
        if cc == "笔记页面数据点击完成":
            # Listen again for the same endpoint (the packet consumed inside
            # notes_switch_thirdy is not reused here).
            self.ch.refresh()
            self.ch.listen.start(
                'https://creator.xiaohongshu.com/api/galaxy/creator/data/note_detail_new')

            # Capture exactly one packet of the 30-day daily trend data.
            count_detail = 0
            detail_list_data = []
            for packet in self.ch.listen.steps(timeout=5):
                detail_list_data.append(packet.response.body["data"]["thirty"])

                count_detail = count_detail + 1
                if count_detail == 1:
                    break
                    # detail_list_data.append(packet.response.body)
            notes_thirty_list = []
            for i in detail_list_data:
                # Per-day series for each metric over the last 30 days.
                view_list = i["view_list"]
                view_time_list = i["view_time_list"]
                like_list = i["like_list"]
                collect_list = i["collect_list"]
                comment_list = i["comment_list"]
                danmaku_list = i["danmaku_list"]
                home_view_list = i["home_view_list"]
                rise_fans_list = i["rise_fans_list"]
                share_list = i["share_list"]

                data_result_list = {"view_list": view_list, "view_time_list": view_time_list, "like_list": like_list,
                                    "collect_list": collect_list, "comment_list": comment_list,
                                    "danmaku_list": danmaku_list,
                                    "home_view_list": home_view_list, "rise_fans_list": rise_fans_list,
                                    "share_list": share_list}
                # print(data_result_list)
                notes_thirty_list.append(data_result_list)

            data = pd.DataFrame([{"notes_data_thirty": notes_thirty_list}])
            # data["spider"]
            data = data.astype("str")
            # print(data)
            # pd.io.sql.to_sql(data, 'xhs_notes_thirty', self.mysql_config(), schema='live_data',
            #                  if_exists='append', index=False)
            return data

        # return notes_json

        # return notes_json

    """将页码设置成48条每页"""

    def page_config(self):
        """Open the page-size dropdown and switch the list to 48 items/page."""
        dropdown = 'xpath://*[@id="app"]/div/div[2]/div[4]/div[1]/div/div/div/div'
        self.ch.ele(dropdown + '/input').click()
        time.sleep(1)
        self.ch.ele(dropdown).click()

    """获取图文模块总共有多少个笔记 并有多少页码"""

    def Pagination_Page(self):
        """Parse the pagination footer into (total notes, total pages).

        :return: tuple ``(notes_num, page_num)`` as ints, read from footer
            text shaped like "共 N 条，M 页".
        """
        footer_text = self.ch.ele('xpath://*[@id="app"]/div/div[2]/div[4]/div[1]').text
        total_notes = footer_text.split("共 ")[1].split(" 条")[0]
        total_pages = footer_text.split('，')[1].split(' 页')[0]
        return int(total_notes), int(total_pages)

    """获取图文笔记的详情信息"""

    def notes_details_requests(self):
        """Scrape the image-text ("tuwen") note list page by page.

        Walks every pagination page (48 notes per page) and reads each note
        card's title, publish time, metric counters and cover image via
        fixed XPaths.

        :return: list of dicts, one per note, tagged ``notes_type="tuwen"``.
        """
        # Switch the list to 48 items per page first.
        self.page_config()
        time.sleep(1)
        # (The explicit click to switch onto the tuwen tab is currently
        # disabled; the page is assumed to already be on it.)
        # time.sleep(2)
        # self.ch.ele('xpath://*[@id="app"]/div/div[2]/div[2]/div[1]/div/button[3]/span').click()
        time.sleep(1)

        notes_num, page_num = self.Pagination_Page()

        # Timestamp recorded once for the whole scrape run.
        current_time = datetime.datetime.now()
        current_time = str(current_time)[:-7]
        NOTES_TUWEN_LIST = []
        for i in range(1, page_num + 1):
            # Jump to page i via the page-number input, then clear it.
            self.ch.ele("x://input[@class='dyn css-xf7229 css-1hsmx34']").input("{}\n".format(i))
            self.ch.ele("x://input[@class='dyn css-xf7229 css-1hsmx34']").clear(by_js=True)
            for j in range(1, notes_num + 1):
                notes_title = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[1]/div[2]/span[1]'.format(j))
                notes_release_time = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[1]/div[2]/span[2]'.format(j))
                notes_release_time = notes_release_time.text.split('发布于 ')[1]
                notes_view = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[1]/li[1]/b'.format(j))
                notes_comment_count = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[2]/li[1]/b'.format(j))
                notes_like_count = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[1]/li[2]/b'.format(j))
                notes_share_count = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[2]/li[2]/b'.format(j))
                notes_collect_count = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[1]/li[3]/b'.format(j))
                notes_fans_hi = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[2]/li[3]/b'.format(j))
                notes_cover_link = self.ch.ele('xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[1]/div[1]'.format(j))
                # Cover URL is embedded in the element's inline style.
                notes_cover_link = str(notes_cover_link).split('image: url("')[1].split('")')[0]
                # BUG FIX: the original dict literal listed
                # "notes_collect_count" twice; the duplicate was removed.
                notes_tuwen_json = {"spider_user_id": self.user_id, "notes_title": notes_title.text,
                                    "notes_release_time": notes_release_time, "notes_view": notes_view.text,
                                    "notes_comment_count": notes_comment_count.text,
                                    "notes_like_count": notes_like_count.text,
                                    "notes_collect_count": notes_collect_count.text,
                                    "notes_share_count": notes_share_count.text,
                                    "notes_fans_hi": notes_fans_hi.text, "notes_cover_link": notes_cover_link,
                                    "spider_time": current_time, "notes_type": "tuwen"}
                NOTES_TUWEN_LIST.append(notes_tuwen_json)
        return NOTES_TUWEN_LIST

    """获取图文模块总共有多少个笔记 并有多少页码"""

    def video_page(self):
        """Parse the video-tab pagination footer into (total notes, pages).

        :return: tuple ``(notes_num, page_num)`` as ints, read from footer
            text shaped like "共 N 条，M 页".
        """
        footer_text = self.ch.ele('xpath://*[@id="app"]/div/div[2]/div[4]/div[1]').text
        total_notes = footer_text.split("共 ")[1].split(" 条")[0]
        total_pages = footer_text.split('，')[1].split(' 页')[0]
        return int(total_notes), int(total_pages)

    """抓取视频数据"""

    def notes_video_page_requests(self):
        """Switch to the video-notes tab, set 48/page, and read the footer.

        :return: tuple ``(video_notes_num, video_page_num)`` as ints.
        """
        print("***视频笔记总页码获取中***")
        # Click the video-notes tab.
        self.ch.ele('xpath://*[@id="app"]/div/div[2]/div[2]/div[1]/div/button[2]/span').click()
        # Open the page-size dropdown.
        self.ch.ele('xpath://*[@id="app"]/div/div[2]/div[4]/div[1]/div/div/div/div/input').click()
        time.sleep(1)
        # Pick the "48 per page" option by its visible text.
        self.ch.ele('48条/页').click()

        # Footer text looks like "共 N 条，M 页".
        footer_text = self.ch.ele('xpath://*[@id="app"]/div/div[2]/div[4]/div[1]').text
        total_notes = footer_text.split("共 ")[1].split(" 条")[0]
        total_pages = footer_text.split('，')[1].split(' 页')[0]

        return int(total_notes), int(total_pages)

    """获取视频笔记的详情信息"""

    def video_notes_detail_info(self):
        """Scrape the video note list page by page.

        Walks every pagination page (48 notes per page) and reads each video
        note card's title, publish time, metric counters (including viewing
        time and bullet-chat count) and cover image via fixed XPaths.

        :return: list of dicts, one per note, tagged ``notes_type="video"``.
        """
        print("***视频笔记详情数据抓取中***")
        video_notes_num, video_page_num = self.notes_video_page_requests()
        # Timestamp recorded once for the whole scrape run.
        current_time = datetime.datetime.now()
        current_time = str(current_time)[:-7]
        NOTES_VIDEO_LIST = []

        for i in range(1, video_page_num + 1):
            # Jump to page i via the page-number input, then clear it.
            self.ch.ele("x://input[@class='dyn css-xf7229 css-1hsmx34']").input("{}\n".format(i))
            self.ch.ele("x://input[@class='dyn css-xf7229 css-1hsmx34']").clear(by_js=True)
            for j in range(1, video_notes_num + 1):
                notes_release_time = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[1]/div[2]/span[2]'.format(j))
                notes_title = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[1]/div[2]/span[1]'.format(j))
                notes_view = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[1]/li[1]/b'.format(j))
                notes_comment_count = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[2]/li[1]/b'.format(j))
                notes_viewing_time = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[1]/li[2]/b'.format(j))
                notes_bullet_chat = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[2]/li[2]/b'.format(j))
                notes_like_count = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[1]/li[3]/b'.format(j))
                notes_share_count = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[2]/li[3]/b'.format(j))
                notes_collect_count = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[1]/li[4]/b'.format(j))
                notes_fans_hi = self.ch.ele(
                    'xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[2]/ul[2]/li[4]/b'.format(j))
                notes_cover_link = self.ch.ele('xpath://*[@id="app"]/div/div[2]/div[3]/div[{}]/div[1]/div[1]'.format(j))
                # Cover URL is embedded in the element's inline style.
                notes_cover_link = str(notes_cover_link).split('image: url("')[1].split('")')[0]
                notes_video_json = {"spider_user_id": self.user_id, "notes_title": notes_title.text,
                                    "notes_release_time": notes_release_time.text, "notes_view": notes_view.text,
                                    "notes_comment_count": notes_comment_count.text,
                                    "notes_like_count": notes_like_count.text,
                                    "notes_share_count": notes_share_count.text,
                                    "notes_collect_count": notes_collect_count.text,
                                    "notes_fans_hi": notes_fans_hi.text, "notes_viewing_time": notes_viewing_time.text,
                                    "notes_bullet_chat": notes_bullet_chat.text, "notes_cover_link": notes_cover_link,
                                    "spider_time": current_time, "notes_type": "video"}

                # print(notes_video_json)
                NOTES_VIDEO_LIST.append(notes_video_json)
        return NOTES_VIDEO_LIST

    """图文数据和视频数据汇总 并入库"""

    def video_tuwen_total(self):
        """Scrape image-text and video notes, merge them and write to MySQL.

        :return: confirmation string once the rows are stored in
            ``live_data.xhs_notes_detail``.
        """
        print("***开始汇总合并数据***")
        # Image-text notes first, then video notes.
        tuwen_rows = self.notes_details_requests()
        video_rows = self.video_notes_detail_info()

        # Stack video rows above tuwen rows, all values stringified.
        merged_frame = pd.concat([pd.DataFrame(video_rows), pd.DataFrame(tuwen_rows)], axis=0)
        merged_frame = merged_frame.astype("str")
        print("***笔记详情数据正在入库......")
        pd.io.sql.to_sql(merged_frame, 'xhs_notes_detail', self.mysql_config(), schema='live_data', if_exists='append',
                         index=False)
        print("***笔记详情数据入库成功***")
        return '***图文笔记和视频笔记全部抓取成功***'

    """删除列"""

    def del_colu(self, frame):
        """Strip chart-styling keys from every record in *frame*, in place.

        :param frame: iterable of dicts (API chart payload records).
        :return: the same *frame* object, mutated in place.
        """
        for record in frame:
            for key in ("color", "set_color", "set_title", "set_value"):
                # pop() tolerates records missing a styling key; the
                # original `del` raised KeyError in that case.
                record.pop(key, None)
        return frame

    """监听模块"""
    """粉丝数据模块 使用监听方法抓取，监听粉丝模块里边的两个包"""

    # def fans_listen_data(self):
    #     self.ch.refresh()
    #     time.sleep(1)
    #     """点击粉丝数据模块按钮"""
    #     self.ch.ele('xpath://*[@id="content-area"]/main/div[1]/div/div[2]/div/div[5]').click()
    #
    #     self.ch.listen.start(targets=['https://creator.xiaohongshu.com/api/galaxy/creator/data/fans_portrait_new',
    #                                   "data/fans_source"])  # 开始监听，指定获取包含该文本的数据包
    #
    #     count = 0
    #     """获取当前时间，作为爬虫抓取数据的时间"""
    #     current_time = datetime.datetime.now()
    #     current_time = str(current_time)[:-7]
    #     result = {}
    #     """监听数据 监听两个数据包"""
    #     for packet in self.ch.listen.steps():
    #
    #         count += 1
    #         """监听到粉丝汇总图的接口"""
    #         if packet.url == "https://creator.xiaohongshu.com/api/galaxy/creator/data/fans_portrait_new":
    #             listen_result = packet.response.body
    #             gender = listen_result["data"]["gender"]
    #             age = listen_result["data"]["age"]
    #             city = listen_result["data"]["city"]
    #             interest = listen_result["data"]["interest"]
    #
    #             gender = self.del_colu(gender)
    #             age = self.del_colu(age)
    #             city = self.del_colu(city)
    #             interest = self.del_colu(interest)
    #             result_json = {"gender": gender, "age": age, "city": city, "interest": interest}
    #             result.update(result_json)
    #
    #         """监听到粉丝来源的接口"""
    #         if packet.url == "https://creator.xiaohongshu.com/api/galaxy/creator/data/fans_source":
    #             listen_result_source = packet.response.body
    #             fans_source = listen_result_source["data"]
    #             fans_source = self.del_colu(fans_source)
    #
    #             fans_source_result_json = {"fans_source": fans_source}
    #             result.update(fans_source_result_json)
    #         if count == 2:
    #             break
    #
    #     fans_page_result = pd.DataFrame([result])
    #     fans_page_result["spider_user_id"] = self.user_id
    #     fans_page_result["spider_time"] = current_time
    #     fans_page_result = fans_page_result.astype("str")
    #     print(fans_page_result)
    #
    #     pd.io.sql.to_sql(fans_page_result, 'xhs_fans_info', self.mysql_config(), schema='live_data',
    #                      if_exists='append', index=False)
    #
    #     """最后一个模块抓取完成，关闭浏览器"""
    #     self.ch.close()
    #     return "粉丝模块抓取完成"

    """粉丝模块监控"""

    def fans_listen_thirty(self, fans):
        """Collect fans-module stats by listening to the creator-platform APIs.

        Accounts with fewer than 50 followers only expose the 30-day overall
        trend (one packet); larger accounts additionally expose the portrait
        (gender/age/city/interest) and fans-source breakdowns, so three
        packets are awaited instead of one.

        :param fans: follower count scraped earlier (str or int); a value
            containing "万" (x10,000) is clamped to 51 so it always takes the
            large-account path.
        :return: one-row DataFrame with a single "fans_thirty" column whose
            cell holds the stringified result dict.
        """
        print("粉丝书为", fans)

        # time.sleep(1)
        # self.ch.refresh()
        """点击粉丝数据模块按钮"""
        # Click the "fans data" tab in the creator console sidebar.
        self.ch.ele('xpath://*[@id="content-area"]/main/div[1]/div/div[2]/div/div[6]/div/div/span').click()

        # self.ch.refresh()

        # time.sleep(3)
        # self.ch.listen.start('https://creator.xiaohongshu.com/api/galaxy/creator/data/fans/overall_new')  # 开始监听，指定获取包含该文本的数据包
        # Listen for the three fans endpoints (portrait, source, 30-day overall).
        self.ch.listen.start(targets=['https://creator.xiaohongshu.com/api/galaxy/creator/data/fans_portrait_new',
                                      "data/fans_source",
                                      'https://creator.xiaohongshu.com/api/galaxy/creator/data/fans/overall_new'])  # start listening for packets whose URL contains these texts

        """获取当前时间，作为爬虫抓取数据的时间"""
        current_time = datetime.datetime.now()
        # Trim microseconds -> "YYYY-MM-DD HH:MM:SS".
        # NOTE(review): current_time is only used by the commented-out DB
        # insert near the bottom; currently dead.
        current_time = str(current_time)[:-7]
        """监听数据 监听两个数据包"""
        fans_thirty_list = []
        result = {}
        count = 0
        # self.ch.refresh()
        # time.sleep(5)
        for packet in self.ch.listen.steps(timeout=15):
            count += 1
            # "万" means tens of thousands of followers — well above the
            # 50-follower threshold — so clamp to 51 to force the rich path.
            if '万' in str(fans):
                fans = 51
            else:
                fans = fans
            # print(fans,"*********")
            if int(fans) < 50 and packet.url == "https://creator.xiaohongshu.com/api/galaxy/creator/data/fans/overall_new":
                print("粉丝数小于50")

                # if packet.url == "https://creator.xiaohongshu.com/api/galaxy/creator/data/fans/overall_new":
                print("***************", count)
                print("获取到了")
                listen_result_thirty = packet.response.body
                rise_fans_list = listen_result_thirty["data"]["thirty"]["rise_fans_list"]
                leave_fans_list = listen_result_thirty["data"]["thirty"]["leave_fans_list"]
                fans_list = listen_result_thirty["data"]["thirty"]["fans_list"]
                listen_result_thirty_json = {"rise_fans_list": rise_fans_list, "leave_fans_list": leave_fans_list,
                                             "fans_list": fans_list}
                # print(listen_result_thirty_json)
                result.update(listen_result_thirty_json)

                fans_thirty_list.append(listen_result_thirty_json)
                # print("****",len(fans_thirty_list))

            # Small accounts: one overall packet is all we need.
            if len(fans_thirty_list) == 1:
                break
            if int(fans) >= 50:

                if packet.url == "https://creator.xiaohongshu.com/api/galaxy/creator/data/fans_portrait_new":
                    listen_result = packet.response.body
                    gender = listen_result["data"]["gender"]
                    age = listen_result["data"]["age"]
                    city = listen_result["data"]["city"]
                    interest = listen_result["data"]["interest"]

                    # Strip chart-styling keys before storing.
                    gender = self.del_colu(gender)
                    age = self.del_colu(age)
                    city = self.del_colu(city)
                    interest = self.del_colu(interest)
                    result_json = {"gender": gender, "age": age, "city": city, "interest": interest}
                    result.update(result_json)

                """监听到粉丝来源的接口"""
                if packet.url == "https://creator.xiaohongshu.com/api/galaxy/creator/data/fans_source":
                    listen_result_source = packet.response.body
                    fans_source = listen_result_source["data"]
                    fans_source = self.del_colu(fans_source)

                    fans_source_result_json = {"fans_source": fans_source}
                    result.update(fans_source_result_json)

                if packet.url == "https://creator.xiaohongshu.com/api/galaxy/creator/data/fans/overall_new":
                    listen_result_thirty1 = packet.response.body
                    # print(listen_result_thirty1)
                    rise_fans_list = listen_result_thirty1["data"]["thirty"]["rise_fans_list"]
                    leave_fans_list = listen_result_thirty1["data"]["thirty"]["leave_fans_list"]
                    fans_list = listen_result_thirty1["data"]["thirty"]["fans_list"]
                    listen_result_thirty_json = {"rise_fans_list": rise_fans_list, "leave_fans_list": leave_fans_list,
                                                 "fans_list": fans_list}
                    # print(listen_result_thirty_json)
                    result.update(listen_result_thirty_json)

                # Large accounts: stop after three packets.
                # NOTE(review): assumes the three target packets arrive as the
                # first three matches — an unrelated match would end the loop
                # early with a partial result; confirm against live traffic.
                if count == 3:
                    break
        #
        dataframe_thirty = pd.DataFrame([{"fans_thirty": result}])
        # dataframe_thirty["spider_user_id"] = self.user_id
        # dataframe_thirty["spider_time"] = current_time
        dataframe_thirty = dataframe_thirty.astype("str")
        # print(dataframe_thirty)
        # pd.io.sql.to_sql(dataframe_thirty, 'xhs_fans_thirty', self.mysql_config(), schema='live_data',
        #                  if_exists='append', index=False)
        print("粉丝计算完成")
        return dataframe_thirty

    """登录二维码状态监控"""

    def login_stauts_listen(self):
        """Watch the login QR-code polling endpoint and mirror its status into
        Redis under key ``xhs_qrcode_status:<user_id>`` (TTL 300 s).

        Status values observed from the endpoint:
          "2" = waiting for scan, "3" = scanned, "4" = authorization failed
          (returns early), "1" = logged in (stops listening).

        NOTE(review): relies on ``self.redis_conn`` being set on the
        instance; only a module-level ``redis_conn`` is visible in this part
        of the file — confirm the attribute is assigned elsewhere.

        :return: a status message string.
        """
        self.ch.listen.start(
            'https://customer.xiaohongshu.com/api/cas/customer/web/qr-code?service=https:%2F%2Fcreator.xiaohongshu.com')

        # NOTE(review): login_status_list is never used — candidate for removal.
        login_status_list = []
        for packet in self.ch.listen.steps():
            print(packet.url)
            # print(packet.response.body["data"])
            status_value = str(packet.response.body["data"]["status"])
            if status_value == "2":
                self.redis_conn.set('xhs_qrcode_status:{}'.format(self.user_id), '2')
                self.redis_conn.expire('xhs_qrcode_status:{}'.format(self.user_id), 300)
                print("等待扫码中")
            if status_value == "3":
                self.redis_conn.set('xhs_qrcode_status:{}'.format(self.user_id), '3')
                self.redis_conn.expire('xhs_qrcode_status:{}'.format(self.user_id), 300)

                print("扫码成功")

            if status_value == "4":
                self.redis_conn.set('xhs_qrcode_status:{}'.format(self.user_id), '4')
                self.redis_conn.expire('xhs_qrcode_status:{}'.format(self.user_id), 300)
                print("授权失败")
                # Failure path: stop listening and report the timeout/failure.
                return "授权超时或扫码失败"

            if status_value == '1':
                self.redis_conn.set('xhs_qrcode_status:{}'.format(self.user_id), '1')

                self.redis_conn.expire('xhs_qrcode_status:{}'.format(self.user_id), 300)
                print("登录成功")
                break
        return "小红书用户登录成功"

    """判断用户是否在数据库中 不在为新用户 在的话为老用户"""

    def user_info(self):
        """Build a one-row DataFrame describing this spider instance.

        Columns: ``spider_user_id`` and ``spider_port`` (both strings).
        The caller uses this row to register the user in xhs_spider_config.

        :return: pandas DataFrame with exactly one row.
        """
        # The original also created a SQLAlchemy engine here
        # (self.mysql_config()) without ever using it — removed.
        return pd.DataFrame(
            [{"spider_user_id": str(self.user_id), "spider_port": str(self.PORT)}]
        )

    """获取cookies"""

    def get_cookies(self):
        """Serialize the browser's current-domain cookies into a header string.

        For each cookie record, its first two entries (conventionally the
        cookie's name and value) are joined as ``name=value;`` fragments.

        NOTE(review): relies on DrissionPage returning cookie dicts with the
        name before the value — confirm against the library's docs.

        :return: concatenated cookie string, e.g. ``"a=1;b=2;"`` ("" if none).
        """
        # The original shadowed the outer loop variable `i` with the inner
        # loop and built the string with repeated `+=`; fixed here.
        fragments = []
        for cookie in self.ch.cookies(all_domains=False, all_info=True):
            first_two = list(dict(cookie).items())[:2]
            fragment = ''
            for position, (key, value) in enumerate(first_two):
                # First entry contributes "value="; second contributes "value".
                fragment += value + '=' if position == 0 else value
            fragments.append(fragment + ';')
        return ''.join(fragments)

    def dy_headers(self, cookies):
        """Build the request headers (referer + Cookie) used for the
        creator-platform API calls.

        :param cookies: serialized cookie string from get_cookies()
        :return: headers dict ready for requests.get()
        """
        return {
            "referer": "https://creator.xiaohongshu.com/new/note-manager",
            "Cookie": cookies,
        }

    # def notes_man(self):
    #
    #     notes_list_all = []
    #     for i in range(0, 100):
    #         self.ch.ele('xpath://*[@id="content-area"]/main/div[1]/div/div[2]/div/div[2]/div/div').click()
    #
    #         url = "https://creator.xiaohongshu.com/api/galaxy/creator/note/user/posted?tab=1&page={}".format(i)
    #         cookies = self.get_cookies()
    #         aa = requests.get(url, headers=self.dy_headers(cookies))
    #         data = json.loads(aa.text)
    #         # print(aa.text)
    #         page = data["data"]['page']
    #         # json_user_id = {"spider_user_id": self.user_id}
    #         json_info = data["data"]
    #         # json_info.update(json_user_id)
    #         notes_list_all.append(json_info)
    #
    #         # print(type(page))
    #         if page == -1:
    #             print("页码到此为止")
    #             break
    #     current_time = datetime.datetime.now()
    #     current_time = str(current_time)[:-7]
    #     notes_result = {"data_desc": notes_list_all}
    #     data_frame = pd.DataFrame([notes_result])
    #     data_frame = data_frame.astype("str")
    #     print(data_frame)
    #     return data_frame

    def notes_man(self):
        """Fetch the user's posted notes from the paginated creator
        "posted" API and keep only those newer than 90 days.

        :return: the string ``"no notes"`` when the account has no notes;
            otherwise a tuple ``(data_frame, page_num)`` where data_frame is
            a one-row DataFrame with a "data_desc" column (stringified note
            list + tags) and page_num is the page count for the stats view.

        NOTE(review): the probe assumes 11 notes/page while the final
        page_num assumes 12 — confirm which size each endpoint actually uses.
        """
        notes_list_all = []
        tags_list = []

        # Probe page 0 first to learn whether any notes exist at all.
        url_test = "https://creator.xiaohongshu.com/api/galaxy/creator/note/user/posted?tab=1&page=0"
        cookies = self.get_cookies()
        aa_test = requests.get(url_test, headers=self.dy_headers(cookies))
        re = json.loads(aa_test.text)
        if re["data"]["tags"] == []:
            return "no notes"
        else:
            tags = re["data"]["tags"]
            # print(tags)
            # Number of pages to walk, assuming 11 notes per page.
            notes_num_page = int(tags[0]["notes_count"])
            if notes_num_page <= 11:
                notes_num_page = 1
            else:
                notes_num_page = int(notes_num_page / 11) + 1
        from datetime import datetime
        for i in range(0, notes_num_page):
            # self.ch.ele('xpath://*[@id="content-area"]/main/div[1]/div/div[2]/div/div[2]/div/div').click()

            url = "https://creator.xiaohongshu.com/api/galaxy/creator/note/user/posted?tab=1&page={}".format(i)

            cookies = self.get_cookies()
            aa = requests.get(url, headers=self.dy_headers(cookies))
            data = json.loads(aa.text)
            # page = data["data"]['page']
            tags = data["data"]["tags"]
            tags_list.append(tags)
            # NOTE(review): a1 is never used — candidate for removal.
            a1 = []
            for j in data["data"]["notes"]:
                # print(type(j["time"]))
                date_format = "%Y-%m-%d %H:%M"
                date_re = datetime.strptime(j["time"], date_format)
                # print(date_re)
                # print(type(date_re))
                current_time = datetime.now()
                day_num = (current_time - date_re).days
                # print(day_num,"时间差为************")

                # Keep only notes published within the last 90 days.
                if day_num >= 90:
                    pass
                else:
                    js_json = {"id": j["id"], "time": j["time"], "images_list": j["images_list"]}
                    notes_list_all.append(js_json)

        # print(notes_list_all)

        # Recompute the stats page count with 12 notes/page.
        # NOTE(review): float division always yields a '.', so exact
        # multiples of 12 (e.g. 24 -> 3 pages) get one extra page —
        # confirm whether that is intended.
        page_num = int(tags_list[0][0]["notes_count"])
        if page_num <= 12:
            page_num = 1
        else:
            page_num = page_num / 12
            if '.' in str(page_num):
                page_num = int(str(page_num).split(".")[0]) + 1
            else:
                page_num = page_num

        # Append the tags block so the stored "data_desc" carries both.
        notes_list_all.extend(tags_list[0])
        notes_result = {"data_desc": notes_list_all}
        data_frame = pd.DataFrame([notes_result])
        data_frame = data_frame.astype("str")
        # print(data_frame)
        print("笔记计算完成")
        return data_frame, int(page_num)

    # def notes_de(self,page_num):
    #     # print(page_num,"()()()()()()()")
    #     desc_list = []
    #     # int(page_num)+1
    #     # if int(page_num)>=35:
    #     #     page_num = 35
    #     # else:
    #     #     page_num = int(page_num)
    #     from datetime import datetime
    #     for i in range(1,page_num+1):
    #         # print("到这里了，哈哈哈哈哈哈哈")
    #         url = "https://creator.xiaohongshu.com/api/galaxy/creator/data/note_stats/new?page={}&page_size=12&sort_by=time&note_type=0&time=7&is_recent=false".format(i)
    #         # url = "https://creator.xiaohongshu.com/api/galaxy/creator/datacenter/note/analyze/list?post_begin_time=1732963561014&post_end_time=1735641960014&type=0&page_size=10&page_num=1"
    #         cookies = self.get_cookies()
    #         notes_desc = requests.get(url, headers=self.dy_headers(cookies))
    #         print(notes_desc.text,"fsfsfsfsfsfsfdfsfsfsfsfsfsf")
    #
    #         notes_desc = json.loads(notes_desc.text)
    #         aaaa = []
    #         if notes_desc["data"]!={}:
    #             for d in notes_desc["data"]["note_infos"]:
    #                 d_time = int(str(d["post_time"])[0:-3])
    #
    #                 dt_object = datetime.fromtimestamp(d_time)
    #                 current_time = datetime.now()
    #                 day_num = (current_time - dt_object).days
    #                 # print(day_num)
    #                 if day_num >= 90:
    #                     pass
    #                 else:
    #                     aaaa.append(d)
    #
    #                     # break
    #             desc_list.append({"note_infos": aaaa})
    #         else:
    #             pass
    #     # print(len(desc_list))
    #     notes_result = {"data_desc_notes":desc_list}
    #     # print(notes_result)
    #     data_frame = pd.DataFrame([notes_result])
    #     data_frame = data_frame.astype("str")
    #     print("notes_计算完毕")
    #     print(data_frame)
    #     return data_frame

    def excel_result(self, excel_local):
        """Load the exported note-stats Excel file into a clean DataFrame.

        Reads the sheet without a header, drops the first two rows (export
        banner plus the original header row) and applies canonical column
        names.

        :param excel_local: path to the downloaded .xlsx file
        :return: cleaned DataFrame
        """
        frame = pd.read_excel(excel_local, header=None)
        frame = frame.iloc[1:]  # drop the export banner row
        frame.columns = [
            'title', 'post_time', 'type', 'read_count', 'like_count',
            'comment_count', 'fav_count', 'increase_fans_count',
            'share_count', 'view_time_avg', 'danmaku_count',
        ]
        frame = frame.iloc[1:]  # drop the original header row as well
        return frame

    def time_format_conversion(self, x):
        """Convert a Chinese-formatted timestamp to standard form.

        :param x: string like "2024年01月02日03时04分05秒"
        :return: "YYYY-MM-DD HH:MM:SS" string
        :raises ValueError: if *x* does not match the expected format
        """
        from datetime import datetime

        parsed = datetime.strptime(x, "%Y年%m月%d日%H时%M分%S秒")
        return str(parsed)

    def delete_detailed_data(self, parent_folder_path):
        """Recursively remove the per-user download folder, if it exists.

        :param parent_folder_path: path of the folder to delete
        """
        import shutil

        target = os.path.join(parent_folder_path)
        if not os.path.exists(target):
            print(f"文件夹不存在。")
            return
        # Remove the folder and everything inside it.
        shutil.rmtree(target)
        print("内容已成功删除。")

    def get_current_time_date(self):
        """Return today's date as an ISO "YYYY-MM-DD" string (also prints it)."""
        from datetime import datetime

        today = datetime.now().date()
        print("当前日期是:", today)
        return str(today)

    def get_agos_nine_date(self):
        """Return the date 90 days before today as "YYYY-MM-DD" (also prints it)."""
        from datetime import datetime, timedelta

        cutoff = datetime.now().date() - timedelta(days=90)
        print(str(cutoff))
        return str(cutoff)

    def _fill_note_counts(self, frame):
        """Replace NaN with 0 in the numeric note-stat columns of *frame*."""
        numeric_columns = ('view_time_avg', 'increase_fans_count', 'like_count',
                           'share_count', 'comment_count', 'read_count',
                           'danmaku_count', 'fav_count')
        for column in numeric_columns:
            frame[column] = frame[column].fillna(0)
        return frame

    def _store_note_page(self, packet_body):
        """Normalize one listened note-stats page and append it to MySQL."""
        frame = pd.DataFrame(packet_body["data"]["note_infos"])
        frame = self._fill_note_counts(frame)
        frame["spider_user_id"] = self.user_id
        pd.io.sql.to_sql(frame, 'xhs_content_detail', self.mysql_config(), schema='live_data',
                         if_exists='append', index=False)

    def notes_de(self):
        """Scrape the 90-day per-note stats from the content-analysis page.

        Clears this user's previous rows from xhs_content_detail, drives the
        date-range picker to [today-90d, today], listens for the paginated
        "list?post_begin_time" API responses, stores each page, then reads
        everything back and returns it packed into a one-row DataFrame
        (column "data_desc_notes"). On any failure an empty-string cell is
        returned instead so the caller's concat still works.

        :return: one-row DataFrame with a "data_desc_notes" column.
        """
        try:
            # Drop stale rows for this user before re-scraping.
            delete_user_sql = 'DELETE from live_data.xhs_content_detail where spider_user_id=%s'
            print(self.delete_prod_user(delete_user_sql))

            curr_date = self.get_current_time_date()
            agos_nine_date = self.get_agos_nine_date()

            # Enter the content-analysis page.
            time.sleep(2)
            self.ch.ele('xpath://*[@id="content-area"]/main/div[1]/div/div[2]/div/div[5]/div/div/span').click()
            time.sleep(2)
            self.ch.refresh()
            time.sleep(2)
            # Page the date widget back far enough to reach 90 days ago
            # (two batches of 4 clicks, kept from the original flow).
            self.ch.ele('xpath://*[@id="pane-note-data"]/div/div/div[1]/div/div[2]/div[2]/div/i[2]').click.multi(4)
            self.ch.ele('xpath://*[@id="pane-note-data"]/div/div/div[1]/div/div[2]/div[2]/div/i[2]').click.multi(4)

            # First page: set the date range and wait for the API response.
            self.ch.listen.set_targets('list?post_begin_time')
            self.ch.listen.start()
            self.ch.ele('xpath://*[@id="pane-note-data"]/div/div/div[1]/div/div[2]/div[2]/div/input[1]').input(
                '{}\n'.format(agos_nine_date))
            self.ch.ele('xpath://*[@id="pane-note-data"]/div/div/div[1]/div/div[2]/div[2]/div/input[2]').input(
                '{}\n'.format(curr_date))

            packet = self.ch.listen.wait(timeout=10, raise_err=True)
            self.ch.listen.stop()
            first_page = packet.response.body
            notes_total = first_page['data']["total"]
            print(notes_total)
            self._store_note_page(first_page)

            # Ceiling division: 10 notes per page. Replaces the original
            # str(float).split('.') trick with exact integer arithmetic.
            total_pages = -(-int(notes_total) // 10)

            if total_pages == 2:
                # Two-page results expose no numbered buttons; clicking the
                # pager element repeatedly is the original workaround.
                self.ch.listen.set_targets('list?post_begin_time')
                self.ch.listen.start()
                self.ch.ele('xpath://*[@id="pane-note-data"]/div/div/div[3]/div/ul/li[2]').click.multi(8)
                packet = self.ch.listen.wait(timeout=10, raise_err=True)
                self.ch.listen.stop()
                self._store_note_page(packet.response.body)

            if total_pages > 2:
                # Click each numbered page button and store its payload.
                for page_no in range(2, total_pages + 1):
                    self.ch.listen.set_targets('list?post_begin_time')
                    self.ch.listen.start()
                    # self.ch.ele('@aria-label="第 {} 页"'.format(page_no)).click()
                    self.ch.ele("t:li@@tx()={}".format(page_no)).click()
                    packet = self.ch.listen.wait(timeout=10, raise_err=True)
                    self.ch.listen.stop()
                    self._store_note_page(packet.response.body)

            # Read back everything just written and serialize it as JSON.
            xhs_content_detail = pd.read_sql(
                "select * from xhs_content_detail where spider_user_id='{}'".format(self.user_id), self.mysql_config())
            json_list = xhs_content_detail.to_dict(orient='records')
            json_array_str = json.dumps(json_list, ensure_ascii=False)
            print(json_array_str)
            data_frame = pd.DataFrame([{"data_desc_notes": json_array_str}])
            data_frame = data_frame.astype("str")
            print("notes_计算完毕")
            print(data_frame)
            return data_frame
        except Exception as exc:
            # Was a bare `except:`; keep the best-effort fallback but make
            # the failure visible and stop swallowing SystemExit /
            # KeyboardInterrupt.
            print("notes_de failed:", exc)
            return pd.DataFrame([{"data_desc_notes": ''}])

    """爬虫主程序"""

    def xhs_huizong(self):
        """Main scrape flow for one user: verify login, then collect the
        home-page stats, 30-day note summary, fans data, note list and
        per-note details, and insert the combined row into live_data.xhs_user.

        :return: "exit" when the login is expired or the user is unknown
            (the caller then skips the push step), otherwise a
            ``{"code": 200, ...}`` dict on success.
        """
        # print(self.PORT)
        # print(self.user_id)
        # logging.debug(f"Input user={self.user_id}, port={self.PORT}")
        # logging.info(f"CURRENT COMPUTING MACHINE={'B'}")

        print('用户', self.user_id, '此次的端口是', self.PORT)
        # time.sleep(5)
        time.sleep(1)
        self.ch.get('https://creator.xiaohongshu.com/new/home')
        time.sleep(2)
        user_it_exist = self.user_state()
        data_frame = self.user_info()
        # First-time users get a config row with spider_login_code='0'.
        if user_it_exist == "用户不存在":
            # logging.info("当前用户为新用户:",self.user_id)

            data_frame["spider_login_code"] = '0'
            pd.io.sql.to_sql(data_frame, 'xhs_spider_config', self.mysql_config(), schema='live_data',
                             if_exists='append', index=False)

        print("走到这里了")
        time.sleep(6)
        login_url_is = self.ch.url
        # A 401 redirect to the login page means the stored session expired.
        if login_url_is == "https://creator.xiaohongshu.com/login?source=&redirectReason=401&lastUrl=%252Fnew%252Fhome" or login_url_is == "https://creator.xiaohongshu.com/login?source=&redirectReason=401&lastUrl=%252Fcreator%252Fnotes":
            print("登录过期或未登录")
            self.ch.close()

            """删除过期账号，方便用于重新授权登录"""

            # Reset the login flag so the account can be re-authorized.
            sql = 'update live_data.xhs_spider_config set spider_login_code = %s where spider_user_id = %s'
            self.pymysql_update(sql, num='0')
            # current_time = datetime.datetime.now()
            # current_time = str(current_time)[:-7]
            data = pd.read_sql(
                "select spider_user_id,userName,xhs_account from live_data.xhs_user_info where spider_user_id='{}'".format(
                    self.user_id), self.mysql_config())
            data_list = data.values.tolist()
            if data_list == []:
                # logging.info(f"User No Exits Xhs_User: {data_list}")

                print("用户表不存在")
                # self.ch.close()
                return "exit"
            else:
                # Record the expired login (main table + backup table) so the
                # downstream service can notify/re-authorize the user.
                error_list = [{"user_id": data_list[0][0], "nickname": data_list[0][1], "xhs_account": data_list[0][2]}]
                # print(error_list)
                data_error = pd.DataFrame(error_list)
                data_error["type"] = "2"
                data_error["win"] = 'B'
                data_error = data_error.astype("str")

                # print(data_error)
                # data_delete_user = {
                #     "env": 0,
                #     "biztype": 1,
                #     "xhs_account": self.user_id
                # }
                #
                # delete_account_url = "http://127.0.0.1:5006/xhs/unbind/DeleteUser"
                #
                # delete_account_data = requests.post(delete_account_url, json=data_delete_user)
                # print(delete_account_data.text)
                pd.io.sql.to_sql(data_error, 'xhs_user_error_login', self.mysql_config(), schema='live_data',
                                 if_exists='append', index=False)
                pd.io.sql.to_sql(data_error, 'xhs_user_error_login_beifen', self.mysql_config(), schema='live_data',
                                 if_exists='append', index=False)

                return "exit"
        else:
            # logging.info(f"Current User No Login: {self.user_id}")

            # Session still valid: mark the QR state as not needed.
            self.redis_conn.set('xhs_qrcode:{}'.format(self.user_id), 'no_login')
            self.redis_conn.expire('xhs_qrcode:{}'.format(self.user_id), 300)

            print("不用登录******")
        """默认登录的情况下 返回首页抓取数据"""
        time.sleep(3)
        # self.ch.get('https://creator.xiaohongshu.com/creator/home')
        # self.ch.ele('xpath://*[@id="content-area"]/main/div[1]/div/div[2]/div/div[1]/div/div').click()
        # self.ch.ele('xpath://*[@id="content-area"]/main/div[1]/div/div[2]/div/div[1]/div/div/span').click()
        time.sleep(1)
        # fans_num: tuple of (follower count, home-page frame, userID) —
        # indices 0/1/2 are used below.
        fans_num = self.xhs_user_info_listen()
        print(fans_num)
        # fans_num = self.xhs_user_info_requests()
        print("""***开始切换近30天内汇总笔记***""")
        self.notes_switch_thirdy()

        print("""***开始获取近30天内汇总笔记***""")

        """汇总笔记模块，获取笔记近30天的汇总数据"""
        notes_thirty = self.notes_data_thirdy()

        fans_thirty = self.fans_listen_thirty(fans_num[0])
        time.sleep(2)
        hz = self.notes_man()
        print(hz, "llllllllllll")
        # notes_man returns "no notes" for empty accounts, otherwise
        # (DataFrame, page_num).
        if hz == "no notes":
            # logging.info(f"Current User No Notes: {self.user_id}")

            # No notes: store empty placeholders for both note columns.
            notes_result_1 = {"data_desc_notes": ''}
            data_frame_1 = pd.DataFrame([notes_result_1])

            notes_result_0 = {"data_desc": ''}
            data_frame_0 = pd.DataFrame([notes_result_0])
            data_huizong = pd.concat([fans_num[1], data_frame_0, notes_thirty, fans_thirty, data_frame_1], axis=1)

            # print(data_huizong)
            data_huizong["type"] = '1'
            data_huizong["userID"] = fans_num[2]
            pd.io.sql.to_sql(data_huizong, 'xhs_user', self.mysql_config(), schema='live_data', if_exists='append',
                             index=False)
            self.ch.close()
            # "200" signals completion to whoever polls the QR status key.
            self.redis_conn.set('xhs_qrcode_status:{}'.format(self.user_id), "200")
            self.redis_conn.expire('xhs_qrcode_status:{}'.format(self.user_id), 300)
            return {"code": 200, "msg": "程序执行完成，数据已全部入库"}
        else:
            print("走到这里了，噢噢噢噢噢噢噢噢哦哦")
            notes_desc_info = self.notes_de()
            print("这篇顶顶顶顶顶顶顶")
            # Combine all per-module frames into a single wide row.
            data_huizong = pd.concat([fans_num[1], hz[0], notes_thirty, fans_thirty, notes_desc_info], axis=1)
            print(data_huizong)
            data_huizong["type"] = '1'
            data_huizong["userID"] = fans_num[2]
            # logging.info(f"Xhs_User result: {data_huizong}")
            print("插入数据库")
            print(data_huizong)
            print("aasdsdsdsdsdsdsdsdsdsdsdsdsds")
            # df_reset = data_huizong.reset_index(drop=True)
            # df_reset.to_csv("test_04.csv")
            pd.io.sql.to_sql(data_huizong, 'xhs_user', self.mysql_config(), schema='live_data', if_exists='append',
                             index=False)
            print("插入数据库成功")
            self.ch.close()
            print({"code": 200, "msg": "程序执行完成，数据已全部入库"})
            return {"code": 200, "msg": "程序执行完成，数据已全部入库"}

    def delete_prod_user(self, sql):
        """Run *sql* (a statement parameterized on the spider user id)
        against the production live_data database.

        :param sql: SQL with one %s placeholder, filled with self.user_id
        :return: "删除成功" on success
        """
        con = pymysql.connect(host='rm-bp17r99475xuauk63yo.mysql.rds.aliyuncs.com', port=3306, user='live_data',
                              passwd='CJMg2tsHg7sj6bo0UTKq0a4T!fPDpJ', db='live_data', charset='utf8')
        try:
            cursor = con.cursor()
            try:
                cursor.execute(sql, self.user_id)
                con.commit()
            finally:
                cursor.close()
        finally:
            # Always release the connection, even if execute/commit raises
            # (the original leaked it on error).
            con.close()
        return "删除成功"

    def pymysql_update_total(self, sql, data, c_time, userID, user_id):
        """Execute the xhs_total_data UPDATE against the production DB.

        :param sql: statement with four %s placeholders
        :param data: serialized row payload
        :param c_time: timestamp for the `time` column
        :param userID: platform user id
        :param user_id: spider user id used in the WHERE clause
        :return: status message string
        """
        con = pymysql.connect(host='rm-bp17r99475xuauk63yo.mysql.rds.aliyuncs.com', port=3306, user='live_data',
                              passwd='CJMg2tsHg7sj6bo0UTKq0a4T!fPDpJ', db='live_data', charset='utf8mb4')
        try:
            cursor = con.cursor()
            try:
                cursor.execute(sql, (data, c_time, userID, user_id))
                con.commit()
            finally:
                cursor.close()
        finally:
            # Always release the connection, even if execute/commit raises
            # (the original leaked it on error).
            con.close()

        return "用户{}值成功,当前状态值".format(self.user_id)

    def push_data(self):
        """Sync every row of live_data.xhs_user into xhs_total_data.

        For each scraped user row: insert a new xhs_total_data record when
        none exists for (spider_user_id, type='1'); otherwise UPDATE the
        existing record's data/time/userID in place.

        NOTE(review): ``self.pymysql_fetchall`` is called as an instance
        method, but only a module-level ``pymysql_fetchall(sql)`` is visible
        in this file — confirm the method exists elsewhere in the class.
        NOTE(review): sql_cha is built with str.format on values read back
        from the DB — parameterize if those values can ever contain quotes.

        :return: "推送完成"
        """
        # data = pd.read_sql(
        #
        #     """
        #     select * from live_data.xhs_user as a LEFT JOIN  live_data.xhs_notes_thirty as b on a.spider_user_id= b.spider_user_id
        # left JOIN live_data.xhs_fans_thirty as c ON a.spider_user_id= c.spider_user_id where b.spider_time>=CURDATE() and c.spider_time>=CURDATE()
        #     """
        #
        #     , self.mysql_config())

        data = pd.read_sql("select * from live_data.xhs_user", self.mysql_config())

        # One dict per xhs_user row.
        data_list = [data.loc[i].to_dict() for i in data.index.values]

        # for i in data_list:
        #     json_data = {"spider_user_id": i["spider_user_id"], "xhs_account": i["xhs_account"], "data": i}
        #
        #     data_total = pd.DataFrame([json_data])
        #     data_total = data_total.astype("str")
        #     print(data_total)
        #     data_total["type"] = '1'
        #     pd.io.sql.to_sql(data_total, 'xhs_total_data', self.mysql_config(), schema='live_data',
        #
        #                      if_exists='append', index=False)  # aaaa = [{"data": data_list}]

        for i in data_list:
            json_data = {"userID": i["userID"], "spider_user_id": i["spider_user_id"], "xhs_account": i["xhs_account"],
                         "data": i}
            # Does a row for this user already exist in xhs_total_data?
            sql_cha = "select * from live_data.xhs_total_data where spider_user_id='{}' and type='{}'".format(
                i["spider_user_id"], '1')
            sql_cha_data = self.pymysql_fetchall(sql_cha)
            # print(sql_cha_data)
            if sql_cha_data == ():
                # No existing row: insert a fresh record.
                # print("走到这里了为空，**************************")
                # sql = 'update live_data.xhs_total_data set spider_user_id = %s and xhs_account = %s and data=%s and type=%s where spider_user_id=%s'
                # current_time = datetime.datetime.now()
                # current_time = str(current_time)[:-7]
                # self.pymysql_update_total(sql, i["spider_user_id"],i["xhs_account"],str(i),'0',i["spider_user_id"])
                data_total = pd.DataFrame([json_data])
                data_total = data_total.astype("str")
                # print(data_total)
                data_total["type"] = '1'
                # data_total["userID"] = i["userID"]
                pd.io.sql.to_sql(data_total, 'xhs_total_data', self.mysql_config(), schema='live_data',
                                 if_exists='append', index=False)  # aaaa = [{"data": data_list}]
            else:
                # Row exists: update its payload/timestamp in place.
                # print("走到这里了，不为空","********************************************")
                # print("更改数据")
                spider_user_id = i["spider_user_id"]
                iii = str(i)
                # print(iii)
                # if i["spider_user_id"] == self.user_id:
                #     print("走到这了")
                # logging.info(f"Current User xhs_total_data Data: {i}")
                current_time = datetime.datetime.now()

                sql = 'update live_data.xhs_total_data set data = %s , time=%s,userID=%s where spider_user_id=%s and type="1"'
                self.pymysql_update_total(sql, iii, current_time, i["userID"], spider_user_id)

        # self.push_data_xhs()

        return "推送完成"

    def xhs_main_app(self):
        """Entry point: run the full scrape, then push results downstream
        unless the scrape bailed out with "exit".

        :return: whatever xhs_huizong() returned ("exit" or a result dict)
        """
        print(self.user_id)
        console_info = self.xhs_huizong()
        if console_info != "exit":
            self.push_data()
        else:
            # Expired/unknown login: nothing was scraped, nothing to push.
            print("不推送数据")
        return console_info


def pymysql_fetchall(sql):
    """Execute *sql* against the live_data MySQL database and return all rows.

    Args:
        sql: a complete SQL statement (the caller is responsible for making
            sure it contains no untrusted input — no parameter binding is done
            here).

    Returns:
        tuple[tuple, ...]: all rows fetched by the cursor (empty tuple for
        statements that return no rows).
    """
    # NOTE(review): credentials are hard-coded; consider moving them to
    # environment variables or a secret store.
    con = pymysql.connect(host='rm-bp17r99475xuauk63yo.mysql.rds.aliyuncs.com', port=3306, user='live_data',
                          passwd='CJMg2tsHg7sj6bo0UTKq0a4T!fPDpJ', db='live_data', charset='utf8')
    try:
        cursor = con.cursor()
        try:
            cursor.execute(sql)
            sql_result = cursor.fetchall()
            # Commit so non-SELECT statements (e.g. TRUNCATE used by callers)
            # take effect.
            con.commit()
        finally:
            # Close the cursor even if execute/fetch raises.
            cursor.close()
    finally:
        # Always release the connection — the original leaked it on error.
        con.close()

    return sql_result


def mysql_config11():
    """Build a SQLAlchemy engine for the ``live_data`` MySQL database.

    Returns:
        sqlalchemy.engine.Engine: engine pointing at the ``live_data`` schema,
        suitable for ``pandas.read_sql`` / ``to_sql``.
    """
    # NOTE(review): credentials are hard-coded; consider moving them to
    # environment variables or a secret store.
    user = 'live_data'
    # The password contains URL-special characters ('!'), so it must be
    # percent-encoded before being embedded in the connection URL.
    password = quote_plus('CJMg2tsHg7sj6bo0UTKq0a4T!fPDpJ')
    host = 'rm-bp17r99475xuauk63yo.mysql.rds.aliyuncs.com'
    dbname = 'live_data'
    return create_engine(
        f"mysql+pymysql://{user}:{password}@{host}:3306/{dbname}?charset=utf8mb4"
    )


def push_data_xhs():
    """Ask the local service to push the aggregated XHS data downstream.

    Posts ``{"env": 2}`` to the local push endpoint and echoes the response.

    Returns:
        str: the raw response body (the service's status text).
    """
    data = {
        "env": 2
    }
    url = "http://127.0.0.1:8085/tencent/python/pushXhsRpaData"
    # NOTE(review): no timeout is set, so this call can block indefinitely if
    # the local service hangs — confirm whether a timeout is acceptable here.
    r = requests.post(url, json=data)
    print(r.text)
    return r.text


def error_push():
    """Notify the local service about users whose XHS login has expired.

    Posts the environment id and the service's secret key to the
    ``exitLoginUser`` endpoint and echoes the response.

    Returns:
        str: the raw response body (the service's status text).
    """
    # NOTE(review): the secret key is hard-coded in source; consider moving it
    # to an environment variable or a secret store.
    data = {
        "env": 2,

        "secretKey": "Nqfep27LYY3ESEldvALHGnv6Ds56Cs4k"
    }
    url = "http://127.0.0.1:8085/tencent/python/xhs/user/exitLoginUser"
    # NOTE(review): no timeout is set, so this call can block indefinitely if
    # the local service hangs — confirm whether a timeout is acceptable here.
    r = requests.post(url, json=data)
    print(r.text)
    return r.text


def read_xhs_spider_config():
    """Re-run the XHS daily spider for every user previously marked as failed.

    Reads user ids from ``live_data.xhs_by_day_error`` (window ``'B'``), runs
    the spider for each one, records users that fail again into
    ``live_data.xhs_by_day_error_again``, then triggers the downstream data
    push and the login-expiry notification.

    Returns:
        str: a fixed completion message.
    """
    print("****任务正在进行中****")

    sql = "select user_id from live_data.xhs_by_day_error where win='B'"
    sql_count = "select count(*) from live_data.xhs_by_day_error where win='B'"

    xhs_config = pymysql_fetchall(sql)
    total = int(pymysql_fetchall(sql_count)[0][0])

    # Local import keeps the progress-bar dependency out of module import time.
    from tqdm import tqdm

    success_user_list_com = []
    error_user_list_com = []
    for row in tqdm(xhs_config, total=total):
        user_id = row[0]
        try:
            # Throttle between accounts to avoid hammering the site.
            time.sleep(1)
            spider = XHS_XPIDER(user_id=user_id)
            print(spider.xhs_main_app())
            success_user_list_com.append(user_id)
        # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit can
        # still abort the whole batch.
        except Exception:
            error_user_list_com.append(user_id)
            data_error = pd.DataFrame([{"user_id": user_id}])
            data_error["win"] = "B"
            # Record the user for the next retry pass.
            pd.io.sql.to_sql(data_error, 'xhs_by_day_error_again', mysql_config11(),
                             schema='live_data', if_exists='append', index=False)

    print(push_data_xhs())
    print(error_push())
    print(datetime.datetime.now())
    return "小红书每日计算完成"


# "6ad38fc364ee48a6aa3b9a6a238be271"
# print(read_xhs_spider_config())
# read_xhs_spider_config


#