import json
import time
import requests
from time import sleep
from core.api_client import ApiClient as AC

"""
    通过api请求百度的各类接口
    传入cookie
    获取文章列表、获取账号信息、删除未通过的文章。。。。。
"""


class BaiduCuttlefish:
    """Client for the Baidu Wenku "cuttlefish" web APIs.

    Authenticates with a browser-exported cookie list (JSON string) and
    exposes helpers to fetch account/shop information, task status, the
    shop's article list, and to delete articles that failed review.
    """

    def __init__(self, cookie_str="[]", proxy=None):
        """Build a client.

        Args:
            cookie_str: JSON array of cookie dicts (each item must contain
                "name" and "value" keys), e.g. as exported by Selenium.
            proxy: optional proxy URL, applied to both http and https.
        """
        self.cookie_str = cookie_str
        if proxy:
            self.proxies = {'http': proxy, 'https': proxy}
        else:
            self.proxies = None
        self.base_url = "https://cuttlefish.baidu.com"
        # Millisecond timestamp, used as a cache-busting query parameter.
        self.wkts = int(time.time() * 1000)
        # Accumulates account statistics filled in by the getter methods.
        self.user_info = {}
        self.headers = self._get_headers()
        # Token captured from the article-list response; required later by
        # the article delete endpoint.
        self.article_token = ""

    def _get_headers(self):
        """Build the default request headers from the stored cookie JSON."""
        cookies = json.loads(self.cookie_str)
        cookie_h = '; '.join(f"{cookie['name']}={cookie['value']}" for cookie in cookies)
        return {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
            'Cookie': cookie_h,
            'Accept': 'application/json, text/plain, */*',
            'Referer': 'https://cuttlefish.baidu.com/',
        }

    def get_all_info(self):
        """Fetch all basic account information (shop id, doc counts, task
        status, ...) into self.user_info, pausing between requests."""
        self.get_user_baseinfo()
        sleep(1)
        self.get_shop_income()
        sleep(1)
        self.get_task_status()
        sleep(1)
        self.get_is_done()
        sleep(1)
        self.get_wenku_info()
        sleep(1)
        self.get_is_task_finish()

    def get_is_task_open(self):
        """Set user_info['is_task_able'] to 1 if any task is listed, else 0."""
        url = self.base_url + "/user/interface/getquerypacklist?cid=99&pn=0&rn=20&word=&tab=1"
        sleep(0.5)
        response = requests.get(url, headers=self.headers, proxies=self.proxies)
        res = response.json()
        try:
            print(f"获取到{len(res['data']['queryList'])}个任务")
            if len(res['data']['queryList']):
                self.user_info["is_task_able"] = 1
            else:
                self.user_info['is_task_able'] = 0
        except Exception as e:
            print(f"error:{e}")

    def get_is_task_finish(self):
        """Record whether today's upload limits have been reached.

        Sets user_info['is_task_finish'] (major task / "原力计划") and
        user_info['is_vipdoc_finish'] (regular VIP-doc upload) to 1 when the
        daily limit is hit, else 0.
        """
        url = self.base_url + "/doc/upload/uploaddaylimit?isMajorTask=1"
        sleep(0.5)
        response = requests.get(url, headers=self.headers, proxies=self.proxies)
        res = response.json()
        try:
            self.user_info['is_task_finish'] = 1 if res['data']['day_limit'] else 0
        except Exception as e:
            print(f"error:{e}")

        url = self.base_url + "/doc/upload/uploaddaylimit"
        sleep(0.5)
        response = requests.get(url, headers=self.headers, proxies=self.proxies)
        res = response.json()
        try:
            self.user_info['is_vipdoc_finish'] = 1 if res['data']['day_limit'] else 0
        except Exception as e:
            print(f"error:{e}")

    def get_is_task_finish_and_return(self):
        """Like get_is_task_finish for the major task only, but also return
        the boolean result (False on any error)."""
        url = self.base_url + "/doc/upload/uploaddaylimit?isMajorTask=1"
        sleep(0.5)
        response = requests.get(url, headers=self.headers, proxies=self.proxies)
        res = response.json()
        try:
            if res['data']['day_limit']:
                self.user_info["is_task_finish"] = 1
                return True
            else:
                self.user_info['is_task_finish'] = 0
                return False
        except Exception as e:
            print(f"error:{e}")
            return False

    def get_which_task_page_able_return(self):
        """Scan the category ids and return the first cid whose task list has
        refreshed (non-empty) entries; return 100 when none is available."""
        url = "https://cuttlefish.baidu.com/user/interface/getquerypacklist"
        for cid in [99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]:
            params = {
                'cid': cid,
                'pn': 1,
                'rn': 20,  # page size fixed at 20
                'word': '',
                'tab': 1,
            }
            # Consistency fix: this call previously ignored self.proxies
            # unlike every other request in the class.
            response = requests.get(url, headers=self.headers, params=params, proxies=self.proxies)
            res = response.json()
            try:
                if res['data']['queryList']:
                    print(cid)
                    return cid
            except Exception as e:
                print(f"获取任务列表error:{e}")
            sleep(6)
        return 100

    def get_user_baseinfo(self):
        """Fetch uid, username and download-ticket count into user_info."""
        url = self.base_url + "/ndecommtob/fetchmain?_wkts_=" + str(self.wkts)
        response = requests.get(url, headers=self.headers, proxies=self.proxies)
        sleep(0.5)
        if response.status_code == 200:
            res = response.json()
            try:
                self.user_info["uid"] = res['pageData']['upUserInfo']['data']['uid']
                self.user_info["uname"] = res['pageData']['upUserInfo']['data']['uname']
                # Number of download privileges left on the account.
                self.user_info["ticket_num"] = res['pageData']['vipInfo']['proDownTicketTotal']
            except Exception as e:
                print(f"获取用户信息出现错误{e},{res}")
        else:
            print(f"请求失败： {response.status_code}，错误信息：{response.text}")

    def get_shop_income(self):
        """Fetch the shop's total income into user_info['income']."""
        url = self.base_url + "/user/shopfufei/shopincome"
        sleep(0.5)
        res = self.get_requests(url)
        if not res['code']:
            self.user_info["income"] = res['data']['data']['total']
        else:
            print(f"-----请求{url}失败；{res['data']}-----")

    def get_is_done(self):
        """Fetch the four shop-rule progress flags into user_info."""
        url = self.base_url + "/user/interface/gettargetruledata"
        sleep(0.5)
        res = self.get_requests(url)
        if not res['code']:
            process = res['data']['data']['ruleProcess']
            self.user_info["one_isdone"] = process['onlinedCount']['isDone']
            self.user_info["two_isdone"] = process['passPercent']['isDone']
            self.user_info["three_isdone"] = process['shopOpenDays']['isDone']
            self.user_info["four_isdone"] = process['vipDlCount']['isDone']
        else:
            print(f"-----请求{url}失败；{res['data']}-----")

    def get_task_status(self):
        """Fetch upload-day and finished-task counters into user_info."""
        url = self.base_url + "/user/interface/getquerypacklist?cid=99&pn=0&rn=20&word=&tab=1"
        sleep(0.5)
        res = self.get_requests(url)
        if not res['code']:
            self.user_info["task_days"] = res['data']['data']['uploadDayNum']
            self.user_info["task_finish_num"] = res['data']['data']['userFinishTaskNum']
        else:
            print(f"-----请求{url}失败；{res['data']}-----")

    def get_wenku_info(self):
        """Fetch personal Wenku account stats (downloads received, total doc
        count) into user_info."""
        url = "https://wenku.baidu.com/user/interface/myinfo"
        sleep(0.5)
        res = self.get_requests(url)
        if not res['code']:
            self.user_info["be_download_num"] = res['data']['data']['docBeDownload']
            self.user_info["doc_num"] = res['data']['data']['docNum']['docTotal']
        else:
            print(f"-----请求{url}失败；{res['data']}-----")

    def get_article(self, total_page=20):
        """Fetch up to *total_page* pages (10 docs each) of the shop's
        article list and return all docs as one list.

        Also captures the anti-CSRF token from the first successful page,
        which del_fail_article needs for the delete endpoint.
        """
        all_docs = []
        page_size = 10
        page_num = 0
        while page_num < total_page:
            url = f'https://cuttlefish.baidu.com/nshop/doc/getlist?sub_tab=1&pn={page_num}&rn={page_size}&query=&doc_id_str=&time_range=&buyout_show_type=1&needDayUploadUserCount=1'
            res = self.get_requests(url)
            sleep(3)
            if not res['code']:
                data = res['data']
                print(f"获取到=====第{page_num + 1}页=====数据")
                if not data or "data" not in data or "doc_list" not in data["data"]:
                    break
                if not self.article_token:
                    self.article_token = data['data']['token']
                doc_list = data["data"]["doc_list"]
                all_docs.extend(doc_list)
                # A short page means it was the last one (this also covers an
                # empty page, so no separate len == 0 check is needed).
                if len(doc_list) < page_size:
                    break
            else:
                print(f"-----请求{url}失败；{res['data']}-----")
            page_num += 1

        return all_docs

    def check_task_is_available(self, task):
        """Return True when *task* is still visible and unfinished at its
        recorded page/sort position, else False.

        Args:
            task: dict with 'article_type', 'page_number',
                'page_sort_number', 'query_id' and 'task_name' keys.
        """
        url = self.base_url + f"/user/interface/getquerypacklist?cid={task['article_type']}&pn={task['page_number']}&rn=20&word=&tab=1"
        response = requests.get(url, headers=self.headers, proxies=self.proxies)
        res = response.json()
        try:
            if res['data']['queryList']:
                entry = res['data']['queryList'][int(task['page_sort_number']) - 1]
                # status == 1 means "not finished yet".
                if entry['status'] == 1 and entry['queryId'] == task['query_id']:
                    return True
                print(f"-----任务{task['task_name']}被隐藏，跳过-----")
                return False
            # Fix: an empty queryList used to fall through and return None
            # implicitly; make the negative result explicit.
            return False
        except Exception as e:
            print(f"出现错误:{e}")
            return False

    def del_fail_article(self, total_page=3, is_update=False, ac=None):
        """Delete articles that failed review (doc_status == 4).

        Args:
            total_page: number of article-list pages to scan.
            is_update: when True, report each failed title through *ac*.
            ac: API client exposing get_data(endpoint=...); only used (and
                required) when is_update is True.

        Returns:
            dict mapping doc_status values 1-4 to how many docs had each.
        """
        total_stats = {1: 0, 2: 0, 3: 0, 4: 0}
        for doc in self.get_article(total_page=total_page):
            doc_status = doc.get("doc_status")
            if doc_status in total_stats:
                total_stats[doc_status] += 1
            if doc_status == 4:
                doc_id = doc.get("doc_id")
                url1 = f'https://cuttlefish.baidu.com/user/submit/newdocdelete?token={self.article_token}&new_token={self.article_token}&fold_id_str=0&doc_id_str={doc_id}&skip_fold_validate=1'
                if is_update:
                    print(ac.get_data(endpoint=f"/api/wenku/titles/fail_num_add?article_name={doc.get('title')}&reason={str(doc.get('reason_new'))}"))
                print(f"开始删除:{doc['title']}")
                self.get_requests(url=url1, type=1)
                sleep(5)
        return total_stats

    def get_requests(self, url, type=0, max_retries=3):
        """GET *url* with the session headers/proxies, retrying on failure.

        Args:
            url: absolute URL to request.
            type: when truthy, return the raw decoded JSON body instead of
                the wrapped result (name kept for caller compatibility even
                though it shadows the builtin).
            max_retries: extra attempts after the first one.

        Returns:
            dict with 'code' (0 = success, 1 = failure) and 'data' (payload
            on success, error text on failure) — or the raw JSON body when
            *type* is truthy.
        """
        result = {
            "code": 1,
            "data": ""
        }
        last_error = ""
        for retry in range(max_retries + 1):
            try:
                response = requests.get(url, headers=self.headers, proxies=self.proxies)
                if response.status_code == 200:
                    res = response.json()
                    if type:
                        return res
                    if res["status"]["code"] == 0:
                        result['code'] = 0
                        result['data'] = res
                        return result
                    # The API answered but reported an application-level
                    # error; do not retry these.
                    # (Quoting fix: nested double quotes inside this f-string
                    # were a SyntaxError before Python 3.12.)
                    result['data'] = f"百度API接口请求错误（{res['status']['code']}）：{res['status']['msg']}"
                    return result
                print(f"本次请求{url}失败")
                result['data'] = f"本次请求{url}失败，（{response.status_code}）：{response.text}"
                last_error = result['data']
            except Exception as e:
                print(f"请求错误：{e}")
                result['data'] = f"请求错误：{e}"
                last_error = str(e)
            # Bug fix: the back-off and the final "all attempts failed"
            # return used to sit inside the loop body at the wrong indent,
            # so the first failure returned immediately and no retry ever
            # happened.
            if retry < max_retries:
                time.sleep(3)  # wait 3s before the next attempt
        result['data'] = f"所有请求均失败，最后一次请求错误：{last_error}"
        return result

if __name__ == "__main__":
    # Browser-exported cookie list (JSON) for the account under test.
    # Syntax fix: the previous commented-out sample cookie block was removed
    # because one of its continuation lines had lost its leading '#', which
    # made the module a SyntaxError.
    cookie_str = """
    [{"domain": ".baidu.com", "expiry": 1750062936, "httpOnly": false, "name": "H_PS_PSSID", "path": "/", "sameSite": "Lax", "secure": false, "value": "60237_60278_60297_60327_60320_60339"}, {"domain": ".baidu.com", "httpOnly": true, "name": "BDUSS", "path": "/", "sameSite": "Lax", "secure": false, "value": "JqcDBSamx5aDBXRHRvV3FSN294QTZBTmdKNU51fn5yfjAxZkxMMGVRUFhNSlptRUFBQUFBJCQAAAAAAAAAAAEAAACkNqgPw9XA7LXE0dvA4QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANejbmbXo25mVU"}, {"domain": ".baidu.com", "expiry": 1750062738, "httpOnly": false, "name": "ZFY", "path": "/", "sameSite": "None", "secure": true, "value": ":Bi4C:ALVJj93rlDlTQdXSegLm8oUSBiZnGwqvW0sHsvk:C"}, {"domain": ".baidu.com", "expiry": 1718613138, "httpOnly": false, "name": "BA_HECTOR", "path": "/", "sameSite": "Lax", "secure": false, "value": "8o2g240021ah0heh0g2105a080vp3r1j6t8oj1v"}, {"domain": ".baidu.com", "expiry": 1750062738, "httpOnly": false, "name": "BAIDUID_BFESS", "path": "/", "sameSite": "None", "secure": true, "value": "26F11FA9C17ED6157998718A4B02FA0E:FG=1"}, {"domain": ".baidu.com", "httpOnly": true, "name": "BDUSS_BFESS", "path": "/", "sameSite": "None", "secure": true, "value": "JqcDBSamx5aDBXRHRvV3FSN294QTZBTmdKNU51fn5yfjAxZkxMMGVRUFhNSlptRUFBQUFBJCQAAAAAAAAAAAEAAACkNqgPw9XA7LXE0dvA4QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANejbmbXo25mVU"}, {"domain": "www.baidu.com", "httpOnly": false, "name": "BD_HOME", "path": "/", "sameSite": "Lax", "secure": false, "value": "1"}, {"domain": ".baidu.com", "expiry": 1750062738, "httpOnly": false, "name": "BAIDUID", "path": "/", "sameSite": "Lax", "secure": false, "value": "26F11FA9C17ED6157998718A4B02FA0E:FG=1"}, {"domain": "www.baidu.com", "expiry": 1719390936, "httpOnly": false, "name": "BD_UPN", "path": "/", "sameSite": "Lax", "secure": false, "value": "12314753"}, {"domain": ".baidu.com", "expiry": 1753086738, "httpOnly": false, "name": "PSTM", "path": "/", "sameSite": "Lax", "secure": 
    false, "value": "1718526738"}, {"domain": ".baidu.com", "expiry": 1753086738, "httpOnly": false, "name": "BIDUPSID", "path": "/", "sameSite": "Lax", "secure": false, "value": "26F11FA9C17ED615562C8CCE0C8CC4FD"}]
    """
    bc = BaiduCuttlefish(cookie_str=cookie_str)
    # Smoke test: fetch one page of the article list.
    print(bc.get_article(total_page=1))