# 导入方法函数
import modules.mysql_packages as st  # 数据库的相关函数集
import modules.Flybook_packages as fb  # 飞书的相关函数集
import modules.host_packsges as hp
import modules.spider_packages as sp
import modules.mkzt_packages as mp
# 导入库
import datetime
import schedule
import time
import requests
import json

# Spider: scrape per-account ad analytics for each business entity from the
# TikTok Business Center API and persist both account rows and entity-level
# summaries into MySQL.

# Shared constants for the Business Center analytics endpoint.
ANALYTIC_URL_TEMPLATE = ("https://business.tiktok.com/api/v3/bm/statistics/op/analytic/data"
                         "?org_id={org_id}&attr_source=&source_biz_id=&attr_type=web")
BC_USER_AGENT = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                 "(KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36")


def build_bc_headers(org_id, now_date, csrftoken, include_priority=False):
    """Build the Business Center request headers for one organization.

    Args:
        org_id: organization ID embedded in the referer URL.
        now_date: 'YYYY-MM-DD' date used in the overviewFilter referer param.
        csrftoken: CSRF token taken from the organization's cookie dict.
        include_priority: add the 'priority' header (the 奇迹岛/米壳 requests
            carried it in the original code, the 爆单 request did not).

    Returns:
        dict of HTTP headers for requests.post().
    """
    overview_filter = '{"time":{"source":["' + now_date + '"]}}'
    headers = {
        'accept': 'application/json, text/plain, */*',
        'accept-language': 'zh-CN,zh;q=0.9',
        'content-type': 'application/json',
        'origin': 'https://business.tiktok.com',
        'referer': (f'https://business.tiktok.com/manage/reporting?org_id={org_id}'
                    f'&select_track=1&tab=ads&overviewFilter={overview_filter}'),
        'sec-ch-ua': '"Google Chrome";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': BC_USER_AGENT,
        'x-csrftoken': csrftoken,
    }
    if include_priority:
        headers['priority'] = 'u=1, i'
    return headers


def fetch_page_with_retry(url, headers, cookies, body, retry_name, page_int, max_attempts=10):
    """POST one analytics page, retrying up to max_attempts times.

    Fix: the original code left the response as "" after exhausting retries and
    then crashed with AttributeError on `"".text`; now we raise explicitly.

    Raises:
        RuntimeError: when every attempt fails (chained to the last error).
    """
    last_error = None
    for attempt in range(max_attempts):
        try:
            return requests.post(url, headers=headers, cookies=cookies, json=body)
        except Exception as e:
            last_error = e
            print(f"第【{attempt + 1}】次获取{retry_name}数据第【{page_int}】页数据失败！错误信息为：【{e}】\n")
    raise RuntimeError(f"获取{retry_name}数据第【{page_int}】页失败，已重试{max_attempts}次") from last_error


def row_to_record(row, entity_label, now_time, timestamp, extended=False):
    """Convert one API table row into a DB record dict.

    NOTE(review): the "BD_" ID prefix is applied to ALL entities (including
    奇迹岛/米壳), exactly as in the original code — confirm that is intended.

    Args:
        extended: 米壳 layout (CTR/CPC/impressions/clicks) instead of the
            default CTR/CVR layout used by 爆单 and 奇迹岛.
    """
    record = {
        "ID": f"BD_{row['advertiser_id']}{timestamp}",
        "账户ID": row["advertiser_id"],
        "账户名称": row["advertiser_name"],
        "主体": entity_label,
        "更新时间": now_time,
        "总消耗": row["stat_cost"],
        "转化数": row["time_attr_convert_cnt"],
        "CPA_转化成本": row["time_attr_conversion_cost"],
        "ROSE_支付完成广告支出回报率_站内": row["time_attr_onsite_shopping_roas"],
        "CTR": f'{row["ctr"]}',
    }
    if extended:
        record["CPC"] = f'{row["cpc"]}'
        record["展示次数"] = row["show_cnt"]
        record["点击量"] = row["click_cnt"]
    else:
        record["CVR"] = f'{row["time_attr_conversion_rate"]}'
    return record


def summary_record(statistics, entity_label, update_time, extended=False):
    """Convert the API 'statistics' object into one entity-summary DB record."""
    record = {
        "更新时间": update_time,
        "主体": entity_label,
        "总消耗": statistics["stat_cost"],
        "转化数": statistics["time_attr_convert_cnt"],
        "CPA_转化成本": statistics["time_attr_conversion_cost"],
        "ROSE_支付完成广告支出回报率_站内": statistics["time_attr_onsite_shopping_roas"],
        "CTR": statistics["ctr"],
    }
    if extended:
        record["CPC"] = statistics["cpc"]
        record["展示次数"] = statistics["show_cnt"]
        record["点击量"] = statistics["click_cnt"]
    else:
        record["CVR"] = statistics["time_attr_conversion_rate"]
    return record


def fetch_entity_pages(url, headers, cookies, org_id, org_name, entity_label,
                       retry_name, now_date, now_time, timestamp, extended=False):
    """Fetch every analytics page for one entity.

    Returns:
        (records, statistics): list of per-account record dicts across all
        pages, plus the entity-level 'statistics' object from page 1.
    """
    body = hp.get_bd_body(now_date, 1, org_id, org_name)
    first_response = fetch_page_with_retry(url, headers, cookies, body, retry_name, 1)
    payload = json.loads(first_response.text)["data"]
    total_page = payload["pagination"]["page_count"]  # total number of pages
    statistics = payload["statistics"]                # entity-level aggregates
    records = [row_to_record(row, entity_label, now_time, timestamp, extended)
               for row in payload["table"]]
    # Walk the remaining pages, if any.
    for page_int in range(2, total_page + 1):
        body = hp.get_bd_body(now_date, page_int, org_id, org_name)
        response = fetch_page_with_retry(url, headers, cookies, body, retry_name, page_int)
        table = json.loads(response.text)["data"]["table"]
        records += [row_to_record(row, entity_label, now_time, timestamp, extended)
                    for row in table]
    return records, statistics


def spider_data():
    """Scrape ad-account analytics for 爆单 / 奇迹岛 / 米壳主体 and write the
    per-account data and the entity summaries into the MySQL database.
    """
    # Current date, time and unix timestamp used to stamp every record.
    now_date = datetime.datetime.now().strftime('%Y-%m-%d')
    now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    timestamp = int(time.time())

    print(f"【{now_time}】开始执行程序！！！\n")

    # Organization IDs / display names for each business entity.
    bd_id = "7504188565971468305"
    bd_name = "爆单而已"
    qjd_id = "7418397321836331024"
    qjd_name = "Wonder Island LLC"
    mkzt_id = "7510229531278622721"
    mkzt_name = "米壳主账号WindX"

    # SECURITY NOTE: database credentials are hard-coded here; they should be
    # moved to environment variables or a secrets store.
    connection = st.create_database_connection("47.107.69.10", "root", "xxyo7qf1fn", "bksy_company", port=4728)
    try:
        # ---- 爆单 entity ----
        print(f"{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}  开始发送请求获取【爆单而已】账户数据......\n")
        bd_cookies = hp.get_cookie_dict(connection, "爆单")
        bd_headers = build_bc_headers(bd_id, now_date, bd_cookies["csrftoken"])
        bd_data_list, bd_stats = fetch_entity_pages(
            ANALYTIC_URL_TEMPLATE.format(org_id=bd_id), bd_headers, bd_cookies,
            bd_id, bd_name, bd_name, "爆单", now_date, now_time, timestamp)
        print(f"【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】 爆单主体 数据获取完成，总共获取到【{len(bd_data_list)}】条数据！\n")
        # Write summary first, then per-account rows (same order as before).
        st.sql_insert(connection, "TK_整体据统计", [summary_record(bd_stats, "爆单而已", now_time)])
        st.sql_insert(connection, "爆单data", bd_data_list)

        # ---- 奇迹岛 entity ----
        print(f"【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】  正在发送请求获取【奇迹岛】账户数据......\n")
        qjd_cookies = hp.get_cookie_dict(connection, "奇迹岛")
        qjd_headers = build_bc_headers(qjd_id, now_date, qjd_cookies["csrftoken"], include_priority=True)
        qjd_data_list, qjd_stats = fetch_entity_pages(
            ANALYTIC_URL_TEMPLATE.format(org_id=qjd_id), qjd_headers, qjd_cookies,
            qjd_id, qjd_name, "奇迹岛", "奇迹岛", now_date, now_time, timestamp)
        print(f"【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】  奇迹岛 数据获取完成，总共获取到【{len(qjd_data_list)}】条数据！\n")
        st.sql_insert(connection, "TK_整体据统计", [summary_record(qjd_stats, "奇迹岛", now_time)])
        st.sql_insert(connection, "奇迹岛data", qjd_data_list)

        # ---- 米壳主体 entity (extended metrics: CPC / impressions / clicks) ----
        print(f"【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】  正在发送请求获取【米壳主体】账户数据......")
        mkzt_cookies = hp.get_cookie_dict(connection, "米壳主体")
        mkzt_headers = build_bc_headers(mkzt_id, now_date, mkzt_cookies["csrftoken"], include_priority=True)
        mkzt_data_list, mkzt_stats = fetch_entity_pages(
            ANALYTIC_URL_TEMPLATE.format(org_id=mkzt_id), mkzt_headers, mkzt_cookies,
            mkzt_id, mkzt_name, "米壳主体", "米壳主体", now_date, now_time, timestamp,
            extended=True)
        print(f"【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】  米壳主体 账号数据获取完成，总共获取到【{len(mkzt_data_list)}】个账户数据！\n")
        st.sql_insert(connection, "TK_整体据统计", [summary_record(mkzt_stats, "米壳主体", now_time, extended=True)])
        st.sql_insert(connection, "米壳主体data", mkzt_data_list)
    finally:
        # Fix: the original leaked the connection when any step raised.
        connection.close()

# Main program: one full scrape-and-notify cycle.
def main_task():
    """Run one cycle: scrape the data, compute alerts, push Feishu messages.

    Any exception is caught at this top-level boundary and logged so the
    scheduler loop keeps running.
    """
    # Feishu bot endpoint and signing key (hoisted: the original repeated
    # these literals six times).
    webhook = "https://open.feishu.cn/open-apis/bot/v2/hook/b9d02153-4903-42d9-88a5-d83dd7926ea9"
    sign_key = "yNjtHgt8jykfnhaTj15SMg"
    try:
        # Run the spider to collect and store the data.
        spider_data()
        print(f"【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】  正在梳理 爆单、奇迹岛、米壳主体 需要提醒的数据......\n")
        total_list, message_list, mkzt_total = sp.check_alter()
        print(f"【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】  梳理完毕!\n")
        # Push to Feishu (return values were never used in the original;
        # the unused `result` bindings are dropped).
        fb.send_lark_text_message(webhook, sign_key, f"【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】发送tk_商务中心数据：")
        fb.send_flybook_card(webhook, total_list, message_list, sign_key)
        fb.send_lark_text_message(webhook, sign_key, f"【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】发送米壳主体数据：\n【米壳-USB风扇（扒核心6大数据）】")
        # First card: the 米壳 entity summary row.
        fb.send_flybook_card3(webhook, [mkzt_total[0]], sign_key)
        fb.send_lark_text_message(webhook, sign_key, f"【【账号维度-账号消耗排序-账户名称】\n")
        # Remaining cards: per-account rows sorted by spend.
        fb.send_flybook_card3(webhook, mkzt_total[1:], sign_key)
    except Exception as error:
        print(f"【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】 主程序运行失败，错误信息为：【{error}】\n【{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}】  程序结束！\n")




# Decide whether the given time permits a scheduled run.
def is_allowed_time(now=None):
    """Return True when a run is allowed at *now*.

    Rules: no runs between 00:00 and 00:30 (quiet window), otherwise run
    only on exact 10-minute marks (:00, :10, ..., :50).

    Args:
        now: datetime to evaluate; defaults to the current local time
            (backward-compatible generalization — zero-arg calls behave
            exactly as before).
    """
    if now is None:
        now = datetime.datetime.now()
    # Quiet window: midnight to 00:30.
    if now.hour == 0 and now.minute < 30:
        return False
    # Allowed only on 10-minute marks.
    return now.minute % 10 == 0
# Run the main task only when the time-window check passes.
def check_and_run_task():
    """Execute main_task() if the current time is an allowed slot."""
    if not is_allowed_time():
        return
    main_task()
# Scheduler entry: run the task once, then poll the schedule forever.
def setup_schedule():
    """Start the scraping loop: one immediate run, then minute-level polling."""
    # First run at startup (unconditional, regardless of the time window).
    main_task()
    # Check once per minute whether the time window allows another run.
    schedule.every(1).minutes.do(check_and_run_task)
    # Perform one immediate check as well.
    # NOTE(review): if startup lands exactly on a 10-minute mark this triggers
    # a second back-to-back run right after the unconditional one — confirm
    # that is intended.
    check_and_run_task()
    # Event loop: dispatch pending scheduled jobs.
    while True:
        schedule.run_pending()
        time.sleep(1)
# Script entry point (IDE run-button boilerplate comment translated).
if __name__ == '__main__':
    # Start the scheduler loop.
    setup_schedule()

