import random
import threading
import traceback
from datetime import datetime
from urllib.parse import urlparse
import pandas as pd
import time
from DrissionPage import Chromium, ChromiumOptions, SessionOptions
from DrissionPage.common import Settings
from handle_asset_link import collect_star_info, collect_day_note_info, collect_coope_note_info, collect_traffic_source_info, collect_detail_source_info, collect_waiyi_source_info, collect_fans_profile_info, collect_fans_summary_info
from loguru import logger
from read_search_file import get_complete_sheet1, mock_click_js

# Module-level shared state: a lock for cross-thread coordination (not used in
# the code visible in this module) and one Chromium browser instance shared by
# every function below.
lock = threading.Lock()
Settings.set_language("zh_cn")  # force DrissionPage messages to simplified Chinese
co = ChromiumOptions(read_file=False)  # fresh config object; do not read an ini file
so = SessionOptions(read_file=False)  # fresh session options; do not read an ini file
# NOTE(review): connecting to / launching the browser happens at import time as
# a side effect of importing this module — confirm that is intended.
browser = Chromium(addr_or_opts=co, session_options=so)

def log_message(log_callback, message, color=None):
    """Forward *message* to *log_callback* when it is callable.

    *color* is passed along only when truthy, so callbacks that accept a
    single positional argument keep working.
    """
    if not callable(log_callback):
        return
    if color:
        log_callback(message, color)
    else:
        log_callback(message)

def is_number(value):
    """Return True when *value* is an int, float, or complex number.

    Uses ``isinstance`` (idiomatic, and accepts numeric subclasses) instead of
    an exact ``type(...)`` comparison. ``bool`` is explicitly excluded even
    though it subclasses ``int``, preserving the original behavior where
    ``True``/``False`` were never treated as numbers.
    """
    return isinstance(value, (int, float, complex)) and not isinstance(value, bool)


def extract_star_id(url_str):
    """Return the last path segment of *url_str* (the KOL / star id).

    A falsy input yields ''. A plain id string (no scheme, no slashes)
    is returned unchanged, since it is its own last path segment.
    """
    if not url_str:
        return ''

    segments = urlparse(url_str).path.strip('/').split('/')
    # str.split always yields at least one element, but keep the guard
    # to mirror the documented '' fallback.
    return segments[-1] if segments else ''

def start_sub_account_url(file_path, log_callback=None):
    """Crawl KOL (达人) info for every row of the workbook at *file_path*.

    Reads sheet1, requires a 达人ID column, grabs PGY session cookies from a
    fresh browser tab once, then for each row runs every collect_* helper in a
    fixed order, accumulating fields into one dict per KOL. All rows are
    exported to a timestamped Excel file at the end. A failure on one row is
    logged and skipped so the remaining rows still run.

    Args:
        file_path: path of the input workbook (must contain a 达人ID column).
        log_callback: optional callable(message[, color]) used for UI logging.
    """
    now = datetime.now().strftime("%m_%d_%H_%M")
    output_file = f'达人信息汇总_{now}.xlsx'

    df_sheet = get_complete_sheet1(file_path)

    if '达人ID' not in df_sheet.columns:
        log_message(log_callback, "表格中缺少达人ID列", "red")
        return

    # Open the PGY home page once and harvest the session cookies that every
    # collector call below reuses.
    pgy_tab = browser.new_tab('https://pgy.xiaohongshu.com/solar/pre-trade/home')
    unit_dict_cookie = pgy_tab.cookies(all_info=False).as_dict()

    # All collectors share the same keyword signature; keep this fixed order.
    collectors = (
        collect_star_info,
        collect_day_note_info,
        collect_coope_note_info,
        collect_traffic_source_info,
        collect_detail_source_info,
        collect_fans_summary_info,
        collect_waiyi_source_info,
        collect_fans_profile_info,
    )

    all_data = []
    for index, dd in enumerate(df_sheet.itertuples()):
        star_id = ''
        try:
            star_id = dd.达人ID.strip() if pd.notna(dd.达人ID) else ''
            star_id = extract_star_id(star_id)

            log_message(log_callback, f'正在爬取第【{index + 1}】条数据 - 当前正在处理达人ID:{star_id}', "purple")

            row_data = {}  # accumulates every field for the current KOL
            for collect in collectors:
                collect(cookies=unit_dict_cookie, star_id=star_id, r=row_data, log_callback=log_callback)

            all_data.append(row_data)
            time.sleep(random.randint(1, 3))  # small jitter between requests
        except Exception as e:
            # BUGFIX: do NOT re-evaluate dd.达人ID.strip() here. If the cell is
            # a non-string (e.g. a float) the .strip() raises *inside* this
            # handler, escapes the try/except, and aborts the whole loop.
            # Reuse the star_id captured before the failure instead.
            error_msg = f"{type(e).__name__}: {str(e)}"
            error_trace = traceback.format_exc()  # full stack trace
            logger.error(f"错误类型: {type(e).__name__}\n"
                         f"错误详情: {error_msg}\n"
                         f"堆栈跟踪:\n{error_trace}")

            log_message(log_callback=log_callback, message=f"当前达人ID:{star_id}获取信息遇到未知错误，继续执行下一条", color="red")
            continue

    # Export everything in one shot at the end.
    if all_data:
        df = pd.DataFrame(all_data)
        df.to_excel(output_file, index=False)
        log_message(log_callback, f'✅ 全部数据已保存至：{output_file}, 稍后会自动在当前目录生成该文件', "purple")
    else:
        log_message(log_callback, "❌ 没有数据可保存", "red")

# if __name__ == '__main__':
#     file_path = r"../shared/唤端-创建计划-样例.xlsx"
#
#     browser = Chromium()  ## 临时测试、到时候删掉， 只保留调用create_unit()函数
#     unit_view_tab = browser.latest_tab  ## 临时测试、到时候删掉
#     unit_view_tab.get('https://ad.xiaohongshu.com/aurora/ad/create/campaign/4?isStep=true&AFormGrayFlag=false&vSellerId=67403ef0c2f45c001519af4b')  ## 临时测试、到时候删掉
#
#     sheet1_data = get_complete_sheet1(file_path)
#     for index, row in sheet1_data.iterrows():
#         if index == 0:
#             create_plan(unit_view_tab, row, file_path, df_sheet=sheet1_data)















