import csv
import os
import queue
import threading
import time
import traceback
from asyncio import ALL_COMPLETED, wait
from concurrent.futures import ThreadPoolExecutor
from DataRecorder import Recorder

from DrissionPage import Chromium, ChromiumOptions, SessionOptions
from DrissionPage.common import Settings
from loguru import logger
from read_search_file import get_complete_sheet1
import concurrent.futures

Settings.set_language("zh_cn")
co = ChromiumOptions(read_file=False)  # fresh config object, do not read the on-disk ini file
so = SessionOptions(read_file=False)
# Single shared browser connection used by all producer/consumer threads below.
browser = Chromium(addr_or_opts=co, session_options=so)

# Queue handing opened tab objects from producer threads to scraper threads.
tab_queue = queue.Queue()
# Lock serializing shared-resource access (used to guard the CSV file writes).
page_lock = threading.Lock()


# ========== 生产者函数：打开网页并放入队列 ==========
def open_tab(url):
    try:
        if url is None:
            return
        tab = browser.new_tab(url)
        logger.info(f"已打开: {url}")
        tab_queue.put(tab)
        time.sleep(2)
    except Exception as e:
        logger.error(f"打开失败: {url}, 错误: {e}")


# ========== 消费者函数：采集点赞数量 ==========
def scrape_like_count(log_callback=None):
    while True:
        tab = tab_queue.get()  # 增加超时防止死锁
        if tab is None:
            break
        logger.info(f"正在采集:{tab.url}的数据")
        scrape_real_data(tab, log_callback)
        threading.Thread(target=tab.close).start()

def scrape_real_data(tab, log_callback=None):
    """Scrape one note page: author, title, cover, like/comment/collect counts.

    The scraped record is logged and appended to 笔记信息.csv via
    write_excel(). Any scraping error is caught and logged so the consumer
    thread keeps processing subsequent tabs.

    :param tab: a DrissionPage tab already navigated to the note page
    :param log_callback: optional callable(message, color) for UI feedback
    """
    try:
        note_title_ele = tab.ele('css:#detail-title', timeout=5)  # note title
        note_title_str = note_title_ele.text.strip() if note_title_ele else "未找到"

        # Cover image: look the element up once instead of the check-then-fetch
        # double lookup; default stays '没找到' when the element is absent.
        img_ele = tab.ele('tag:img@@fetchpriority=auto@@decoding=sync@@crossorigin=anonymous', timeout=1)
        img_ele_src = (img_ele.link if img_ele.link else "未找到") if img_ele else '没找到'

        note_url = tab.url  # note link

        like_count = tab.ele('css:.interact-container .like-lottie', timeout=5).next().next().text  # likes
        coll_count = tab.ele('tag:span@@id=note-page-collect-board-guide', timeout=5).child('tag:span@@class=count').text  # collects
        comment_count = tab.ele('css:.interact-container .chat-wrapper', timeout=5).child(index=-1).text  # comments

        # attr('class') can return None; guard with '' so the substring test
        # below cannot raise TypeError.
        media_class_str = tab.ele('css:#noteContainer .author', timeout=5).next().attr('class') or ''
        note_str = '视频' if 'video-player-media' in media_class_str else '图文'  # media type (video / image-text)

        username_ele = tab.ele('css:.interaction-container .author-container .info .username', timeout=5)  # author nickname
        username_ele_str = username_ele.text.strip() if username_ele else "未找到"

        logger.info({'达人昵称': username_ele_str, '笔记标题': note_title_str, '封面图': img_ele_src, '笔记链接': note_url, '点赞': like_count, '评论': comment_count, '收藏': coll_count, '图文类型': note_str})

        log_message(log_callback, f'当前爬取达人昵称:{username_ele_str}', "blue")

        write_excel(coll_count, comment_count, img_ele_src, like_count, note_str, note_title_str, note_url, username_ele_str)

    except Exception as e:
        error_msg = f"程序发生异常:\n{traceback.format_exc()}"
        # loguru treats extra positional args as format args, so the original
        # trailing '\n' argument was never rendered — log the message alone.
        logger.error(str(e))
        logger.error(error_msg)


def write_excel(coll_count, comment_count, img_ele_src, like_count, note_str, note_title_str, note_url,
                username_ele_str):
    """Append one note record to 笔记信息.csv, writing the header row first
    when the file does not exist yet.

    Thread-safe: both the existence check and the append happen under
    ``page_lock`` so concurrent scraper threads cannot interleave rows or
    each write a header.
    """
    data = {
        '达人昵称': username_ele_str,
        '笔记标题': note_title_str,
        '封面图': img_ele_src,
        '笔记链接': note_url,
        '点赞': like_count,
        '评论': comment_count,
        '收藏': coll_count,
        '图文类型': note_str
    }
    csv_file = '笔记信息.csv'
    headers = ['达人昵称', '笔记标题', '封面图', '笔记链接', '点赞', '评论', '收藏', '图文类型']
    with page_lock:
        # Check INSIDE the lock: checking before acquiring it let two threads
        # both observe "file missing" and write the header row twice.
        file_exists = os.path.isfile(csv_file)
        with open(csv_file, mode='a', encoding='utf-8', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=headers)
            if not file_exists:
                writer.writeheader()
            writer.writerow(data)


def log_message(log_callback, message, color=None):
    """Forward *message* to *log_callback* if one was supplied.

    The callback is invoked as ``log_callback(message, color)`` when a truthy
    color is given, otherwise as ``log_callback(message)``. Non-callable
    callbacks (e.g. ``None``) are silently ignored.
    """
    if not callable(log_callback):
        return
    if color:
        log_callback(message, color)
    else:
        log_callback(message)

def start_sub_account_url(file_path, concurrency_num=2, log_callback=None):
    """Read 主页链接 URLs from *file_path* and scrape each note concurrently.

    Producer threads open browser tabs and enqueue them; consumer threads
    scrape the queued tabs. Once every URL has been opened, one ``None``
    sentinel per consumer shuts the consumers down cleanly.

    :param file_path: spreadsheet path containing a 主页链接 column
    :param concurrency_num: number of scraper (consumer) threads
    :param log_callback: optional callable(message, color) for UI feedback
    """
    # ========== concurrent execution ==========
    # Slightly more openers than scrapers keeps the tab queue from running dry.
    OPEN_THREAD_COUNT = concurrency_num + 2
    SCRAPE_THREAD_COUNT = concurrency_num

    df_sheet = get_complete_sheet1(file_path)

    if '主页链接' not in df_sheet.columns:
        log_message(log_callback, "表格中缺少【主页链接】列", "red")
        return

    # Extract the de-duplicated, non-empty profile links.
    url_list = df_sheet['主页链接'].dropna().unique().tolist()
    logger.info(f"共发现 {len(url_list)} 条主页链接")

    with ThreadPoolExecutor(max_workers=OPEN_THREAD_COUNT) as opener_pool, ThreadPoolExecutor(max_workers=SCRAPE_THREAD_COUNT) as scraper_pool:
        # Submit producer tasks: open one tab per URL.
        opener_pool_futures = [opener_pool.submit(open_tab, url) for url in url_list]

        time.sleep(1)  # give the first tabs a head start before consumers poll

        # Submit consumer tasks: one scraper loop per worker.
        scraper_pool_futures = [scraper_pool.submit(scrape_like_count, log_callback) for _ in range(SCRAPE_THREAD_COUNT)]

        # Wait until every URL has been opened (or failed), then tell each
        # consumer to stop — a plain loop, not a throwaway list comprehension.
        concurrent.futures.wait(opener_pool_futures)
        for _ in range(SCRAPE_THREAD_COUNT):
            tab_queue.put(None)
        # Explicitly wait for the scrapers to drain the queue before the
        # success message below; relying on `with`-exit alone hid this intent.
        concurrent.futures.wait(scraper_pool_futures)
    log_message(log_callback, "所有笔记点赞数抓取完成", "green")

# if __name__ == '__main__':
#     file_path = r'主页链接.xlsx'
#     start_sub_account_url(file_path, concurrency_num=3)
















