from DrissionPage import Chromium
from DrissionPage.common import Settings
import time
import re
import traceback
import pyperclip

# Use the Chinese UI so element-text lookups below (e.g. '复制微博地址') match.
Settings.set_language('zh_cn')
# A post is kept only if its text mentions both "鸿蒙" and "next" (case-insensitive).
verify_pattern = r'(?=.*鸿蒙)(?=.*next)'
# Raw strings: backslashes in Windows paths must not be parsed as escape sequences.
file_path = r"D:\workspace\crawler\data\微博.txt"
url_file_path = r"D:\workspace\crawler\data\微博-URL.txt"

# Launch (or attach to) a Chromium browser and grab the most recent tab.
main_tab = Chromium().latest_tab
# From here on, every alert/confirm dialog is auto-accepted.
main_tab.browser.set.auto_handle_alert()

def write_append(file_path, texts):
    """Append the given lines to *file_path* (UTF-8).

    No-op for empty/None input: the guard runs *before* open() so an
    empty call no longer creates or touches the file.
    """
    if not texts:
        return
    with open(file_path, 'a', encoding='utf-8') as f:
        f.writelines(texts)

def get_card_url(card_ele):
    """Copy a Weibo card's URL via its context menu and return it.

    Scrolls the card into view, opens its menu, clicks "copy post URL",
    and reads the clipboard. Retries up to 5 times (1 s apart); returns
    None when every attempt fails.
    """
    for attempts_left in range(4, -1, -1):
        try:
            card_ele.scroll.to_see()
            menu = card_ele.ele('@class=menu s-fr')
            menu.ele('@tag()=a').click()
            menu.ele('@text()=复制微博地址').click()
            return pyperclip.paste()
        except Exception:
            if attempts_left == 0:
                traceback.print_exc()
                return None
            print(f'====>>1秒后尝试获取卡片url,剩余尝试次数:{attempts_left}')
            time.sleep(1)
    return None

def record_url(file_path, url):
    """Append *url* plus a trailing newline to *file_path* (UTF-8)."""
    with open(file_path, 'a', encoding='utf-8') as out:
        out.write(f'{url}\n')

# Walk the search result pages one by one.
def cellect_url(main_tab, search_keywords, number, url_list):
    """Visit pages 1..number of the Weibo search for *search_keywords*.

    Every card URL found is appended to *url_list* and also persisted to
    the URL file on disk. Page-level errors are logged and the next page
    is attempted regardless.
    """
    for page_number in range(1, number + 1):
        main_tab.get(f'https://s.weibo.com/weibo?q={search_keywords}&page={page_number}')
        main_tab.wait.load_start()
        try:
            main_tab.wait.ele_displayed('@class=main-full')
            for card in main_tab.eles('@class=card-wrap'):
                url = get_card_url(card)
                if not url:
                    continue
                url_list.append(url)
                record_url(url_file_path, url)
        except Exception:
            traceback.print_exc()

def process_card_detail(main_tab, url):
    """Open one Weibo post, expand its comments, and persist matching content.

    The comment list is lazily loaded, so the page is scrolled to the bottom
    repeatedly — for at most one minute, or 5 consecutive errors — until the
    end-of-comments marker appears or there is nothing more to load. The post
    is saved to *file_path* only when its description matches verify_pattern
    ("鸿蒙" + "next"), together with any comments mentioning 动效/动画.
    """
    main_tab.get(url)
    main_tab.wait.load_start()
    card_desc = ""
    error_count = 0
    # Hard deadline: stop scrolling after one minute no matter what.
    max_seconds = int(time.time()) + 60
    last_children_count = 0
    while error_count < 5 and int(time.time()) <= max_seconds:
        try:
            print('获取卡片描述----\n')
            # Grab the post description once; later passes skip this.
            if card_desc == "":
                desc_ele = main_tab.wait.ele_displayed('@class=detail_text_1U10O detail_ogText_2Z1Q8 wbpro-feed-ogText')
                texts = desc_ele.texts()
                if texts:
                    card_desc = "".join(texts)
            print('滚动到底部----\n')
            comment_ele_list = main_tab.eles('@@class=text@@tag()=div')
            if not comment_ele_list:
                # No comments at all — nothing to expand.
                print('没有评论，不需要展开，跳出循环...\n')
                break
            current_children_count = len(comment_ele_list)
            # New comments appeared since the last pass: keep scrolling.
            if last_children_count != current_children_count:
                print('尝试继续滚动....\n')
                main_tab.wait.ele_displayed('@tag()=html').scroll.to_bottom()
                last_children_count = current_children_count
                time.sleep(1)
                continue
            main_tab.wait.ele_displayed('@tag()=html').scroll.to_bottom()
            # Click the "load more" control if one is visible.
            print('判断展开更多----\n')
            expand_more = main_tab.ele('@@class=woo-tip-text@@text()= 点击加载更多... ')
            if expand_more:
                expand_more.click()
                time.sleep(1)
                continue
            print('判断跳出循环----\n')
            # End-of-comments marker: all comments are loaded.
            if main_tab.ele('@class=Bottom_text_1kFLe'):
                break
            # A clean pass resets the consecutive-error budget.
            error_count = 0
        except Exception:
            print('滚动评论到底部异常，继续下一次处理')
            traceback.print_exc()
            error_count = error_count + 1
    # The description must match the HarmonyOS NEXT filter; otherwise skip.
    if not re.search(verify_pattern, card_desc, re.IGNORECASE):
        print(f'描述：{card_desc}\n')
        print('不包含检索关键词\n')
        return
    try:
        animation_pattern = r'动效|动画'
        # Keep only comments that mention animations/motion effects.
        comment_list = [
            comment
            for comment in ("".join(ele.texts()) for ele in main_tab.eles('@@class=text@@tag()=div'))
            if re.search(animation_pattern, comment, re.IGNORECASE)
        ]
        if re.search(animation_pattern, card_desc) or comment_list:
            # One open() for the whole record instead of one per section:
            # identical bytes written, fewer syscalls, atomic-ish append.
            with open(file_path, 'a', encoding='utf-8') as f:
                f.write("描述：\n")
                f.write(card_desc + "\n")
                f.write("网址：\n")
                f.write(url + "\n")
                for comment in comment_list:
                    f.write("---------------评论---------------\n")
                    f.write(comment + "\n")
                f.write('\n')
                f.write('\n')
        else:
            print('内容描述不包含动画且无动画相关评论，不处理')
    except Exception:
        traceback.print_exc()

def read_url_from_file(file_path, url_list):
    """Load previously collected URLs (one per line) into *url_list*.

    Each line is stripped of surrounding whitespace, echoed to stdout,
    and appended. Iterates the file directly instead of a manual
    readline()/EOF-check loop.
    """
    with open(file_path, "r", encoding="utf-8") as file:
        for line in file:
            url = line.strip()
            print(url)
            url_list.append(url)

def run():
    """Collect post URLs for several search keywords, then process each post."""
    url_list = []
    # NOTE: to resume from a previous crawl instead of searching again,
    # call read_url_from_file(url_file_path, url_list) here.
    cellect_url(main_tab, "鸿蒙Next动画", 33, url_list)
    cellect_url(main_tab, "鸿蒙Next动效", 14, url_list)
    cellect_url(main_tab, "鸿蒙Next", 50, url_list)
    for url in url_list:
        process_card_detail(main_tab, url)

# Guard the entry point so importing this module does not start a crawl.
if __name__ == '__main__':
    run()
