import os
import re
import time

from DrissionPage import ChromiumPage, ChromiumOptions
from bs4 import BeautifulSoup


def read_chrmoe_path():
    """
    Read the Chrome executable path from the config file.

    Scans ``../../Config/chrome_path.txt`` (relative to the working directory)
    and returns the first line containing ``chrome.exe``, stripped.

    :return: the executable path string, or '' when the config file is
             missing/unreadable or no line contains 'chrome.exe'.
    """
    chrome_path = ''
    try:
        # Only the file access can reasonably fail; keep the try body minimal.
        with open('../../Config/chrome_path.txt', 'r', encoding='utf-8') as file:
            lines = file.readlines()
    except OSError:
        # Narrowed from a bare except; return '' so the return type is
        # consistent (the original fell through to an implicit None here).
        print('未找到chrome_path可执行文件路径')
        return chrome_path
    for line in lines:
        if 'chrome.exe' in line:
            chrome_path = line.strip()
            print(f'可执行文件路径读取成功：{chrome_path}')
            break
    return chrome_path


def chrom_page():
    """
    Launch a Chrome browser on a free port and open the target site.

    :return: a ChromiumPage object on success, False when Chrome fails
             to start (e.g. wrong executable path in the config file).
    """
    browser_path = read_chrmoe_path()
    try:
        options = ChromiumOptions().auto_port()
        options.set_browser_path(fr'{browser_path}')
        page = ChromiumPage(options)
        # Maximize the window, then navigate to the query site.
        page.set.window.max()
        page.get('https://www.link114.cn/')
    except Exception as e:
        print(f'Chrome启动失败：{e}，请检查chrome_path路径是否正确。')
        return False
    return page


def login(page, username, password):
    """
    Log in to the site with the given credentials.

    :param page: ChromiumPage object already showing the site's front page
    :param username: account name
    :param password: account password
    :return: the same page object (logged in unless an exception was printed)
    """
    try:
        # Open the login panel from the member-info area.
        page.ele("x://span[@class='floatright member_info']//a").click()
        page.ele('@name=username').input(username)
        time.sleep(2)
        page.ele('@name=passwd').input(password)
        time.sleep(2)
        # Submit the form, then follow the post-login link to the home page.
        page.ele("x://*[@id='do_submit']").click()
        time.sleep(2)
        page.ele('x:/html/body/div[2]/a').click()
        time.sleep(2)
    except Exception as e:
        print(f'登录出现异常:{e}')
    return page


def split_txt_file(file_path, lines_per_file=500):
    """
    Split a TXT file into chunk files of ``lines_per_file`` lines each.

    Chunks are written as ``split_file_<k>.txt`` into a directory named
    ``split_txt_files_<lines_per_file>`` under the current working directory.

    :param file_path: path of the TXT file to split
    :param lines_per_file: maximum number of lines per chunk file
    :return: list of the created files' relative paths ('<dir>/<name>')
    """
    output_dir = f'split_txt_files_{lines_per_file}'
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(output_dir, exist_ok=True)

    with open(file_path, 'r', encoding='utf-8') as file:
        lines = file.readlines()

    # Ceiling division: number of chunk files needed.
    num_files = (len(lines) + lines_per_file - 1) // lines_per_file

    file_names = []
    for i in range(num_files):
        # Slicing clamps at the end of the list, so no explicit min() needed.
        chunk = lines[i * lines_per_file:(i + 1) * lines_per_file]
        file_name = f'split_file_{i + 1}.txt'
        with open(os.path.join(output_dir, file_name), 'w', encoding='utf-8') as new_file:
            new_file.writelines(chunk)
        file_names.append(file_name)

    print(f"分割完成。在'{output_dir}'目录中创建了 {num_files} 个新的txt文件。")
    return [f'{output_dir}/' + name for name in file_names]


def read_txt_file(file_path):
    """
    Read the entire content of a TXT file.

    :param file_path: path of the file to read
    :return: the file content as a string, or None on any read error
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            return handle.read()
    except FileNotFoundError:
        print(f"文件未找到：{file_path}")
    except Exception as e:
        print(f"读取文件时发生错误：{e}")
    return None


def click_tijiao(page, content):
    """
    Submit a batch of domains for querying and poll until all results load.

    Types ``content`` into the query box, selects all entries, submits, then
    re-reads the result table every 5 seconds until no cell still shows the
    loading spinner; each finished row's cell values are printed as a list.

    :param page: ChromiumPage object, logged in and on the query page
    :param content: newline-separated domains, e.g. 'exedesign.cn\\nnbwsd.com'
    :return: None
    """
    # (The original built an unused content.split('\n') list here; removed.)
    page.ele('x://*[@id="ip_websites"]').input(content, clear=True)  # enter domains
    time.sleep(2)
    page.ele('x:/html/body/div[3]/div[1]/p[1]/span').click()  # select all
    page.listen.start('/www.link114.cn/get.php?func')
    page.ele('x://*[@id="tj"]/a').click()  # submit the query

    while True:
        tbody_html = page.ele('x://*[@id="result_table"]/tbody').html
        soup = BeautifulSoup(tbody_html, "html.parser")
        all_tr = soup.find('tbody').find_all('tr')[1:]  # skip the header row
        # A loading gif in any cell means the query is still in progress.
        loading_gif_found = False
        for tr in all_tr:
            for td in tr.find_all('td'):
                if '/template/images/loading.gif' in str(td):
                    loading_gif_found = True
                    break  # spinner found — stop scanning this row
            if loading_gif_found:
                break  # stop scanning further rows; poll again below
            else:
                # Row is finished: collect its cell values.
                temp_list = []
                # NOTE(review): iterating `tr` walks all direct children
                # (including text nodes), not only <td> tags — kept as-is to
                # preserve output; verify against the live page markup.
                for td in tr:
                    if td.text:
                        temp_list.append(td.text)
                    else:
                        try:
                            # Some rows carry the value in an <img> title.
                            temp_list.append(tr.find('img').attrs['title'])
                        except (AttributeError, KeyError):
                            # No <img> / no title (narrowed from bare except):
                            # classify the empty cell by its markup instead.
                            if 'backlink_zz' in str(td):
                                temp_list.append('失败:重查')
                            elif 'checkbox' in str(td):
                                pass
                print(temp_list)
                print('----------------------------------------')
        if not loading_gif_found:
            break  # every row rendered — done
        time.sleep(5)  # wait before polling the table again


if __name__ == '__main__':
    # Launch the browser and open the query site.
    page = chrom_page()
    is_Login = True  # True means the site login flow is required
    if is_Login:
        # NOTE(review): credentials are hard-coded; consider moving them to a
        # config file alongside chrome_path.txt.
        username = '1003206180'
        password = '1003206180'
        page = login(page, username, password)
        lines_per_file = 200
    else:
        lines_per_file = 500
    txt_lists = split_txt_file('C:\\Users\\admin\Desktop\\500.txt',
                               lines_per_file)
    # Read the first chunk produced by the split above instead of a
    # hard-coded 'split_txt_files_200/...' path, so the is_Login=False
    # branch (500-line chunks) reads from the correct directory too.
    content = read_txt_file(txt_lists[0])
    click_tijiao(page, content)
