# -*- coding: utf-8 -*-
"""
 ----------------------------------------
|File Name:     etree_test
|Author:        WYT
|date:          2021/6/6
 ----------------------------------------
|  Change Activity:
|                   2021/6/6:
-----------------------------------------
"""
import re

from lxml import etree

# tree = etree.parse('../page/rdDataMatch.html', etree.HTMLParser())
# # result = etree.tostring(tree)
# tr_list = tree.xpath('//form[@id="DataEprProject_list"]//div[@class="tableRegionDivBody"]//tbody/tr')
# # tr_list = tree.cssselect("#DataEprProject_list table tr")
# for tr in tr_list:
#     rd_id_str = tr.xpath("./td[2]/text()")
#     rd_name_str = tr.xpath("./td[3]/text()")
#     print("{}:{}".format(rd_id_str, rd_name_str))
#
# # 匹配下一页 url
# next_list = tree.xpath('//form[@id="DataEprProject_list"]//div[@class="page"]/ul/li[4]/a')
# pre_list = tree.xpath('//form[@id="DataEprProject_list"]//div[@class="page"]/ul/li[2]/a')
# next_a = {} if len(next_list) < 1 else next_list[0]
# next_onclick = next_a.attrib.get("onclick")
# print(next_a.attrib.get("onclick"))
#
# next_url_pattern = """.*?setAttribute\(['|"]action['|"],'(.*?)'.*?"""
# next_res = re.findall(next_url_pattern, next_onclick, re.S)
# print(next_res)
#
# next_page_pattern = """.*?value=['|"](\d+)['|"];.*?"""
# next_page_res = re.findall(next_page_pattern, next_onclick, re.S)
# print(next_page_res)
#
# pre_a = {} if len(pre_list) < 1 else pre_list[0]
# print(pre_a.attrib.get("onclick"))


# tree = etree.parse('../page/IARiprIecAdd.html', etree.HTMLParser())
# result = etree.tostring(tree)

# res = tree.xpath("//form[@id='dataEprCycxForm']/input[@id='dataEprCycx.id']/@value")
# res = tree.xpath("//form[@id='dataEprCycxForm']/input[@id='dataEprCycx.id']")
# print(res)
# print(dir(res))

# text = "handleFile(this,'8a5419edc90b11eba90a00163e827061','10','2_4')"
# data_id_res = re.findall(".*?'((?=.*[a-zA-Z])(?=.*[0-9])[A-Za-z0-9].*?)'.*?", text)
# print(data_id_res)
#
# type_res = re.findall(".*?,'(\d+)',.*?", text)
# print(type_res)
#
# sub_type_res = re.findall(".*?,'(\d+_\d+)'", text)
# print(sub_type_res)

# tree = etree.parse('../page/rdcostList.html', etree.HTMLParser())
# result = etree.tostring(tree)
#
# res = tree.xpath("//ul[@id='attendance']/li/a")
# for i in res:
#     href = i.attrib.get("href")
#     param_str = href[href.index("?") + 1:]
#     param_dict = {p.split("=")[0]: p.split("=")[1] for p in param_str.split("&")}
#     print(href)
#     print(param_str)
#     print(param_dict)
from base.BaseMethod import BM
from settings import log_path, chrome_driver_path
from util.Logger import Logger
from util.g import g
from util.selenium import webdriver
from util.selenium.webdriver import ChromeOptions, DesiredCapabilities
from util.selenium.webdriver.common.by import By

# Reference list of "<IP id>:<title>" pairs (titles in Chinese) — the same
# "{sort}:{name}" format the commented-out table scraper above builds.
# NOTE(review): appears to be a hard-coded snapshot used for manual comparison
# in this scratch script — confirm before reusing elsewhere.
li_list = ['IP01:一种测量电路', 'IP02:一种防雷电的长距离以太网网桥', 'IP03:一种搬运装置', 'IP04:奥德鲁尺寸分析系统软件v1.0', 'IP05:一种胶盖压装治具', 'IP06:一种注脂机构', 'IP07:奥德鲁轴承注脂压盖多功能机控制软件V1.0', 'IP08:一种轴承的匀脂机构', 'IP09:奥德鲁PLC数据一键备份软件V1.0', 'IP10:一种轴承的装配检测设备', 'IP11:一种轴承的检测机构', 'IP12:一种轴承振动检测机构', 'IP13:奥德鲁轴承多功能检测机控制软件V1.0', 'IP14:奥德鲁扭矩分析系统软件V1.0', 'IP15:一种轴向游隙检测机构', 'IP16:一种径向游隙检测装置', 'IP17:一种轴承径向游隙测量设备用送料及夹紧装置', 'IP18:一种环类零件的下料装置', 'IP19:奥德鲁双轴振动分析软件V1.0', 'IP20:奥德鲁轴承合套铆压一体机控制软件V1.0', 'IP21:奥德鲁智能合套配对软件V1.0', 'IP22:一种轴承旋转测振装置', 'IP23:一种高度测量装置', 'IP24:奥德鲁DMES系统软件V1.0', 'IP25:一种用于两平行输送轨道之间的搬料装置', 'IP26:一种良品和不良品自动分料装置']


def handler_version(s):
    """Remove version tokens such as ``V1.0`` or ``v2.3.4`` from *s*.

    Args:
        s (str): text that may contain one or more version markers.

    Returns:
        str: *s* with every matched version substring deleted.
    """
    # Raw string: "\d" inside a plain string is an invalid escape sequence
    # (SyntaxWarning from Python 3.12 onward). The character class keeps the
    # original's '+', '/' and '.' between digits to preserve behavior.
    pattern = r".*?([vV]+[\d+/.]*\d+).*?"
    for token in re.findall(pattern, s):
        # str.replace removes every occurrence of the matched token.
        s = s.replace(token, "")
    return s

# tree = etree.parse('../page/test.html', etree.HTMLParser())
# tr_list = tree.xpath(
#     '//form[@id="DataEprTrans_list"]//div[@class="tableRegionDivBody"]//tbody/tr')
# for tr in tr_list:
#     sta_name_str = BM.get_first(tr.xpath("./td[2]/text()"))
#     sort_num_str = BM.get_first(tr.xpath("./td[10]/text()"))
#     sign_str = "{}:{}".format(sort_num_str, sta_name_str)
#     print(sign_str)
#     # li_list.append(sign_str)
#
# g.logger = Logger(log_file_name=log_path.replace("log.txt", "test_request.log"), log_level=Logger.DEBUG,
#               logger_name="test_request").get_log()
# f = open("../page/test.html", "r", encoding="utf-8")
# text = f.read()
# random_insert_chars_p = ".*?([\r\n].[0-9a-zA-Z]{4,}[\r\n].).*?"
# res = re.findall(random_insert_chars_p, "一种可多方位投料的\r\n2000\r\n螺丝分拣装置", re.S)
# print(res)
# g.logger.info(text)

# tree = etree.parse("../page/科技部政务服务平台.html", etree.HTMLParser())
# content = tree.xpath("string(//div[@class='layui-layer-content'])")
# print(content)

# Manual check: open a local snapshot of the 科技部政务服务平台 page in Chrome
# and print the text of the layui pop-up tip, if one is present.
url = "file:///C:/Users/Administrator/Desktop/%E7%A7%91%E6%8A%80%E9%83%A8%E6%94%BF%E5%8A%A1%E6%9C%8D%E5%8A%A1%E5%B9%B3%E5%8F%B0/%E7%A7%91%E6%8A%80%E9%83%A8%E6%94%BF%E5%8A%A1%E6%9C%8D%E5%8A%A1%E5%B9%B3%E5%8F%B0.html"
option = ChromeOptions()
# Strip the "enable-automation" switch so the page cannot detect the
# automation banner/flag.
option.add_experimental_option('excludeSwitches', ['enable-automation'])
option.add_argument("--ignore-certificate-errors")
desired_capabilities = DesiredCapabilities.CHROME
desired_capabilities["pageLoadStrategy"] = "none"
# BUG FIX: desired_capabilities was built but never passed to the driver, so
# pageLoadStrategy "none" silently never took effect. Pass it explicitly
# (Selenium 3 style, matching the existing executable_path usage).
driver = webdriver.Chrome(executable_path=chrome_driver_path, options=option,
                          desired_capabilities=desired_capabilities)
g.driver = driver  # publish the driver on the shared global holder
driver.maximize_window()
# Open the page under test
driver.get(url)
tip_loc = (By.CSS_SELECTOR, ".layui-layer-content")
if BM.isElementExist(element=tip_loc, driver=driver):
    tip = driver.find_element(*tip_loc).text
    print(tip)