import time

from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.edge.service import Service
from selenium.webdriver.edge.options import Options

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

from mysqldata import MyMySQL

# Full path to the EdgeDriver executable (hard-coded to a local machine —
# NOTE(review): consider making this configurable via env var / CLI argument).
edgedriver_path = r'D:\Project\Pycharm\党建项目\NetWorkModels\crawler\msedgedriver.exe'

# Instantiate the browser Options object
options = Options()
# Add the headless flag so no browser window is shown
options.add_argument("--headless")  # enable headless mode

# Create a Service object pointing at the EdgeDriver location
service = Service(edgedriver_path)

# Initialize the Edge WebDriver with the service and options above.
# NOTE: this launches a browser at import time (module-level side effect);
# the global `driver` is shared by the functions below.
driver = webdriver.Edge(service=service, options=options)



# 定义Selenium版本的crawl_li_elements函数
def crawl_li_elements_selenium(driver, url):
    """Scrape the ``<li>`` entries under ``#news_list`` on the page currently
    loaded in *driver*.

    Note: *url* is accepted but NOT navigated to — the caller is expected to
    have loaded the page already (the pagination helper below clicks through
    pages and must not reset navigation between scrapes).

    Parameters
    ----------
    driver : selenium WebDriver positioned on a listing page.
    url : str
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    dict
        Maps each ``<li>``'s visible text to the ``href`` of its first
        ``<a>`` tag, or ``None`` when the item contains no link.
        Items with duplicate text overwrite earlier ones.
    """
    li_elements = driver.find_elements(By.CSS_SELECTOR, '#news_list > li')
    results = {}
    for li in li_elements:
        text = li.text.strip()

        # Bug fix: find_element() raises NoSuchElementException when the <li>
        # has no <a> tag, so the original `else` branch was unreachable and a
        # link-less item would crash the crawl. find_elements() returns an
        # empty list instead, letting us fall back to None safely.
        a_tags = li.find_elements(By.TAG_NAME, 'a')
        if a_tags:
            link = a_tags[0].get_attribute('href')  # absolute href as rendered
        else:
            link = None  # no link in this item

        results[text] = link  # keyed by item text

    return results


def click_next_active_span_crawl_li_elements_selenium(driver, url, max_pages=10):
    """Open *url*, scrape the first results page, then repeatedly click the
    pagination ``<span>`` that follows the currently active one, merging each
    new page's items into a single dict.

    Parameters
    ----------
    driver : selenium WebDriver.
    url : str
        Listing page to open first.
    max_pages : int, optional
        Maximum number of "next page" clicks (default 10, matching the
        previously hard-coded limit).

    Returns
    -------
    dict of {item text: link}; on duplicate keys, earlier pages win.
    """
    driver.get(url)
    mydic = crawl_li_elements_selenium(driver, url)
    for _ in range(max_pages):
        # Wait until the pagination spans are present in the DOM.
        spans = WebDriverWait(driver, 10).until(
            EC.presence_of_all_elements_located((By.CSS_SELECTOR, '#pages > span'))
        )
        active_spans = [s for s in spans if 'active' in s.get_attribute('class')]

        if not active_spans:
            print("没有找到带有.active类的span元素")
            break

        # Index of the currently active page marker within the span list.
        active_index = spans.index(active_spans[0])

        # Is there a span after the active one (i.e. a next page)?
        if active_index < len(spans) - 1:
            next_span = spans[active_index + 1]
            ActionChains(driver).move_to_element(next_span).click().perform()

            # Crude fixed wait for the next page to render; an explicit
            # WebDriverWait on staleness/presence would be more robust —
            # TODO confirm a reliable post-click condition for this site.
            time.sleep(1.5)
            # Merge so that already-collected (earlier-page) entries win.
            mydic = {**crawl_li_elements_selenium(driver, url), **mydic}
            print(f"已点击索引为 {active_index + 1} 的span元素")
        else:
            print("已经是最后一个span元素，没有下一个可点击的元素了")
            break

    return mydic


def gettext(url, web_driver=None):
    """Navigate to *url* and return the article body text.

    Parameters
    ----------
    url : str
        Article page URL.
    web_driver : selenium WebDriver, optional
        Driver to use. Defaults to the module-level ``driver`` for backward
        compatibility; passing it explicitly makes this helper consistent
        with the other crawl functions, which take the driver as a parameter.

    Returns
    -------
    str
        Visible text of the ``body > div.d2txt.clearfix`` container.
    """
    d = driver if web_driver is None else web_driver
    d.get(url)
    text = d.find_element(By.CSS_SELECTOR, 'body > div.d2txt.clearfix').text
    return text





# Category A: article "type" (form of activity) -> listing URL.
# NOTE(review): "其它" points at the same URL as "函电" (form=707) — looks like
# a copy-paste slip; confirm whether "其它" should use a different form id.
ans_categoryA = {
    "讲话": "http://jhsjk.people.cn/result?form=706&else=501",
    "会议": "http://jhsjk.people.cn/result?form=701&else=501",
    "活动": "http://jhsjk.people.cn/result?form=702&else=501",
    "考察": "http://jhsjk.people.cn/result?form=703&else=501",
    "会见": "http://jhsjk.people.cn/result?form=704&else=501",
    "出访": "http://jhsjk.people.cn/result?form=705&else=501",
    "函电": "http://jhsjk.people.cn/result?form=707&else=501",
    "其它": "http://jhsjk.people.cn/result?form=707&else=501",}

# Category B: article "domain" (subject area) -> listing URL.
# "无关" maps to an empty string; the main loop below will call
# driver.get("") for it — presumably a sentinel, verify intended handling.
ans_categoryB= {
    "经济": "http://jhsjk.people.cn/result?type=101",
    "政治": "http://jhsjk.people.cn/result?type=102",
    "文化": "http://jhsjk.people.cn/result?type=103",
    "社会": "http://jhsjk.people.cn/result?type=104",
    "生态": "http://jhsjk.people.cn/result?type=105",
    "党建": "http://jhsjk.people.cn/result?type=106",
    "国防": "http://jhsjk.people.cn/result?type=107",
    "无关": "", }
# Example: starting URL (overwritten by the loop below before use).
url = 'http://jhsjk.people.cn/result?type=101'
# Request headers — not used by the Selenium code visible here; possibly a
# leftover from an earlier requests-based version.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/58.0.3029.110 Safari/537.3'
}
# Database connection for persisting crawled articles.
# NOTE(review): credentials are hard-coded in source — move to env vars or a
# config file before sharing/deploying this script.
ms=MyMySQL(host='100.125.36.112',user='dangjian',password='dangjian',database="dangjian")
ms.connect()
# Category-A crawl kept here disabled; re-enable to populate the "类型" field.
# for key in ans_categoryA.keys():
#     url=ans_categoryA[key]
#     li_info = click_next_active_span_crawl_li_elements_selenium(driver, url)
#     for key2, value in li_info.items():
#         text=gettext(value)
#         mydic={"标题":key2,"类型":key,"领域":'',"文章":text}
#         try:
#             #print(mydic)
#             ms.add(mydic)
#         except Exception as e:
#             print(e)

# Crawl every Category-B (domain) listing: page through results, fetch each
# article's body text, and insert a row per article.
for key3 in ans_categoryB.keys():
    url=ans_categoryB[key3]
    li_info = click_next_active_span_crawl_li_elements_selenium(driver, url)
    for key4, value in li_info.items():
        # `value` is the article link; fetch its body text.
        text=gettext(value)
        mydic={"标题":key4,"类型":'',"领域":key3,"文章":text}
        try:
            #print(mydic)
            ms.add(mydic)
        except Exception as e:
            # Best-effort insert: log and continue (e.g. duplicate rows).
            # NOTE(review): broad Exception catch hides schema/connection
            # errors — consider catching the specific DB error type.
            print(e)

