

# res_data = requests.get(url)
# #if res_data.Res
# print(repr(res_data))
# # results = res_data.json()['result']
# # index=0
# # for resultItem in results:
# #     print('##########',index)
# #     index = index+1
# #     itemUrl = baseUrl+resultItem['detailsLink']
# #     print(itemUrl)
# #     # req= requests.get(itemUrl)
# #     # req.encoding = "utf-8"
# #     # soup = BeautifulSoup(req.text, 'html.parser')
# #     # print(soup.title)
# # print('end')

import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from env import cookies

# Scrape the ball numbers from a China Welfare Lottery draw detail page.
# The site requires session cookies (imported from env), so the page is
# loaded once, cookies are injected, and the page is reloaded before parsing.
detailUrl = "http://www.cwl.gov.cn/c/2022/03/27/500071.shtml"

option = webdriver.ChromeOptions()
# set_headless() was removed in Selenium 4; pass the flag as an argument.
option.add_argument("--headless")
# Optional outbound IP proxy:
# option.add_argument("--proxy-server=http://183.166.149.193:20005")
s = Service(
    executable_path=r'D:\pf\Lib\site-packages\selenium\webdriver\chrome\chromedriver.exe')
# The `chrome_options=` kwarg was removed in Selenium 4 — use `options=`.
driver = webdriver.Chrome(service=s, options=option)
try:
    driver.get(detailUrl)
    # add_cookie() only works for the domain currently loaded, hence the
    # initial get() above followed by a refresh once cookies are attached.
    for cookie in cookies:
        driver.add_cookie(cookie)
    driver.refresh()

    soup = BeautifulSoup(driver.page_source, 'html.parser')
    # Each drawn ball appears to be rendered with these CSS classes —
    # assumption based on the selector; confirm against the live page.
    qius = soup.select(".qiu-item.qiu-item-big")
    if len(qius) < 7:
        # Guard: the original indexed qius[0]..qius[6] blindly and would
        # raise IndexError if the page layout changed or cookies failed.
        print(f'warning: expected 7 balls, found {len(qius)}')
    for i, qiu in enumerate(qius[:7], start=1):
        print(f'{i}:{qiu.text}')
finally:
    # Always shut the browser down, even if navigation or parsing fails,
    # so no orphaned chromedriver/Chrome processes are leaked.
    driver.quit()
