
'''
1. Collect article URLs from the UPI website.
2. Site navigation style: click-through "next page" pagination.
3. The search keyword is supplied dynamically; it does not appear in a static URL.
'''

import random
import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
# --- Scraping parameters ---------------------------------------------------
# CSS selector matching the article anchors inside each search-result row.
url_selector = "div.col-md-12>a.row"
website = 'UPI'   # source site tag, used in the output file name
keyword = 'WTI'   # search term sent to the UPI search endpoint
# Output file name embeds site and keyword, e.g. ./urlUPI-WTI.txt
out_path = './url{}-{}.txt'.format(website, keyword)
start_num = 3     # first result-page offset to request
page_num = 300    # last result-page offset to request (inclusive)
'''
日志参数设置
'''
# Logging setup: logTool is a project-local helper (not stdlib logging).
from log_tool import logTool  # pull in the project's log utility
LOG_PATH = "./log_note"  # path where log output is stored
log = logTool(LOG_PATH)  # module-level logger shared by the whole script
'''
浏览器加载参数
'''
# Headless Chrome with images and the Flash plugin blocked, to speed up page
# loads — we only need anchor hrefs, not a rendered page.
chrome_options = Options()
chrome_options.add_argument('--headless')  # run without a visible browser window
# Chrome content-settings prefs: the value 2 means "block".
prefs = {"profile.managed_default_content_settings.images": 2,
"profile.content_settings.plugin_whitelist.adobe-flash-player": 2,
"profile.content_settings.exceptions.plugins.*,*.per_resource.adobe-flash-player": 2,
}
chrome_options.add_experimental_option("prefs", prefs)
# NOTE(review): positional driver path + the chrome_options kwarg are
# Selenium 3 style (deprecated in Selenium 4's Service()/options= API) —
# confirm the pinned selenium version before changing this line.
driver = webdriver.Chrome(r"./chromedriver.exe", chrome_options=chrome_options)
# Output file for the scraped URLs, opened in append mode so reruns resume
# without clobbering earlier results. An explicit encoding avoids relying on
# the platform default codec (which can fail on non-ASCII URLs on Windows).
f = open(out_path, 'a', encoding='utf-8')

def save(u):
    """Append a single URL to the output file, one URL per line."""
    f.write(u+'\n')

def _scrape_current_page():
    """Save every article link found on the currently loaded page.

    Returns the number of links found (may be 0 if nothing rendered).
    """
    urlist = driver.find_elements_by_css_selector(url_selector)  # all matching <a> elements
    print(len(urlist))
    for el in urlist:
        save(el.get_attribute("href"))
    return len(urlist)

def obtain_url():
    """Walk UPI search-result pages [start_num, page_num] and persist URLs.

    On a page-load timeout the load is force-stopped via JavaScript and
    whatever links already made it into the DOM are still harvested, so a
    slow page does not lose its results.
    """
    # Timeouts must be configured BEFORE driver.get() so they actually apply
    # to the navigations below (the original code set them after the call,
    # too late for the first page load).
    driver.set_page_load_timeout(20)
    driver.set_script_timeout(20)
    try:
        for i in range(start_num, page_num + 1):
            try:
                # Keyword and page offset are injected into the query string.
                driver.get("https://www.upi.com/search/?ss=" + keyword + "&s_l=articles&offset=" + str(i))
                time.sleep(3)  # allow dynamic content to render
                count = _scrape_current_page()
                log.info('insert ' + str(i) + 'page successfully '+str(count)+' records')
                time.sleep(5 + random.random())  # polite, jittered inter-page delay
            except TimeoutException:
                log.info('time out!'+str(i))
                # Force the browser to stop loading, then harvest whatever
                # anchors are already present in the partial DOM.
                driver.execute_script('window.stop()')
                count = _scrape_current_page()
                log.info('time out insert' + str(i) + ' page successfully'+str(count)+' records')
    finally:
        # Always release the browser, even if an unexpected exception escapes
        # the loop (the original leaked the Chrome process in that case).
        driver.quit()

if __name__ == "__main__":
    obtain_url()  # kick off URL collection when run as a script


















