import ast
import random

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Browser setup: Chinese UI language plus a spoofed desktop Chrome user agent.
options = webdriver.ChromeOptions()
# Chrome's --lang switch expects a BCP-47 locale tag (zh-CN), not a POSIX
# locale string like zh_CN.UTF-8 — the original value was silently ignored.
options.add_argument('--lang=zh-CN')
# The UA value must NOT be wrapped in extra quotes: the original
# 'user-agent="Mozilla/..."' sent the quote characters as part of the header.
options.add_argument('user-agent=Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4882.400 QQBrowser/9.7.13059.400')
driver = webdriver.Chrome(options=options)

# Target page to crawl.
url = 'https://mp.weixin.qq.com/s/j2w6y1NgTDZ9d_iWfJrrYw'
# Proxy list: one dict literal per line, e.g. {'http': '1.2.3.4:80'}
# (assumed format — confirm against how IP.txt is generated).
filePath = r'E:\project\py-pro\IP.txt'
encoding = 'utf-8'

def get_random_proxy(filePath, encoding):
    """Return one randomly chosen proxy dict from a proxy-list file.

    The file is expected to contain one Python dict literal per line,
    e.g. ``{'http': '1.2.3.4:80'}``; blank lines are skipped.

    Parameters:
        filePath: path of the proxy-list file.
        encoding: text encoding used to read the file.

    Returns:
        A single proxy dict picked uniformly at random.

    Raises:
        ValueError / SyntaxError: if a line is not a valid Python literal.
        IndexError: if the file contains no proxies.
    """
    with open(filePath, "r", encoding=encoding) as f:
        # ast.literal_eval parses dict literals safely; the original used
        # eval(), which would execute arbitrary code placed in the file.
        proxy_pool = [ast.literal_eval(line.strip())
                      for line in f if line.strip()]
    return random.choice(proxy_pool)
# Visit the target page several times, each through a freshly chosen proxy.
#
# Two fixes over the original:
#   * driver.get() accepts no `proxy` keyword — a proxy can only be set on the
#     ChromeOptions BEFORE the driver is created, so a new driver is built per
#     visit.
#   * the original called driver.quit() inside the loop, killing the shared
#     session on the first pass and then reusing a dead driver.
for _ in range(1, 10):
    proxies = get_random_proxy(filePath, encoding)
    # assumes proxies looks like {'http': 'ip:port'} — take the first
    # address in the dict; confirm against the IP.txt format.
    proxy_address = next(iter(proxies.values()))
    proxy_options = webdriver.ChromeOptions()
    proxy_options.add_argument('--proxy-server=' + proxy_address)
    proxy_driver = webdriver.Chrome(options=proxy_options)
    try:
        proxy_driver.get(url)
    finally:
        # always release the per-iteration browser, even if the visit fails
        proxy_driver.quit()

# release the module-level driver created during setup
driver.quit()