import threading
import time
from concurrent.futures import ThreadPoolExecutor

import requests
from bs4 import BeautifulSoup

# Read the URLs to scrape, one per line. readlines() would keep the trailing
# '\n' on every URL (corrupting both the HTTP request and the output lines),
# so strip each line and drop blanks.
with open('logbobo-412.txt', 'r') as f:
    urls = [line.strip() for line in f if line.strip()]

# Output file for "url product_name" result lines; closed at the end of the script.
output_file = open('log2.txt', 'w')

# Progress tracking: total URL count plus a shared counter the worker threads bump.
total_urls = len(urls)
processed_urls = 0

# Fetch one product page and extract the product name.
def get_product_name(url):
    """Return the text of the first 'span.goods_zk' element at *url*, or None.

    None is returned both when the element is missing and when the request
    fails; errors are printed rather than raised so one bad URL does not
    abort the whole batch.
    """
    try:
        # requests has NO default timeout — without one a stalled server
        # would hang a worker thread forever.
        response = requests.get(url, timeout=10)
        # Treat HTTP errors (4xx/5xx) as failures instead of scraping an
        # error page for a product name.
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        product_name_element = soup.select_one('span.goods_zk')
        if product_name_element:
            return product_name_element.get_text().strip()
    except Exception as e:
        print(f"Error processing URL: {url}, {e}")
    return None

# Serializes access to the shared output file and progress counter:
# file.write() interleaving and "+= 1" are not atomic across the 30 workers.
_progress_lock = threading.Lock()

# Process a single URL: scrape it, record the result, report progress.
def process_url(url):
    """Scrape *url*, append "url product_name" to the output file on success,
    and print overall progress. Safe to run concurrently from the pool below.
    """
    global processed_urls
    # Defensive strip: if the URL list was built with readlines(), each
    # entry still ends in '\n', which breaks the request and the output line.
    url = url.strip()
    product_name = get_product_name(url)
    with _progress_lock:
        if product_name:
            output_file.write(f"{url} {product_name}\n")
        processed_urls += 1
        progress = (processed_urls / total_urls) * 100
        print(f"Progress: {progress:.2f}%", end="\r")

# Fan the URLs out across a thread pool; the work is network-bound, so
# threads overlap their waits on HTTP responses.
start_time = time.time()
with ThreadPoolExecutor(max_workers=30) as executor:
    futures = [executor.submit(process_url, url) for url in urls]
    # submit() swallows worker exceptions unless the future is inspected;
    # result() re-raises them here so failures are not silently lost.
    for future in futures:
        future.result()

output_file.close()
end_time = time.time()
print(f"\nTotal time: {end_time - start_time:.2f} seconds")
