import string
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed

import requests
from bs4 import BeautifulSoup

# Stop recording results once this many sites have been written to disk.
MAX_WRITE_COUNT = 100
# Shared counter of sites written so far; incremented under `lock`.
write_count = 0
lock = threading.Lock()

# Keywords to look for in each page's visible text.
keywords = ['国产', '成人', '日韩', '无码', '高清视频', '制服', '诱惑', '人妻']

def send_get_request(url, timeout_sec):
    """Issue an HTTP GET and return ``(status_code, body_text)``.

    Returns ``(-1, None)`` on timeout and ``(-2, None)`` on any other
    request failure, so callers branch on the code instead of handling
    exceptions themselves.
    """
    try:
        response = requests.get(url, timeout=timeout_sec)
        response.raise_for_status()

        # Fall back to UTF-8 when the response headers carry no charset.
        encoding = response.encoding or 'utf-8'

        # errors="replace" keeps a malformed body from raising
        # UnicodeDecodeError, which is not a RequestException and would
        # otherwise crash the calling worker thread.
        try:
            return response.status_code, response.content.decode(encoding, errors="replace")
        except LookupError:
            # Server advertised a codec Python doesn't know; decode as UTF-8.
            return response.status_code, response.content.decode('utf-8', errors="replace")
    except requests.exceptions.Timeout:
        # Response timed out.
        return -1, None
    except requests.exceptions.RequestException:
        # Any other request failure (DNS, refused connection, HTTP error, ...).
        return -2, None

def write_to_file(website, result):
    """Append *website* to the results file if *result* contains any keyword.

    The whole check-write-increment sequence runs under `lock` so that
    concurrent threads neither interleave file writes nor push
    `write_count` past MAX_WRITE_COUNT.
    """
    global write_count
    if any(keyword in result for keyword in keywords):
        with lock:
            # Re-check under the lock: another thread may have hit the
            # limit after this thread's caller performed its check.
            if write_count >= MAX_WRITE_COUNT:
                return
            with open("学习资料.txt", "a", encoding="utf-8") as file:
                file.write(f"{website}\n")
            write_count += 1

def process_website_with_result(website):
    """Fetch ``http://<website>``, strip its HTML, and record a match.

    Returns early (doing no network work) once the shared write counter
    has reached MAX_WRITE_COUNT. Non-200 responses — including the -1/-2
    sentinel codes returned by send_get_request — are silently skipped.
    """
    # Read-only check of the shared counter; no `global` needed since
    # the counter is never assigned here.
    if write_count >= MAX_WRITE_COUNT:
        return

    response_code, response_text = send_get_request(f"http://{website}", 5)

    if response_code == 200:
        # Parse with BeautifulSoup to drop HTML tags before keyword matching.
        soup = BeautifulSoup(response_text, 'html.parser')
        cleaned_text = soup.get_text()

        # Record the site if its visible text contains any keyword.
        write_to_file(website, cleaned_text)
        print(f"Processed: {website}, result:{cleaned_text}")

def main():
    """Build the candidate domain list and scan every site concurrently.

    Generates all domains of the form NNNcc.com — three digits followed
    by a doubled lowercase letter (e.g. "123aa.com"), 26,000 in total —
    and dispatches them to a fixed-size thread pool.
    """
    websites = [
        f"{d1}{d2}{d3}{letter}{letter}.com"
        for d1 in range(10)
        for d2 in range(10)
        for d3 in range(10)
        for letter in string.ascii_lowercase
    ]

    # The work is I/O-bound (threads block on network), so a large pool
    # is appropriate. The `with` block joins every outstanding task on
    # exit, so no explicit as_completed() drain loop is needed.
    thread_count = 200
    with ThreadPoolExecutor(max_workers=thread_count) as executor:
        for website in websites:
            # submit() takes the callable and its args directly — no
            # lambda-with-default-arg closure workaround required.
            executor.submit(process_website_with_result, website)

    print("All threads completed.")

if __name__ == "__main__":
    # Run the scan only when executed as a script, not when imported.
    main()
