import os
import time

import requests
from bs4 import BeautifulSoup


def get_num_list(num):
    """Fetch the chapter index page for book id *num*.

    Returns a tuple ``(first, max_number)`` where *first* is the id (string)
    of the first chapter link and *max_number* is the largest chapter id
    (int) found on the page.

    Side effect: sets the process-wide HTTP_PROXY/HTTPS_PROXY environment
    variables so every later request in this process goes through the local
    proxy as well.
    """
    # This approach relies on a system-wide proxy client listening locally.
    proxy_url = "http://127.0.0.1:7890"  # local proxy ip:port
    os.environ['HTTP_PROXY'] = proxy_url
    os.environ['HTTPS_PROXY'] = proxy_url

    cookies = {
        'ASPSESSIONIDASSAQDCT': 'HPMBIHPCFFHFGIJDCJOFMAFG',
        'ASPSESSIONIDASSDRDDS': 'GFFLNJPCCHPMAAJLGPACBMJG',
    }
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-TW;q=0.6',
        'cache-control': 'max-age=0',
        'priority': 'u=0, i',
        'referer': 'https://www.iyexs.com/iyezz.asp?id=%D0%ED%CF%C9%D4%BB%B9%FD%C9%DF',
        'sec-ch-ua': '"Not)A;Brand";v="8", "Chromium";v="138", "Google Chrome";v="138"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36',
        # 'cookie': 'ASPSESSIONIDASSAQDCT=HPMBIHPCFFHFGIJDCJOFMAFG; ASPSESSIONIDASSDRDDS=GFFLNJPCCHPMAAJLGPACBMJG',
    }
    params = {
        'id': f'{num}',
    }
    # timeout added: without it a stalled proxy/server hangs the script forever
    response = requests.get('https://www.iyexs.com/iye.asp', params=params,
                            cookies=cookies, headers=headers, timeout=(10, 30))
    print(response.status_code)
    response.encoding = 'gbk'  # the site serves GBK-encoded pages
    soup = BeautifulSoup(response.text, 'html.parser')
    a_items = (
        soup.find_all(class_="mread")[0].find_all("a")
    )
    # a_items[1] is taken as the first chapter link (index 0 looks like a
    # non-chapter link) -- NOTE(review): assumes the page always has >= 2 links
    first = a_items[1].get('href').replace("https://www.iyexs.com/iyexs.asp?id=", "")
    name_list = []
    for item in a_items:
        # skip download-page links; keep only chapter links
        if "https://www.iyexs.com/iyedx.asp" in item.get('href'):
            continue
        content = item.get('href').replace("https://www.iyexs.com/iyexs.asp?id=", "")
        name_list.append(content)
    max_number = max(int(x) for x in name_list)
    return first, max_number


def get_desc_by_id(num, title, max_retries=3, retry_delay=1):
    """Download chapter *num*, strip its HTML to plain text, and append it
    to ``output/<title>.txt``.

    Retries up to *max_retries* times, sleeping *retry_delay* seconds
    between attempts, on request failures AND on empty pages.

    Bug fix: the original code hit ``continue`` on empty content WITHOUT
    incrementing ``attempt``, so a persistently empty page caused an
    infinite request loop. Empty content now counts as a failed attempt.

    Returns True on success, False otherwise.
    """
    attempt = 0
    success = False
    while attempt <= max_retries:
        try:
            # proxy_url = "http://127.0.0.1:7890"  # local proxy ip:port
            # os.environ['HTTP_PROXY'] = proxy_url
            # os.environ['HTTPS_PROXY'] = proxy_url
            cookies = {
                'ASPSESSIONIDASSAQDCT': 'HPMBIHPCFFHFGIJDCJOFMAFG',
                'ASPSESSIONIDASSDRDDS': 'GFFLNJPCCHPMAAJLGPACBMJG',
                'ASPSESSIONIDAQSDTADS': 'NMGPCMPCLGIDIOKIGCKEFJAL',
                'ASPSESSIONIDAQQBQCCS': 'HEHJIOPCNKLLBIDMBJKHFMEO',
            }

            headers = {
                'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-TW;q=0.6',
                'priority': 'u=0, i',
                'referer': 'https://www.iyexs.com/iye.asp?id=56593',
                'sec-ch-ua': '"Not)A;Brand";v="8", "Chromium";v="138", "Google Chrome";v="138"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"Windows"',
                'sec-fetch-dest': 'document',
                'sec-fetch-mode': 'navigate',
                'sec-fetch-site': 'same-origin',
                'sec-fetch-user': '?1',
                'upgrade-insecure-requests': '1',
                'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36',
            }

            params = {
                'id': f'{num}',
            }
            # timeout added: without it a stalled server hangs the script forever
            response = requests.get('https://www.iyexs.com/iyexs.asp', params=params,
                                    cookies=cookies, headers=headers, timeout=(10, 30))
            print(response.status_code)
            response.encoding = 'gbk'  # the site serves GBK-encoded pages
            soup = BeautifulSoup(response.text, 'html.parser')
            head = soup.find(class_="mview").find(class_="max").find("font").text
            content = soup.find(class_="mview").find(class_="content")
            # f-string renders the tag (or the literal "None" when not found)
            content = f"{content}"
            # strip the known markup down to plain text
            content = content.replace('<img src="in/', "").replace('.jpg"/>', "")
            content = content.replace('<br/><br/>', "\n")
            content = content.replace('<td class="content" colspan="2">', "")
            content = content.replace('</td>', "")
            if content == "" or content == "None":
                # empty/missing content: count as a failed attempt and retry
                # (the original looped here forever without incrementing)
                attempt += 1
                if attempt > max_retries:
                    break
                time.sleep(retry_delay)
                continue
            # append the chapter to the per-title output file
            write_to_file(head, content, f"{title}.txt")
            success = True
            break
        except requests.exceptions.RequestException as e:
            attempt += 1
            print(f"[请求失败] ID: {num}，错误: {str(e)}，第 {attempt} 次重试...")
            if attempt > max_retries:
                print(f"[请求失败] ID: {num}，已达最大重试次数，跳过此请求。")
                break
            time.sleep(retry_delay)
        except Exception as e:
            # non-request errors (e.g. missing DOM nodes) are not retried
            print(f"[未知错误] ID: {num}，错误: {str(e)}")
            break
    return success


def write_to_file(head, content, filename="output.txt"):
    """Append *head* and *content* to ``output/<filename>`` as UTF-8 text.

    Creates the ``output`` directory on first use. Writes ``head`` followed
    by a blank line, then ``content``. Failures are logged, not raised
    (best-effort, matching the caller's skip-and-continue flow).
    """
    output_dir = "output"
    # exist_ok=True avoids the check-then-create race of exists()+makedirs()
    os.makedirs(output_dir, exist_ok=True)
    file_path = os.path.join(output_dir, filename)
    try:
        with open(file_path, 'a', encoding='utf-8') as f:
            f.write(f"{head}\n\n")
            f.write(f"{content}\n")
        print(f"文件已成功写入：{file_path}")
    except Exception as e:
        print(f"写入文件失败：{str(e)}")


if __name__ == '__main__':
    # Resolve the chapter id range for the book, then fetch every chapter
    # in order, appending each one to output/<title>.txt.
    start_id, last_id = get_num_list("56593")
    book_title = "test"
    for chapter_id in range(int(start_id), int(last_id) + 1):
        print(chapter_id)
        get_desc_by_id(chapter_id, book_title)