import requests
from lxml import etree
import time
import os

class opt:
    """Scrape a paginated image gallery and save every picture locally.

    Workflow: ``start`` fetches the first page, ``join_total_url`` derives
    the list of pagination URLs from the pager links, ``get_picture`` /
    ``detail`` collect the image URLs of each page, and ``down_picture``
    writes every image into a ``picture/`` directory under the CWD.
    """

    def __init__(self, num):
        """Initialize the scraper.

        :param num: starting counter used to build unique image filenames.
        """
        self.etree = etree  # module handle, kept for backward compatibility
        # BUG FIX: the constructor previously ignored ``num`` and always
        # hard-coded ``self.num = 0``; the argument is now honoured
        # (backward-compatible: the existing caller passes 0).
        self.num = num

    def join_total_url(self, etree):
        """Build the full list of pagination URLs.

        :param etree: parsed document (lxml element tree) of the first page —
            note the confusing name; it is a tree, not the module.
        :return: list of absolute page URLs, first page without suffix.
        """
        page_nums = etree.xpath('//div[@class="content"]/div[@class="content_left"]/div/a/text()')
        max_no = 0
        for num in page_nums:
            # Pager links mix textual labels with page numbers; keep only
            # the ones that parse as integers (narrowed from a bare except).
            try:
                int_num = int(num)
            except ValueError:
                continue
            max_no = max(max_no, int_num)

        total_urls = []
        for i in range(max_no):
            if i == 0:
                # The first page has no "_<n>" suffix on this site.
                join_url = "http://2a.xy08.my/XiuRen/17300.html"
            else:
                join_url = "http://2a.xy08.my/XiuRen/17300_{}.html".format(i)
            total_urls.append(join_url)
        return total_urls

    def start(self, etree):
        """Entry point: fetch the first page and kick off the crawl.

        :param etree: the ``lxml.etree`` module (passed in by the caller).
        """
        url = 'http://2a.xy08.my/XiuRen/17300_1.html'
        # Browser-like User-Agent so the site serves the normal page.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
        }
        # timeout added so a stalled connection cannot hang the crawl forever
        response = requests.get(url, headers=headers, timeout=30)
        tree = etree.HTML(response.text)  # renamed: no longer shadows the module
        total_url = self.join_total_url(tree)
        self.get_picture(total_url, headers)

    def get_picture(self, url_list, headers):
        """Collect the image-URL list of every page, then download them all.

        :param url_list: pagination URLs produced by ``join_total_url``.
        :param headers: request headers forwarded to ``detail``.
        """
        urls = []
        for url in url_list:
            time.sleep(1.5)  # be polite: throttle one request per 1.5 s
            urls.append(self.detail(url, headers))
        self.detail_url(urls)

    def detail_url(self, urls_list):
        """Download every image; ``count`` identifies the source page.

        :param urls_list: list of per-page lists of absolute image URLs.
        """
        count = 1
        for urls in urls_list:
            for url in urls:
                print("正在加载url {}".format(url))
                self.down_picture(count, url)
            count += 1

    def down_picture(self, count, url):
        """Download one image into ``./picture`` as ``<count>_<num>.jpg``.

        :param count: 1-based index of the page the image came from.
        :param url: absolute image URL.
        """
        picture_dir = os.path.join(os.getcwd(), 'picture')
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"}
        if not os.path.exists(picture_dir):
            os.makedirs(picture_dir)
        filename = "{}_{}.jpg".format(count, self.num)
        self.num += 1  # global counter keeps filenames unique across pages
        # BUG FIX: the header dict was passed positionally, i.e. as the
        # ``params`` query string, so the User-Agent was never actually sent.
        res = requests.get(url, headers=header, timeout=30).content
        print("图片保存中{}".format(filename))
        with open(os.path.join(picture_dir, filename), 'wb') as f:
            f.write(res)

    def detail(self, url, headers):
        """Fetch one gallery page and return its absolute image URLs.

        :param url: page URL to fetch.
        :param headers: request headers.
        :return: list of absolute image URLs found on the page.
        """
        response = requests.get(url, headers=headers, timeout=30)
        tree = etree.HTML(response.text)  # uses the module-level lxml import
        containers = tree.xpath('//div[3]/div/div/div[5]')
        p_urls = []
        for img in containers:
            for src in img.xpath('./p/img/@src'):
                # @src is site-relative; prefix the host to get a full URL.
                p_urls.append("http://2a.xy08.my" + src)
        return p_urls

# TageP = etree.xpath('//div[3]/div/div/div[5]')  # /html/body/div[3]/div/div/div[5]
"""
list_url = []
for img in TageP:
    picture = img.xpath('./p/img/@src')  # /html/body/div[3]/div/div/div[5]/p[2]/img[1]  []
    for url in picture:
        p_url = "http://2a.xy08.my" + url # http://2a.xy08.my/uploadfile/202504/30/E8104914184.jpg
        print(p_url)
"""
# # 检查请求是否成功
# if response.status_code == 200:
#     # 解析HTML内容
#     soup = BeautifulSoup(response.text, 'html.parser')
#
#     # 示例：提取所有图片链接
#     images = soup.find_all('img')
#     for img in images:
#         img_url = img.get('src')
#         print(img_url)  # http://2a.xy08.my/uploadfile/202504/30/E8104914184.jpg
# else:
#     print(f"请求失败，状态码：{response.status_code}")
if __name__ == "__main__":
    # Start the crawl with the filename counter at zero; the scraper
    # receives the lxml.etree module it parses pages with.
    scraper = opt(0)
    scraper.start(etree)
