###xpath项目：
#需求：
#   1、抓图片and名称
#   2、将图片and名称保存到本地
#   3、从本地发送远程服务器
import requests
from lxml import etree
import logging
import random
from fake_useragent import UserAgent            ##随机生成ua
import os 

###### Logging setup: timestamped, level-tagged messages at INFO and above.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')

# First search-results page to crawl.
base_url='https://www.4399dmw.com/search/dh-5-0-0-0-0-0-0/'
# Local file holding one proxy address per line (Windows path).
proxy_file_path = 'D:\\python\\python_data\\Python爬虫篇\\http_proxies.txt'
# fake_useragent generator; ua.random yields a fresh User-Agent per request.
ua=UserAgent()


######创建代理:
def read_proxy_file(proxy_file_path):
    """Read proxy addresses (one per line) from *proxy_file_path*.

    Returns a list of non-empty, whitespace-stripped lines. Returns an
    empty list (after logging) when the file is missing or unreadable.
    """
    proxy_list = []
    try:
        with open(proxy_file_path, 'r') as file:
            for line in file:
                # bug fix: str.strip() returns a new string — the original
                # discarded the result, so blank lines passed the truthiness
                # test and "" entries were appended.
                line = line.strip()
                if line:
                    proxy_list.append(line)
    # bug fix: was FileExistsError, which open() for reading never raises;
    # a missing file raises FileNotFoundError.
    except FileNotFoundError:
        logging.error("文件不存在")
    except Exception as e:
        logging.error(f"发生错误:{e}")
    return proxy_list
# Pick a random proxy address for a request.
def get_proxy():
    """Return a requests-style proxies mapping with a randomly chosen proxy.

    Returns an empty mapping when no proxies are available, so the result
    can always be passed straight to requests.get(proxies=...).
    """
    proxies = read_proxy_file(proxy_file_path)
    # robustness: random.choice raises IndexError on an empty list
    if not proxies:
        return {}
    proxy = random.choice(proxies)
    # bug fix: requests expects scheme names ('http'/'https') as keys —
    # the original key 'http://' is never matched, so the proxy was
    # silently ignored. Cover both schemes so https URLs are proxied too.
    return {'http': proxy, 'https': proxy}


###### Download an image and save it to the current working directory.
    # 1. request the image URL
    # 2. write the response body to '<image_name>.jpg'
def save_image(image_url,image_name):
    """Download *image_url* and save it as '<image_name>.jpg'.

    The file lands in the current working directory (mk_dir() chdirs into
    the per-page folder before this is called). All errors — network,
    HTTP status, filesystem — are logged rather than raised.
    """
    # request headers: random UA per call, fixed cookie/referer for the site
    headers={
        "User-Agent":ua.random,
        "Cookie":"UM_distinctid=1931e0dbabc7fc-0a2a39fab5dbc-26011951-144000-1931e0dbabdfcd; a_180_90_index=1; CNZZDATA3217746=cnzz_eid%3D1253278158-1731376037-https%253A%252F%252Fwww.baidu.com%252F%26ntime%3D1731394441; a_980_90_index=1; a_200_90_index=1",
        "Referer":"https://www.4399dmw.com/donghua/"
    }
    try:
        # fix: add a timeout so a stalled connection cannot hang the crawl
        response_image = requests.get(url=image_url,headers=headers,
                                      proxies=get_proxy(),timeout=10)
        # fix: fail on HTTP errors instead of saving an error page as a .jpg
        response_image.raise_for_status()
        image_title = image_name + ".jpg"
        # binary payload, so the file is opened in 'wb' mode
        with open(image_title,"wb") as f:
            f.write(response_image.content)
    except Exception as e:
        logging.error(e)


##### Ensure the per-page output directory exists and make it current.
def mk_dir(path):
    """Create <base>/<path> if needed, chdir into it, and return True.

    Side effect: changes the process working directory, so subsequent
    save_image() calls write into this per-page folder.
    """
    target = os.path.join('D:\\python\\python_data\\Python爬虫篇\\4399_image', path)
    # makedirs(exist_ok=True) collapses the original exists/mkdir branches
    # (both of which ended in chdir + return True) and also creates any
    # missing parent directories in one call.
    os.makedirs(target, exist_ok=True)
    os.chdir(target)
    return True

##### Resolve the link to the next results page.
def next_page(html):
    """Return the absolute URL of the next results page, or False if none.

    *html* is a parsed lxml tree; the next link is the href of the
    <a class="next"> anchor, which the site stores site-relative.
    """
    hits = html.xpath('//a[@class="next"]/@href')
    # guard clause: no anchor means this was the last page
    if not hits:
        return False
    return 'https://www.4399dmw.com' + hits[0]

def spider_4399dhp(base_url):
    """Crawl listing pages starting at *base_url*, saving every image.

    For each page: parse the page number, create/enter a per-page folder,
    collect image URLs and titles, download each image, then follow the
    "next page" link. Stops (with a warning) when no next link exists.
    """
    # bug fix: pagination is now iterative — the original recursed once per
    # page (risking RecursionError on deep result sets) and evaluated
    # next_page() twice per page, doubling the XPath work.
    url = base_url
    while url:
        # request headers: random UA per page, fixed cookie/referer
        headers={
            "User-Agent":ua.random,
            "Cookie":"UM_distinctid=1931e0dbabc7fc-0a2a39fab5dbc-26011951-144000-1931e0dbabdfcd; a_180_90_index=1; CNZZDATA3217746=cnzz_eid%3D1253278158-1731376037-https%253A%252F%252Fwww.baidu.com%252F%26ntime%3D1731394441; a_980_90_index=1; a_200_90_index=1",
            "Referer":"https://www.4399dmw.com/donghua/"
        }
        logging.info("开始爬取： "+url)
        # fix: add a timeout so one stalled page cannot hang the whole crawl
        response=requests.get(url=url,headers=headers,proxies=get_proxy(),timeout=10)

        # parse the page with lxml's XPath engine
        html = etree.HTML(response.content.decode('utf-8'))
        # current page number, used to name the per-page output folder
        page = html.xpath('//span[@class="cur"]/text()')
        mk_dir('第'+page[0]+'页')
        # protocol-relative image URLs ('//...') and their matching titles
        image = html.xpath('//div[@class="lst"]/a/img/@data-src')
        title = html.xpath('//div[@class="lst"]/a/div[@class="u-ct"]/p[@class="u-tt"]/text()')

        # prepend the scheme to the protocol-relative links
        real_url = ['http:' + i for i in image]

        # zip pairs each image URL with its title for saving
        for nurl,ntitle in zip(real_url,title):
            save_image(nurl,ntitle)

        # False (no next anchor) ends the loop
        url = next_page(html)
    logging.warning('已完成，无法找到下一页')


# Script entry point: start crawling from the first search-results page.
if __name__=="__main__":
    spider_4399dhp(base_url)
