import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

def download_resources(url, save_dir):
    # Create the save directory if it does not exist
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Fetch the page and parse its HTML
    response = requests.get(url)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')

    # Download .gif images, mirroring the site's directory layout locally
    for img in soup.find_all('img', attrs={'src': lambda x: x and x.endswith('.gif')}):
        img_url = img.get('src')
        if img_url:
            # Map the URL path to a local directory, e.g. '/skin/images/a.gif'
            # -> 'skin/images'; fall back to save_dir for bare file names
            dir_tmp = os.path.dirname(img_url).lstrip('/') or save_dir
            if not os.path.exists(dir_tmp):
                os.makedirs(dir_tmp)

            full_img_url = urljoin(url, img_url)
            img_name = os.path.basename(full_img_url)
            img_response = requests.get(full_img_url)
            with open(os.path.join(dir_tmp, img_name), 'wb') as f:
                f.write(img_response.content)

    # Download CSS files (disabled)
    # for link in soup.find_all('link', rel='stylesheet'):
    #     css_url = link.get('href')
    #     if css_url:
    #         full_css_url = urljoin(url, css_url)
    #         css_name = os.path.basename(full_css_url)
    #         css_response = requests.get(full_css_url)
    #         with open(os.path.join(save_dir, css_name), 'wb') as f:
    #             f.write(css_response.content)
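
# Hypothetical invocation (the homepage URL and directory name below are only
# illustrative assumptions, not taken from the original script):
# download_resources('https://www.mualloy.com/', 'site_assets')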

def download_one(url):
    # Derive the save directory from the URL path between 'com/' and the last
    # '/', e.g. '.../mualloy.com/skin/images/tel.png' -> 'skin/images'.
    # Note: this assumes the host name ends in 'com'; rfind returns -1 otherwise.
    end_idx = url.rfind('/')
    start_idx = url.rfind("com/")
    save_dir = url[start_idx + 4:end_idx]
    print(save_dir)

    # Create the save directory if it does not exist
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    response = requests.get(url)

    file_name = os.path.basename(url)
    print(file_name)
    # response.content holds the raw bytes, so no text decoding is needed
    with open(os.path.join(save_dir, file_name), 'wb') as f:
        f.write(response.content)
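
# For example, download_one('https://www.mualloy.com/skin/images/tel.png')
# derives save_dir 'skin/images' and writes the file to skin/images/tel.png.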

def download(url):

    response = requests.get(url)

    # Save into the current working directory under the URL's basename
    file_name = os.path.basename(url)
    print(file_name)
    # 'xb' opens for exclusive binary creation and raises FileExistsError
    # if the file already exists, so nothing is ever overwritten
    with open(file_name, 'xb') as f:
        f.write(response.content)

# Usage example
url1 = 'https://www.mualloy.com/skin/images/tel.png'
url2 = 'https://www.mualloy.com/skin/images/cpbg.jpg'
url3 = 'https://www.mualloy.com/skin/images/tsbg.jpg'
url4 = 'https://www.mualloy.com/skin/images/ts01.gif'
url5 = 'https://www.mualloy.com/skin/images/ts02.gif'
url6 = 'https://www.mualloy.com/skin/images/ts03.gif'
url7 = 'https://www.mualloy.com/skin/images/ts04.gif'
url8 = 'https://www.mualloy.com/skin/images/ts05.gif'
url9 = 'https://www.mualloy.com/skin/images/ts06.gif'
url10 = 'https://www.mualloy.com/skin/images/xgg.jpg'
url11 = 'https://www.mualloy.com/skin/images/ggtel.png'
url12 = 'https://www.mualloy.com/skin/images/ysico1.png'
url13 = 'https://www.mualloy.com/skin/images/ysico2.png'
url14 = 'https://www.mualloy.com/skin/images/ysico3.png'
url15 = 'https://www.mualloy.com/skin/images/ysico4.png'
url16 = 'https://www.mualloy.com/skin/images/ysico5.png'
url17 = 'https://www.mualloy.com/skin/images/ysico6.png'
url18 = 'https://www.mualloy.com/skin/images/01.png'
url19 = 'https://www.mualloy.com/skin/images/02.png'
url20 = 'https://www.mualloy.com/skin/images/03.png'
url21 = 'https://www.mualloy.com/skin/images/04.png'
url22 = 'https://www.mualloy.com/skin/images/05.png'
url23 = 'https://www.mualloy.com/skin/images/06.png'
url24 = 'https://www.mualloy.com/skin/images/jzbg.jpg'
url25 = 'https://www.mualloy.com/skin/images/news_left.gif'
url26 = 'https://www.mualloy.com/skin/images/news_right.gif'
url27 = 'https://www.mualloy.com/skin/images/newsico.gif'
url28 = 'https://www.mualloy.com/skin/images/aboutbg.jpg'
url29 = 'https://www.mualloy.com/skin/images/fico1.png'
url30 = 'https://www.mualloy.com/skin/images/fico2.png'
url31 = 'https://www.mualloy.com/skin/images/fico3.png'
url32 = 'https://www.mualloy.com/skin/images/fico4.png'
url33 = 'https://www.mualloy.com/skin/images/ft1.png'
url34 = 'https://www.mualloy.com/skin/images/ft2.png'
url35 = 'https://www.mualloy.com/skin/images/ft3.png'
url36 = 'https://www.mualloy.com/skin/images/ysico1cur.png'
url37 = 'https://www.mualloy.com/skin/images/ft3.png'
url38 = 'https://www.mualloy.com//favicon.ico'

urls = []
urls.extend([url31, url32, url33, url34, url35, url36, url37])
# download_resources(url, save_dir)
download(url38)
# for u in urls:
#     print(u)
#     download_one(u)
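
# Sketch of a sturdier batch download (hypothetical helper, not part of the
# original flow): one requests.Session for connection reuse, raise_for_status()
# to surface HTTP errors, and a skip for already-downloaded files so the 'xb'
# exclusive-create mode never raises FileExistsError.
def download_all(url_list):
    with requests.Session() as session:
        for u in url_list:
            name = os.path.basename(u)
            if os.path.exists(name):
                continue  # already saved; do not overwrite
            resp = session.get(u)
            resp.raise_for_status()
            with open(name, 'xb') as f:
                f.write(resp.content)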
