


# pip3 install bs4
# pip3 install lxml
# pip3 install requests

import os
import re
import time
from threading import *

import requests
from bs4 import BeautifulSoup


# Directory (relative to the current working directory) where downloaded
# wallpapers are written. NOTE(review): the directory is assumed to exist
# before any download runs — confirm it is created beforehand.
wallpeper_save_dir = 'wallpeper_download'

def download_wallpeper(url):
    """Download one wallpaper image from *url* into ``wallpeper_save_dir``.

    The saved filename is ``Wallpeper`` plus the ``_<digits>...`` suffix
    extracted from the URL. Network errors are swallowed (best effort),
    so one failed download does not crash its worker thread.
    """
    try:
        print(f'start download: {url}')

        # Short timeout keeps a stuck server from blocking the thread forever.
        response_img = requests.get(url, timeout=3).content

        # Filename suffix: everything from "_<digit>" to the end of the URL.
        # Guard the lookup — the original unconditional [0] raised IndexError
        # on URLs that don't match the expected pattern.
        matches = re.findall(r'_[1-9].*', url)
        if not matches:
            print(f'skip (unexpected url format): {url}')
            return
        name_save = matches[0]

        # Create the target directory on first use instead of crashing
        # with FileNotFoundError when it doesn't exist yet.
        os.makedirs(wallpeper_save_dir, exist_ok=True)

        # "with" guarantees the file handle is closed even on write errors
        # (the original left the handle open).
        with open(f"{wallpeper_save_dir}/Wallpeper{name_save}", 'wb') as out:
            out.write(response_img)

        print(f'download: {url}')

    except requests.exceptions.RequestException:
        # Covers ConnectionError AND Timeout/HTTP errors — the original only
        # caught ConnectionError, so the timeout=3 above could still blow up
        # the thread. Best-effort download: ignore and move on.
        pass


def search_get_website(name_wallpeper):
    """Search wallpaper-mania.com for *name_wallpeper* and download results.

    Each result thumbnail URL ends in a "-<size>." segment; stripping it
    yields the full-resolution image URL, which is then fetched in its own
    thread via download_wallpeper().
    """
    query = str(name_wallpeper).lower().replace(' ', '+')

    response = requests.get(f'https://wallpaper-mania.com/?s={query}').text
    soup = BeautifulSoup(response, 'lxml')

    for img_a in soup.find_all('a', class_='post-thumbnail'):
        # Guard against anchors without an <img> or without a src attribute.
        img_tag = img_a.find('img')
        if img_tag is None:
            continue
        find_img_src = img_tag.get('src')
        if not find_img_src:
            continue

        # Remove the "-<size>." thumbnail suffix to get the full-size URL.
        # Skip non-matching URLs explicitly: the original bare
        # "except: pass" left find_bad undefined (NameError on the first
        # miss) or silently reused a stale value from a prior iteration.
        size_parts = re.findall(r'-[1-9].*\.', find_img_src)
        if not size_parts:
            continue
        clear_url_image = str(find_img_src).replace(size_parts[0], '.')

        worker = Thread(target=download_wallpeper, args=(clear_url_image,))
        time.sleep(0.5)  # throttle request rate so we don't hammer the site
        worker.start()


if __name__ == "__main__":
    # Prompt for a search term, then scrape and download matching wallpapers.
    query = input("Name Wallpeper's download: ")
    search_get_website(str(query))

