import requests
import os
import re

# Browser-like User-Agent so the site serves a normal page instead of rejecting the bot.
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; WOW64) '
        'AppleWebKit/537.36 (KHTML, like Gecko) '
        'Chrome/51.0.2704.63 Safari/537.36'
    )
}

def dowmloadPicture(URLs, size, path):
    """Download up to *size* images from *URLs* into directory *path*.

    Images are written as ``0.jpg``, ``1.jpg``, ... in download order.
    ``None`` entries and URLs that fail to download are skipped; the loop
    stops as soon as *size* files have been saved.

    :param URLs: iterable of image URL strings (may contain ``None``)
    :param size: maximum number of images to save
    :param path: existing directory the files are written into
    """
    num = 0
    for url in URLs:
        # Guard clause instead of if/else-continue: skip empty slots early.
        if url is None:
            continue
        try:
            # Keep the try body minimal — only the network call can fail here.
            pic = requests.get(url, headers=headers, timeout=7)
        except Exception:
            # Was `except BaseException`, which also swallowed
            # KeyboardInterrupt/SystemExit; best-effort skip is kept.
            print('False, cannot download this picture')
            continue
        print(pic.status_code)
        # `with` guarantees the handle is closed even if the write raises
        # (the original leaked the handle on a failed fp.write()).
        file_path = os.path.join(path, str(num) + '.jpg')
        with open(file_path, 'wb') as fp:
            fp.write(pic.content)
        num += 1
        if num >= size:
            return


if __name__ == "__main__":

    dic = 'dataset'
    search = input('Search: ')
    dataset_size = int(input('Dataset size: '))

    if not os.path.exists(dic):
        os.mkdir(dic)

    search_url = 'https://feederwatch.org/?s=' + search
    html = requests.get(search_url, headers=headers).text
    pic_url = []
    url = re.findall(r'https?://[\w\./-]*', html)
    for u in url:
        if u.endswith('.jpg'):
            pic_url += [u]
    print('Total image: {value}'.format(value=len(pic_url)))
    # dowmloadPicture(pic_url, dataset_size, dic)