from os import makedirs

import requests
from bs4 import BeautifulSoup
from contextlib import closing

from os.path import exists

# Running 1-based counter used to prefix every downloaded file name;
# incremented in just_url() via a ``global`` statement so numbering
# continues across pages.
global_index = 1

def download(photo_url, dir_name, filename):
    """Stream *photo_url* into ``dir_name/filename``, skipping existing files.

    Args:
        photo_url: Direct URL of the image/GIF to fetch.
        dir_name: Target directory (must already exist).
        filename: File name to write inside *dir_name*.

    Raises:
        requests.exceptions.RequestException: on network failure or an
            HTTP error status.
        OSError: if the file cannot be written.
    """
    jpg_filename = dir_name + '/' + filename
    if exists(jpg_filename):
        # Already downloaded on a previous run — never overwrite.
        return
    with closing(requests.get(url=photo_url, stream=True, timeout=90)) as r:
        # Fail fast on HTTP errors instead of saving an error page as a .gif.
        r.raise_for_status()
        # 'wb' (not the original 'ab+'): always start fresh so a retried
        # download can never append onto a partial file. Per-chunk flush()
        # removed — the buffered writer handles it, and the file is closed
        # (and flushed) by the `with` block.
        with open(jpg_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)


def start_down_pic_by_cc(key, url, detail_name):
    """Download one GIF into a directory named after the search key.

    Args:
        key: Search keyword, used as the directory name; falls back to
            'giphy' when empty/falsy.
        url: Direct URL of the GIF to download.
        detail_name: File name to save the GIF as.
    """
    # Bug fix: the original used ``key is not ""`` — an identity test on a
    # string literal, which is not guaranteed to behave like equality (and
    # raises a SyntaxWarning on Python 3.8+). Truthiness covers both ""
    # and None correctly.
    dir_name = key if key else 'giphy'
    # exist_ok=True removes the race between an exists() check and
    # makedirs(), and is a no-op when the directory is already there.
    makedirs(dir_name, exist_ok=True)

    print('下载: {}'.format(detail_name))
    download(url, dir_name, detail_name)


def just_url(key, page):
    """Crawl the Giphy API and download every GIF matching *key*.

    Pages through the search endpoint (or the trending endpoint when *key*
    is empty) starting at offset *page*, downloading each result's
    "downsized" rendition via :func:`start_down_pic_by_cc`.

    Args:
        key: URL-encoded search keyword; "" means the trending feed.
        page: Starting offset into the result list.

    Bug fixes vs. the original:
      * ``key is not ""`` identity test replaced with ``key != ""``.
      * The original recursed unconditionally after every page — even an
        empty result page — so it never terminated and eventually hit the
        recursion limit. Rewritten as a loop that stops on an empty page.
    """
    global global_index
    sort_type = ""
    # sort_type = "recent"
    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
        "accept": "*/*",
        "origin": "https://giphy.com",
        "pragma": "no-cache",
        "referer": "https://giphy.com/",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "cache-control": "no-cache",
        'content-type': 'application/json',
        'accept-encoding': 'application/json',
        "accept-language": "en-US,en;q=0.9"

    }
    offset = page
    while True:
        if key != "":
            api_url = "https://api.giphy.com/v1/gifs/search?offset=" \
                      + str(offset) + "&type=gifs&sort=" \
                      + sort_type + "&q=" \
                      + key + "&api_key=Gc7131jiJuvI7IdN0HZ1D7nh0ow5BU6g&pingback_id=5ff2aa5707494db7"
        else:
            api_url = "https://api.giphy.com/v1/gifs/trending?offset=" \
                      + str(offset) + "&api_key=Gc7131jiJuvI7IdN0HZ1D7nh0ow5BU6g&pingback_id=5fed29bd0a63d693"
        r = requests.get(api_url, headers=headers)
        print(r.text)
        json_data = r.json()
        print('================= 第' + str(offset) + '页==========')
        data = json_data['data']
        pagination = json_data['pagination']
        count = pagination['count']
        if not data:
            # Empty page: we have exhausted the results — stop crawling.
            break
        try:
            for item in data:
                file_name = "(" + str(global_index) + ")" + item['title'] + ".gif"
                global_index = global_index + 1
                images = item['images']
                if images:
                    downsized = images['downsized']
                    if downsized:
                        url = downsized['url']
                        start_down_pic_by_cc(key, url, file_name)
        except requests.exceptions.RequestException:
            # Best-effort: a single failed download should not abort the crawl.
            pass
        except OSError:
            pass

        # Advance to the next page: resume after the items just received.
        offset = pagination['offset'] + count


if __name__ == '__main__':
    # Entry point: crawl GIFs for the URL-encoded query "chow chow",
    # starting at offset 0.
    just_url("chow%20chow", 0)
