import datetime
import re
import time
import json
import requests
import os

'''
@作者：gussu-毛虫
@博客：https://blog.csdn.net/weixin_46897073?spm=1011.2124.3001.5343
@声明：如引用请表明出处
@ 已加装断点续传 From admin | q.w.e.a.s@icloud.com
@注意：在自己电脑上运行时，一定要更改headers中的Cookie数据为自己的数据，否则会报错
'''

# Request headers sent with every Baidu call. The Cookie value is session-
# specific: per the module notice it must be replaced with the cookie from
# your own logged-in browser, otherwise Baidu rejects the requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/79.0.3945.130 Safari/537.36',
    'Cookie': 'BIDUPSID=690C7D70E77229FD1EB4F07CB773DC10; PSTM=1592993186; '
              'BAIDUID=EBE2C1DA9A2C9903496BF3048F87A868:FG=1; '
              'H_WISE_SIDS'
              '=219946_234020_219942_110085_246767_249015_249892_252810_253463_203518_244956_253516_254340_254733_250606_253213_255290_251133_253569_252129_255957_255890_251461_253990_256315_256320_256350_229154_255179_245042; BDUSS=daN043VWdJaUt0aUoxY0o2alJIV0h0R2wxTFVIQXphZ1RCSnZiMjZoMjM1OFJrRVFBQUFBJCQAAAAAAQAAAAEAAAAcaEJ4us699cfnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALdanWS3Wp1kSm; BDUSS_BFESS=daN043VWdJaUt0aUoxY0o2alJIV0h0R2wxTFVIQXphZ1RCSnZiMjZoMjM1OFJrRVFBQUFBJCQAAAAAAQAAAAEAAAAcaEJ4us699cfnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALdanWS3Wp1kSm; H_PS_PSSID=36542_39226_39223_39194_39037_39198_39240_39208_39233_26350_39138_39137_39101; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BAIDUID_BFESS=EBE2C1DA9A2C9903496BF3048F87A868:FG=1; delPer=0; PSINO=7; BDRCVFR[feWj1Vr5u3D]=mk3SLVN4HKm; BA_HECTOR=840kak0l012h2h002l05a5091ielmad1o; BDRCVFR[k2U9xfnuVt6]=mk3SLVN4HKm; BDRCVFR[X_XKQks0S63]=mk3SLVN4HKm; userFrom=www.baidu.com; ab_sr=1.0.1_N2RmYTZmYmFmMWM1YjFjMThlMDEzMjBhZmQ2OGM0ZjJlMTVjOGRlMGZjZjlmMDMyNTg5MDk0YzBiMDBjMWI1NTJjZGYyY2ZjZGMwNjAzYmMzYTcyMmE1Y2IyYjQ5YzY4ODA4YjE1ZjQxMjc3YmMxOTIzZGE3MTZjMzQ2YjYxYjQ4YjJjZmJkNWUyMjkxZmJmZTc0YjY4YmMxMTEzNDczNw==; BDRCVFR[dG2JNJb_ajR]=mk3SLVN4HKm'}
# Running image counter shared with tow(): resume point + images saved so far.
num = 0
# Accumulated scheme-less "objURL" fragments gathered by one(); tow() consumes it.
url = []


def one(keyword, page):
    """Collect image "objURL" links from Baidu image search into the global ``url`` list.

    Args:
        keyword: the search term.
        page: number of result pages to fetch (each flip page offset step is 20).

    Side effects:
        Prepends the links found on each page to the module-level ``url`` list
        and prints progress to stdout.
    """
    global url
    # Hoisted out of the loop: compile the objURL pattern once.
    pat = re.compile(r'"objURL":"https://(.*?)"')
    for i in range(1, page + 1):
        # BUG FIX: pn is the result offset. The original hard-coded pn=1000, so
        # every iteration re-fetched the same page and collected duplicates.
        # Step through pages 20 results at a time instead.
        url1 = 'https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word={}&pn={}'.format(
            keyword, (i - 1) * 20)
        data = requests.get(url1, headers=headers)
        link = pat.findall(data.text)
        if link:  # guard: a page with no matches would raise IndexError on link[0]
            print(f"* {datetime.datetime.now()}-- [GIT] * https://" + link[0])
        url = link + url
    print(f"* {datetime.datetime.now()}-- [ALL]", len(url))


def tow(keyword, page):
    """Download up to ``page`` images from the global ``url`` list, resuming
    from the count recorded in the keyword's log.json.

    Args:
        keyword: the search term; also the name of the per-keyword image folder.
        page: maximum number of images to download in this run.

    Returns:
        (urls, num): the list of image URLs fetched without error this run,
        and the running counter (resume point + images attempted so far).
    """
    global url, num
    base_dir = r'C:\Users\Maply\.mcode\virtualFS\projects\default\学习\01-爬虫\IMG'
    save_dir = os.path.join(base_dir, keyword)
    # Create the per-keyword folder if needed. exist_ok avoids the race-prone
    # listdir() check the original used.
    os.makedirs(save_dir, exist_ok=True)

    p = 0
    try:
        # Resume point: images already downloaded in previous runs.
        # BUG FIX: read the log from the same absolute path __main__ writes it
        # to (the original used a CWD-relative path, so resume silently never
        # triggered), close the handle, and coerce to int immediately — the
        # stored value may be a string, which broke `num - p` below.
        with open(os.path.join(save_dir, "log.json"), "r") as fh:
            p = int(json.loads(fh.read())["len"])
        print(f"* {datetime.datetime.now()}-- [P] {p}")
    except (OSError, ValueError, KeyError, TypeError):
        pass  # no usable log yet: start from zero

    num = p
    urls = []
    for i in range(p, len(url)):
        image_url = 'https://' + url[i]  # stored fragments are scheme-less; restore scheme
        num = num + 1
        try:
            print(f"* {datetime.datetime.now()}-- [GIT] " + image_url)
            data = requests.get(image_url, headers=headers)
            urls.append(image_url)
            with open(os.path.join(save_dir, f"{num}.jpg"), 'wb') as file:
                file.write(data.content)
            print(f'* {datetime.datetime.now()}-- [NUM] <{num - p}${num}>')
        except Exception:
            # Best effort: log the failed URL and keep downloading the rest.
            print(f"* {datetime.datetime.now()}-- [ERROR] " + image_url)

        print('*' * 600)
        if num >= int(page) + p:
            break
    return urls, num


if __name__ == "__main__":
    keyword = input(f'* {datetime.datetime.now()}-- [INPUT] 输入要下载的内容：')
    n = input(f'* {datetime.datetime.now()}-- [INPUT] 下载个数：')
    one(keyword, int(n))
    urls, nums = tow(keyword, int(n))
    print(f"* {datetime.datetime.now()}-- [GIT] 200 OK")
    log = {}
    p = 0
    try:
        log = open(fr"IMG/{keyword}/log.json", "r").read()
        p = int(json.loads(log)["len"])
        print(f"* {datetime.datetime.now()}-- [P] {p}")
        log = json.loads(log)
    except:
        pass
    for i in urls:
        p += 1
        log[p] = i
    log["len"] = n
    log = json.dumps(log)  # log 内只储存成功url
    with open(r'C:\Users\Maply\.mcode\virtualFS\projects\default\学习\01-爬虫\IMG\\' + keyword + "\log.json",
              "w") as this:
        this.write(log)
