# **********************OBJ INFO**************************
# Author:YuKalix
# @Time    : 2019-9-19 16:15
# @Site    : 52ziyu.cn
# @File    : test.py
# @software: PyCharm
# *********************************************************

import requests
from urllib import parse

from time import sleep
from tqdm import tqdm
import json
import time
import threading


class SpiderImages:
    """Scrape Baidu image-search thumbnail URLs for a keyword and download them.

    Workflow: ``get_all_images_url_list_and_save`` pages through Baidu's
    ``acjson`` endpoint and writes one thumbnail URL per line to
    ``../../data_set/unsigned_set/<key>.txt``; ``read_and_download`` reads
    that file back and downloads the images with worker threads.
    """

    def __init__(self, key):
        # `key` is expected to be URL-quoted already (the __main__ demo calls
        # parse.quote before constructing the spider) — TODO confirm callers.
        self.key = key
        # Paging endpoint: the trailing `pn=` is completed with a result
        # offset (multiples of 30) when each page is requested.
        self.url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={0}' \
                   '&cl=&lm=&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=&hd=&latest=&copyright=&word={1}' \
                   '&s=&se=&tab=&width=&height=&face=&istype=&qc=&nc=&fr=&expermode=&force=&pn='.format(key, key)
        print(self.url)

    def request(self, url):
        """GET `url` with a browser User-Agent (Baidu rejects the default
        requests UA) and return the `requests.Response`."""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0',
        }
        # timeout added: without one, a stalled connection hangs forever.
        return requests.get(url, headers=headers, timeout=10)

    def get_all_images_url_list_and_save(self):
        """Page through the search results, collect every `thumbURL`, and
        write them one per line to ../../data_set/unsigned_set/<key>.txt."""
        pn = 0
        consecutive_errors = 0
        all_images_url_list = []

        while True:
            # Fix: start at pn=0 so the first page of results is included
            # (the original fetched it, then discarded its data before the
            # loop overwrote `content`).
            url = self.url + str(pn)
            pn += 30
            try:
                content = json.loads(self.request(url).text)
                consecutive_errors = 0
            except (json.JSONDecodeError, requests.RequestException):
                # Fix: skip a bad page instead of silently re-processing the
                # previous page's stale `content` (which duplicated URLs).
                print('json文件读取错误')
                print('错误链接:', url)
                consecutive_errors += 1
                if consecutive_errors >= 5:
                    # Persistent failures: stop rather than loop forever.
                    break
                continue

            # listNum == 0 marks the end of the result set.
            if content.get('listNum') == 0:
                print('读取文件结束')
                break

            for item in content.get('data', []):
                # Some entries are placeholder records without a thumbnail.
                if 'thumbURL' not in item:
                    continue
                all_images_url_list.append(item['thumbURL'])
                # Tiny sleep so the in-place counter reads as a live ticker.
                time.sleep(0.001)
                print('\r目前已经获取{}条数据'.format(len(all_images_url_list)), end='')

        print('搜索到的数据约有{}张'.format(len(all_images_url_list)))
        # Persist today's URL list, one URL per line.
        file_name = '../../data_set/unsigned_set/{}.txt'.format(parse.unquote(self.key))
        with open(file_name, 'w+', encoding='utf-8') as f:
            print('已经写入', file_name)
            for item in all_images_url_list:
                f.write(item + '\n')

    def read_and_download(self):
        """Ask how many images to download, read the saved URL file, and
        download the first n URLs, one worker thread per URL."""
        n = int(input('请输入需要下载多少张图片:'))
        # Fix: file is now closed deterministically via `with`.
        with open('../../data_set/unsigned_set/{}.txt'.format(parse.unquote(self.key)), 'r') as f:
            all_images_url_list = [line.rstrip('\n') for line in f]

        # Fix: start all workers first, then wait for them all — the original
        # joined each thread right after starting it, serializing every
        # download and defeating the threading entirely.
        threads = []
        for item in tqdm(all_images_url_list[:n]):
            t = threading.Thread(target=self.download, args=(item,))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

    def download(self, item):
        """Download one image URL into ../../data_set/unsigned_set/.

        The file name is the last (at most) 20 characters of the URL's final
        path segment — short, but collisions are possible; TODO confirm
        acceptable for this data set.
        """
        sleep(0.1)  # small stagger so threads don't hammer the server at once
        file_name = '../../data_set/unsigned_set/' + item.split('/')[-1][-20:]
        try:
            content = requests.get(item, timeout=10).content
            with open(file_name, 'wb') as f:
                f.write(content)
        except (requests.RequestException, OSError):
            # Best-effort: log and continue so one bad URL doesn't kill the run.
            print('下载失败:', item)


if __name__ == '__main__':
    # Entry point: restore the intended interactive workflow. The previous
    # body was leftover debug scratch and crashed unconditionally on
    # os.remove('') (FileNotFoundError), with the real flow commented out.
    key = input("请输入搜索关键词: ")
    key = parse.quote(key)  # Baidu expects the query URL-encoded
    S = SpiderImages(key)
    S.get_all_images_url_list_and_save()
    S.read_and_download()
