#
# from scrapy import cmdline
#
#
# if __name__ == '__main__':
#     cmdline.execute("scrapy crawl FutianTeacherSpider".split())
import os

from apscheduler.schedulers.blocking import BlockingScheduler
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from goodDesignImage2018Spider.job.downloadGoodDesignImgJob import downloadGoodDesignImgJob
import requests as req
import json

# Start the crawler
from goodDesignImage2018Spider.util.file_operation import FileOperation


def start_crawl(b):
    """Run every spider registered in this scrapy project when *b* is truthy.

    Blocks until all spiders finish (``CrawlerProcess.start`` runs the reactor).
    """
    if not b:
        return
    crawler_process = CrawlerProcess(get_project_settings())
    # Schedule every spider the project declares, then run them all.
    for name in crawler_process.spider_loader.list():
        crawler_process.crawl(name)
    crawler_process.start()


# Start a timer job that downloads the crawled images.
def start_download_job(b):
    """When *b* is truthy, start a blocking scheduler that repeatedly
    triggers the image-download job.

    ``BlockingScheduler.start()`` never returns while the scheduler runs.
    """
    if not b:
        return
    job_scheduler = BlockingScheduler()
    # Register the periodic task with the scheduler.
    # NOTE(review): cron second='*/59' fires at seconds 0 and 59 of each
    # minute — not "every 59 seconds"; confirm which was intended.
    job_scheduler.add_job(call_download_method, 'cron', second='*/59')
    # Hand control to the scheduler (blocks the calling thread).
    job_scheduler.start()


def call_download_method():
    """Run one pass of the image-download job."""
    downloadGoodDesignImgJob().download_record()


# Write the iF-award API pages to JSON files on disk.
def if_design_img_write_to_json(b, file_path="F:/mycode/py/goodDesignImage2018Spider/goodDesignImage2018Spider/file/2021/"):
    """Page through the iF World Design Guide collection API and save each
    raw JSON response as ``0.json``, ``1.json``, ... under *file_path*.

    Args:
        b: do nothing unless truthy (keeps the existing call-site toggle).
        file_path: destination directory; must end with a path separator.
            Defaults to the original hard-coded location for compatibility.
    """
    if not b:
        return
    suffix = '.json'
    # The API pages by cursor; count=200 per request, so the cursor
    # advances by 200 after each saved page.
    url_template = "https://ifworlddesignguide.com/api/v2/articles/collections/394?cursor={0}&lang=en&count=200&orderby=date&filter=%7B%22filters%22:[]%7D"
    cursor = 0
    page_index = 0
    while True:
        # timeout so a stalled server cannot hang the loop forever
        response = req.get(url_template.format(cursor), timeout=30)
        ret_json = response.json()
        # An empty 'data' array marks the end of the collection.
        if not ret_json['data']:
            print("沒有了")
            break

        json_file_path = file_path + str(page_index) + suffix
        # 'with' closes the file automatically — no explicit close() needed.
        with open(json_file_path, 'wb') as f:
            f.write(response.content)

        page_index += 1
        cursor += 200


# Root directory where downloaded iF-award images are stored.
base_url = "F:/goodDesign/IF/"
if __name__ == '__main__':
    # Step 1 (currently disabled): fetch the API pages into JSON files.
    if_design_img_write_to_json(False)

    # Step 2: walk the saved JSON files and download every image entry.
    file_list = FileOperation.walk_dir("F:/mycode/py/goodDesignImage2018Spider/goodDesignImage2018Spider/file")
    print(file_list)
    for file in file_list:
        # The JSON was saved as raw UTF-8 bytes; read it back with an
        # explicit encoding so a non-UTF-8 platform default cannot
        # mis-decode the non-ASCII headlines.
        with open(file, encoding='utf-8') as f:
            file_json = json.load(f)
        datas = file_json["data"]
        for data in datas:
            # Product name, sanitized into a valid folder name.
            headline = FileOperation.validateTitle(data['headline'])
            # Award year label, used as the parent folder.
            year_str = data['award']['name']

            # Download every image medium of this award entry.
            for media in data['media']:
                if 'image' != media['type']:
                    continue
                # Create the per-product folder lazily.
                parent_path = base_url + year_str + "/" + headline
                if not os.path.exists(parent_path):
                    os.makedirs(parent_path)
                # Strip the thumbnail prefix and inject the hi-res
                # variant path segment before the file name.
                down_url = media['href'].replace("conv_", "")
                down_url_list = down_url.split("/")
                down_url_list.insert(-1, "oex_large")
                down_url = "/".join(down_url_list)
                file_name = down_url_list[-1]
                target_path = parent_path + "/" + file_name
                if os.path.exists(target_path):
                    print("已经下载过啦")
                    continue
                try:
                    print("开始下载--", down_url)
                    # timeout keeps one dead server from hanging the run
                    response = req.get(down_url, timeout=30)
                    with open(target_path, 'wb') as f2:
                        f2.write(response.content)
                except Exception:
                    # was 'except BaseException', which also swallowed
                    # KeyboardInterrupt/SystemExit; best-effort skip only
                    # on ordinary errors.
                    print("异常嘛，先跳过")
                    continue