#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2020-04-22 06:01:24
# Project: compressedPicImages
# 打靶专用

import os
import re

import time
from datetime import datetime
from datetime import timedelta

from pyspider.libs.base_handler import *

# Regex for image URLs: captures host (URLSCHEMA), directory part (FILEPATH)
# and the final file name with a jpg/png/jpeg extension (FILENAME).
# Fix: use a raw string — '\S' and '\.' in a plain string are invalid escape
# sequences and raise a DeprecationWarning/SyntaxWarning on modern Python.
IMG_PATTERN = re.compile(r'https?://(?P<URLSCHEMA>[^/]+)/(?P<FILEPATH>\S+)/(?P<FILENAME>[^/]+\.(jpg|png|jpeg))$', re.I)
DIR_PATH = '/opt/pyspider/data'            # local data root used by this spider
IMG_ROOT_PATH = DIR_PATH + "/compressedPic/images"  # where fetched images are stored


def extract_image_name(url):
    """Split an image URL into its file name and directory path.

    Args:
        url: image URL expected to match IMG_PATTERN.

    Returns:
        (filename, filepath) tuple; both are '' when the URL does not match.
    """
    # Fix: the original ran IMG_PATTERN.search(url) twice — once for the
    # truthiness test and once for groupdict(). Search once and reuse it.
    match = IMG_PATTERN.search(url)
    if match is None:
        return ('', '')
    groups = match.groupdict()
    return (groups["FILENAME"], groups["FILEPATH"])


def add_second(date_str, add_count=1):
    """Shift a "%Y%m%d%H%M%S" timestamp string by a number of seconds.

    Args:
        date_str: timestamp such as "20190122093838".
        add_count: seconds to add (may be negative); defaults to 1.

    Returns:
        The shifted timestamp in the same "%Y%m%d%H%M%S" format.
    """
    # Idiom fix: datetime.strptime parses straight to a datetime, so there is
    # no need for the original time.strptime -> tuple -> datetime(...) dance.
    fmt = "%Y%m%d%H%M%S"
    shifted = datetime.strptime(date_str, fmt) + timedelta(seconds=add_count)
    return shifted.strftime(fmt)


class Handler(BaseHandler):
    """pyspider handler that brute-forces timestamped image file names under a
    fixed CDN prefix and saves every successfully fetched image to local disk.
    """
    crawl_config = {
    }

    def __init__(self):
        # Deal owns the local image directory (creation and file writes).
        # NOTE(review): super().__init__() is not called — confirm pyspider's
        # BaseHandler tolerates this before changing it.
        self.deal = Deal()

    @every(minutes=24 * 60)
    def on_start(self):
        # Scheduled entry point (runs once every 24h via @every).
        # The commented-out code below is a leftover POST-based seed request;
        # the live seed is a plain GET whose response body is unused —
        # index_page generates all candidate URLs itself.
        # headers = {}
        # headers["Content-Type"] = "application/x-www-form-urlencoded"
        # headers["User-Agent"] = "PostmanRuntime/7.24.1"
        # headers["Accept"] = "*/*"
        # headers["Accept-Encoding"] = "gzip, deflate, br"
        # headers["Connection"] = "keep-alive"
        # data = {}
        # data["page"] = "1"
        # self.crawl('http://v.jspang.com:8088/baixing/wxmini/homePageBelowConten', data=data, callback=self.index_page,
        #            headers=headers, method="POST")
        self.crawl('https://www.baidu.com/', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        # Enumerate candidate URLs <img_root><YYYYmmddHHMMSS>_<i>.jpg for
        # every second in [st, et) and every index i in [0, 10000), and
        # schedule a fetch for each.  Comparing st < et as strings is valid
        # because both are fixed-width numeric timestamps.
        img_root = "https://images.baixingliangfan.cn/compressedPic/"
        st = "20190122093838"
        et = "20190122103038"
        while st < et:
            for i in range(0, 10000):
                # mkDir("") just ensures (and returns) the root image dir.
                dir_path = self.deal.mkDir("")
                file_name = st + "_" + str(i) + ".jpg"
                img_url = img_root + file_name
                self.crawl(img_url, callback=self.save_img,
                           save={'img_url': img_url, 'dir_path': dir_path, 'file_name': file_name})
            st = add_second(st)

    # Save the fetched image (callback for index_page's crawls).
    @catch_status_code_error
    def save_img(self, response):
        # @catch_status_code_error routes non-2xx responses here too, so the
        # explicit status check below is what filters out the misses.
        if response.status_code != 200:
            return {"error": response.status_code}
        else:
            content = response.content
            dir_path = response.save['dir_path']
            file_name = response.save['file_name']
            img_url = response.save['img_url']
            file_path = dir_path + '/' + file_name
            self.deal.saveImg(content, file_path)
            return {"img_url": img_url, "file_name": file_name, "file_path": file_path}


class Deal:
    """File-system helper for the spider: owns the local image root directory
    and provides directory-creation and file-writing primitives.
    """

    def __init__(self):
        # Normalize the configured root to end with '/' and make sure it
        # exists.  exist_ok=True replaces the race-prone exists()+makedirs().
        self.path = IMG_ROOT_PATH
        if not self.path.endswith('/'):
            self.path = self.path + '/'
        os.makedirs(self.path, exist_ok=True)

    def mkDir(self, path):
        """Ensure <root>/<path.strip()> exists and return its full path."""
        dir_path = self.path + path.strip()
        os.makedirs(dir_path, exist_ok=True)
        return dir_path

    def saveImg(self, content, path):
        """Write binary image content to path.

        Fix: use a context manager so the handle is closed even if the
        write raises (the original leaked the handle on error).
        """
        with open(path, 'wb') as f:
            f.write(content)

    def saveBrief(self, content, dir_path, name):
        """Write text content to <dir_path>/<name>.txt as UTF-8.

        Fix: the original wrote content.encode('utf-8') (bytes) to a
        text-mode file — a TypeError on Python 3 — and never closed the
        handle.  Write the string through an explicit UTF-8 text file.
        """
        file_name = dir_path + "/" + name + ".txt"
        with open(file_name, "w+", encoding="utf-8") as f:
            f.write(content)

    def getExtension(self, url):
        """Return the substring after the last '.' in url."""
        return url.split('.')[-1]


