#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2020-04-22 06:01:24
# Project: homePageContent
import sys
import os
import json
import re
from pyspider.libs.base_handler import *

# Matches an absolute http(s) image URL and captures the host (URLSCHEMA),
# the directory part (FILEPATH) and the file name (FILENAME, jpg/png/jpeg,
# case-insensitive).  A raw string is required here: '\S' and '\.' in a
# plain string literal are invalid escape sequences (SyntaxWarning on
# modern Python).
IMG_PATTERN = re.compile(
    r'https?://(?P<URLSCHEMA>[^/]+)/(?P<FILEPATH>\S+)/(?P<FILENAME>[^/]+\.(jpg|png|jpeg))$',
    re.I)
DIR_PATH = '/opt/pyspider/data'       # directory holding data.json
IMG_ROOT_PATH = DIR_PATH + "/images"  # downloaded images are stored under here


def list_dictionary(d, n_tab=-1, *args, **kwargs):
    """Recursively walk *d* and collect image-URL tuples.

    Traverses nested lists and dicts.  Whenever a leaf value sits under a
    key named ``image`` or ``PICTURE_ADDRESS`` and matches IMG_PATTERN, a
    tuple ``(key, url, filename, filepath)`` is appended to the
    accumulator.

    Args:
        d: structure to walk (list, dict, or scalar leaf).
        n_tab: current nesting depth (only used by the commented-out
            debug prints; kept for interface compatibility).
        **kwargs: ``source_list`` — accumulator list (created if absent);
            ``tup1`` — pending ``(key,)`` tuple from the enclosing dict.

    Returns:
        The accumulator list of matched image tuples.

    Bug fixes vs. the original: missing ``source_list``/``tup1`` kwargs
    raised KeyError (any scalar at top level, or any list nested under an
    ``image`` key, crashed); the list branch now also propagates ``tup1``
    so image keys whose value is a *list* of URLs are collected.
    """
    source_list = kwargs.get('source_list')
    if source_list is None:
        source_list = []
    if isinstance(d, list):
        for item in d:
            source_list = list_dictionary(item, n_tab,
                                          source_list=source_list,
                                          tup1=kwargs.get('tup1', ()))
    elif isinstance(d, dict):
        n_tab += 1
        for key, value in d.items():
            tup1 = ()
            if key == 'image' or key == 'PICTURE_ADDRESS':
                tup1 = tup1 + (key,)
            source_list = list_dictionary(value, n_tab,
                                          source_list=source_list, tup1=tup1)
    else:
        tup1 = kwargs.get('tup1', ())
        # Only string leaves can be URLs; a non-string here would make
        # IMG_PATTERN.search raise TypeError.
        if len(tup1) > 0 and isinstance(d, str):
            fileprops = extract_image_name(d)
            if fileprops[0] != '':
                source_list.append(tup1 + (d,) + fileprops)
    return source_list


def load_json_file_dict(file_name):
    """Load ``DIR_PATH/file_name`` as JSON and return the parsed object.

    Returns ``None`` when the file does not exist (unchanged from the
    original implicit behaviour).  Bug fix: the file handle was never
    closed; ``with`` now guarantees it.
    """
    p = "{}/{}".format(DIR_PATH, file_name)
    if not os.path.exists(p):
        return None
    if sys.version_info.major > 2:
        # Explicit UTF-8 on Python 3; Python 2's open() takes no
        # encoding argument, hence the version split.
        with open(p, 'r', encoding='utf-8') as f:
            return json.load(f)
    with open(p, 'r') as f:
        return json.load(f)


def extract_image_name(url):
    """Split an image URL into ``(filename, filepath)`` via IMG_PATTERN.

    Returns ``('', '')`` when *url* does not look like a jpg/png/jpeg
    link.  The original ran the regex search twice; the match object is
    now computed once and reused.
    """
    match = IMG_PATTERN.search(url)
    if match:
        parts = match.groupdict()
        return (parts["FILENAME"], parts["FILEPATH"])
    return ('', '')


class Handler(BaseHandler):
    """pyspider handler: trigger a daily crawl, parse the locally stored
    data.json feed, and download every image URL found in it."""

    crawl_config = {
    }

    def __init__(self):
        # Bug fix: the original override skipped BaseHandler's own
        # initialisation entirely; chain up so framework setup still runs.
        super(Handler, self).__init__()
        self.deal = Deal()

    @every(minutes=24 * 60)
    def on_start(self):
        # Original target kept for reference:
        # self.crawl('http://v.jspang.com:8088/baixing/wxmini/homePageContent', callback=self.index_page)
        self.crawl('https://www.baidu.com/', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        """Read data.json and schedule one crawl per discovered image.

        Each entry of source_list is (key, img_url, file_name, file_path)
        as produced by list_dictionary/extract_image_name.
        """
        dict_data = load_json_file_dict('data.json')
        source_list = list_dictionary(dict_data, n_tab=-1, source_list=[])
        print(source_list)
        for each in source_list:
            dir_path = self.deal.mkDir(each[3])
            file_name = each[2]
            img_url = each[1]
            self.crawl(img_url, callback=self.save_img,
                       save={'img_url': img_url, 'dir_path': dir_path, 'file_name': file_name})

    # Save the downloaded image
    def save_img(self, response):
        """Persist the fetched image bytes to disk and report where."""
        content = response.content
        dir_path = response.save['dir_path']
        file_name = response.save['file_name']
        img_url = response.save['img_url']
        file_path = dir_path + '/' + file_name
        self.deal.saveImg(content, file_path)
        return {"img_url": img_url, "file_name": file_name, "file_path": file_path}


class Deal:
    """Filesystem helper: owns the image root directory and file writes."""

    def __init__(self):
        # Normalise the root path to end with '/' and make sure it exists.
        self.path = IMG_ROOT_PATH
        if not self.path.endswith('/'):
            self.path = self.path + '/'
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    def mkDir(self, path):
        """Create ``<root>/<path>`` if needed and return it."""
        dir_path = self.path + path.strip()
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        return dir_path

    def saveImg(self, content, path):
        """Write raw image bytes to *path* (handle closed even on error)."""
        with open(path, 'wb') as f:
            f.write(content)

    def saveBrief(self, content, dir_path, name):
        """Write *content* to ``<dir_path>/<name>.txt`` as UTF-8.

        Bug fix: the original opened the file in text mode ("w+") yet
        wrote ``content.encode('utf-8')`` — bytes into a text stream is a
        TypeError on Python 3 — and never closed the handle.
        """
        file_name = dir_path + "/" + name + ".txt"
        with open(file_name, 'wb') as f:
            f.write(content.encode('utf-8'))

    def getExtension(self, url):
        """Return the substring after the last '.' in *url*."""
        return url.split('.')[-1]


