#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2020-04-22 06:01:24
# Project: bxHomePageBelowConten
import sys
import os
import json
import re
from pyspider.libs.base_handler import *

# Raw strings are used for the regex literals: the originals were plain
# strings containing \S and \. which are invalid escape sequences and emit
# SyntaxWarning/DeprecationWarning on modern CPython (pattern text unchanged).

# Any absolute http(s) URL ending in a known image extension, as found inside
# an HTML fragment (used by list_html on the goodsDetail markup).
HTML_ORG_PATTERN = re.compile(r'https?://\S+\.(?:jpg|png|jpeg)', re.I)
# Decomposes an image URL into host (URLSCHEMA), directory part (FILEPATH)
# and the trailing file name (FILENAME).
IMG_PATTERN = re.compile(r'https?://(?P<URLSCHEMA>[^/]+)/(?P<FILEPATH>\S+)/(?P<FILENAME>[^/]+\.(jpg|png|jpeg))$', re.I)
DIR_PATH = '/opt/pyspider/data'       # root data directory on the crawl host
IMG_ROOT_PATH = DIR_PATH + "/images"  # downloaded images are stored below here
# Dict keys that look image-related (case-insensitive), e.g. "imageUrl".
RE_IMAGE = re.compile(r"image\S*", re.I)


def list_dictionary(d, n_tab=-1, *args, **kwargs):
    """Recursively walk a JSON-like structure and collect image references.

    For every leaf value whose nearest dict key looks image-related
    (RE_IMAGE match, or exactly 'PICTURE_ADDRESS' / 'comPic') and whose
    value parses as an image URL, append a tuple
    ``(key, url, file_name, dir_path)`` to the accumulator list.

    Keyword args (kept for backward compatibility with existing callers):
        source_list: accumulator list; defaults to a new list.
        imageProp:   tuple carrying the matched key down the recursion.
    Returns the accumulator list.

    Bug fix: the original did not forward ``imageProp`` when recursing into
    lists, so an image key whose value was a *list* of URLs (or a top-level
    scalar with no ``imageProp`` at all) raised ``KeyError: 'imageProp'``.
    Both lookups now fall back to safe defaults.
    """
    source_list = kwargs.get('source_list', [])
    image_prop = kwargs.get('imageProp', ())
    if isinstance(d, list):
        # Forward the inherited image key so leaves inside lists are kept.
        for item in d:
            source_list = list_dictionary(item, n_tab,
                                          source_list=source_list,
                                          imageProp=image_prop)
    elif isinstance(d, dict):
        n_tab += 1
        for key, value in d.items():
            # Each key starts a fresh prop: only image-looking keys tag
            # their subtree (same per-key reset as the original).
            key_prop = ()
            if RE_IMAGE.search(key) or key == 'PICTURE_ADDRESS' or key == 'comPic':
                key_prop = (key,)
            source_list = list_dictionary(value, n_tab,
                                          source_list=source_list,
                                          imageProp=key_prop)
    else:
        # Leaf: record it only when tagged by an image key AND it actually
        # parses as an image URL (extract_image_prop returns '' otherwise).
        if len(image_prop) > 0:
            file_name, dir_path = extract_image_prop(d)
            if file_name != '':
                source_list.append(image_prop + (d, file_name, dir_path))
    return source_list


def list_html(html):
    """Scan an HTML fragment for image URLs.

    Every match of HTML_ORG_PATTERN becomes a 4-tuple
    ('goodsDetail', url, file_name, dir_path), where the last two fields
    come from extract_image_prop(url).
    """
    return [
        ('goodsDetail', url) + extract_image_prop(url)
        for url in HTML_ORG_PATTERN.findall(html)
    ]


def load_json_file_dict(file_name):
    """Load and parse ``DIR_PATH/file_name`` as JSON.

    Returns the parsed object, or None when the file does not exist.

    Fix: the original leaked the file handle (never closed); the handle is
    now closed via a ``with`` block. The Python-2/3 branch is kept because
    Python 2's ``open`` has no ``encoding`` parameter.
    """
    p = r"{}/{}".format(DIR_PATH, file_name)
    if not os.path.exists(p):
        return None
    if sys.version_info.major > 2:
        f = open(p, 'r', encoding='utf-8')
    else:
        f = open(p, 'r')
    with f:
        return json.load(f)


def extract_image_prop(url):
    """Split an image URL into ``(file_name, dir_path)`` via IMG_PATTERN.

    Returns ('', '') when the URL does not look like a supported image URL.

    Fix: the original ran ``IMG_PATTERN.search(url)`` twice (once for the
    truthiness test, once for ``groupdict()``); the match is now computed
    a single time.
    """
    match = IMG_PATTERN.search(url)
    if match:
        return match.group('FILENAME'), match.group('FILEPATH')
    return '', ''


class Handler(BaseHandler):
    """pyspider handler that harvests goods images from baixingliangfan.

    Pipeline: on_start POSTs the paged homePageBelowConten listing ->
    index_page POSTs getGoodDetailById for each goods item ->
    index_page2 collects every image URL from both JSON payloads plus the
    goodsDetail HTML and schedules each image download ->
    save_img writes the bytes to disk through Deal.
    """

    crawl_config = {
    }

    def __init__(self):
        # NOTE(review): BaseHandler.__init__ is not chained here — confirm
        # the installed pyspider version tolerates that.
        self.deal = Deal()
        # Browser-like headers reused for every request.
        self.headers = {"Content-Type": "application/x-www-form-urlencoded",
                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, "
                                      "like Gecko) Chrome/81.0.4044.122 Safari/537.36",
                        "Accept": "*/*", "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive"}

    @every(minutes=24 * 60)
    def on_start(self):
        """Entry point, re-run daily: crawl listing pages 1..6."""
        for page in range(1, 7):
            data = {"page": str(page)}
            url = "https://wxmini.baixingliangfan.cn/baixing/wxmini/homePageBelowConten?page={}".format(str(page))
            self.crawl(url, data=data, callback=self.index_page,
                       headers=self.headers, method="POST")

    # @config(age=30 * 60)
    def index_page(self, response):
        """For each goods entry in the listing, crawl its detail endpoint.

        The listing entry itself is forwarded via ``save`` so index_page2
        can mine it for image URLs as well.
        """
        dict_data = response.json
        # print(dict_data)
        for goodsInfo in dict_data['data']:
            # print(goodsInfo["name"], goodsInfo["goodsId"])
            detailGoodsInput = {"goodId": goodsInfo["goodsId"]}
            url = "https://wxmini.baixingliangfan.cn/baixing/wxmini/getGoodDetailById?goodId={}"\
                .format(goodsInfo["goodsId"])
            self.crawl(url, data=detailGoodsInput,
                       callback=self.index_page2, headers=self.headers, method="POST",
                       save={'goodsInfo': goodsInfo})

    def index_page2(self, response):
        """Collect image URLs from three sources and schedule downloads.

        Sources: the forwarded listing entry, the detail JSON, and the
        ``goodsDetail`` HTML fragment. Each collected item is a tuple whose
        tail is ``(..., img_url, file_name, dir_path)``.
        """
        dict_data = response.json  # payload of getGoodDetailById
        # print(dict_data)

        goodsInfo = response.save['goodsInfo']  # listing entry from homePageBelowConten
        # print("goodsInfo:", goodsInfo)

        source_list1 = []
        source_list1 = list_dictionary(goodsInfo, n_tab=-1, source_list=source_list1)
        # print("FROM homePageBelowConten source_list:", source_list1)

        source_list2 = []
        source_list2 = list_dictionary(dict_data, n_tab=-1, source_list=source_list2)
        # print("FROM getGoodDetailById source_list:", source_list2)

        source_list3 = list_html(dict_data['data']['goodInfo']['goodsDetail'])
        # print("FROM getGoodDetailById goodsDetail:", source_list3)

        source_list = source_list1 + source_list2 + source_list3
        # print("index_page2 -> RESULT source_list:", source_list)
        for each in source_list:
            # each[1]=url, each[2]=file name, each[3]=relative directory.
            dir_path = self.deal.mkDir(each[3])
            file_name = each[2]
            img_url = each[1]
            self.crawl(img_url, callback=self.save_img,
                       save={'img_url': img_url, 'dir_path': dir_path, 'file_name': file_name,
                             'getGoodDetailById': dict_data, 'goodsInfo': goodsInfo})

    # Save the downloaded image bytes to disk.
    @catch_status_code_error
    def save_img(self, response):
        """Persist a fetched image; return metadata for the result store.

        The decorator routes non-200 responses here too (presumably —
        confirm against pyspider docs), hence the explicit status check.
        """
        if response.status_code != 200:
            # print("save_img ERROR:", response.status_code)
            return {"error": response.status_code}
        else:
            content = response.content
            dir_path = response.save['dir_path']
            file_name = response.save['file_name']
            # print("save_img SUCCESS:", dir_path, file_name)
            img_url = response.save['img_url']
            file_path = dir_path + '/' + file_name
            self.deal.saveImg(content, file_path)
            return {'img_url': img_url,
                    'file_name': file_name,
                    'file_path': file_path,
                    'getGoodDetailById': response.save['getGoodDetailById'],
                    'goodsInfo': response.save['goodsInfo']
                    }


class Deal:
    """Filesystem helpers for storing downloaded images under IMG_ROOT_PATH."""

    def __init__(self):
        # Normalize the root path to end with '/' and ensure it exists.
        self.path = IMG_ROOT_PATH
        if not self.path.endswith('/'):
            self.path = self.path + '/'
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    def mkDir(self, path):
        """Create (if missing) and return the directory ``self.path + path``.

        ``path`` is stripped of surrounding whitespace first. The original's
        redundant if/else (both branches returned the same value) is folded
        into a single return.
        """
        dir_path = self.path + path.strip()
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        return dir_path

    def saveImg(self, content, path):
        """Write binary image ``content`` to ``path``, closing the handle."""
        with open(path, 'wb') as f:
            f.write(content)

    def saveBrief(self, content, dir_path, name):
        """Write ``content`` UTF-8 encoded to ``<dir_path>/<name>.txt``.

        Fix: the original opened the file in text mode ('w+') yet wrote
        encoded *bytes*, which raises TypeError on Python 3, and never
        closed the handle. Binary mode preserves the on-disk bytes on both
        Python 2 and 3, and ``with`` guarantees the close.
        """
        file_name = dir_path + "/" + name + ".txt"
        with open(file_name, "wb") as f:
            f.write(content.encode('utf-8'))

    def getExtension(self, url):
        """Return the substring after the final '.' in ``url``."""
        return url.split('.')[-1]
