import scrapy
import lzstring
import re

# from designdb_units.imgePathUnit import GetRealImagePathUrl
from ..items import DesigndbSpiderItem


def GetRealImagePathUrl(url):
    """Normalise the KEY query parameter of a designdb image URL.

    The server embeds the numeric image key in a token such as
    ``KEY=12ab34-foo``; this strips the letters and the ``-suffix`` so the
    parameter becomes ``KEY=1234``.

    :param url: image URL containing a ``KEY=...`` query parameter.
    :return: the rewritten URL, or ``url`` unchanged when the KEY token is
        missing or malformed (previously this returned ``None``, which
        crashed callers that immediately call ``str`` methods on the result).
    """
    # The KEY=... token, up to the next '&' or the end of the string.
    pattern = r"KEY.*?(?=(&|$))"
    try:
        match = re.search(pattern, url)
        # "KEY=12ab34-foo" -> "12ab34"
        raw_key = match.group().split("-")[0].split("KEY=")[1]
        # Keep only the digits, then drop leading zeros via int().
        digits = re.sub('[a-zA-Z]', "", raw_key)
        return re.sub(pattern, "KEY=" + str(int(digits)), url)
    except (AttributeError, IndexError, ValueError):
        # AttributeError: no KEY token; IndexError: token without "KEY=";
        # ValueError: no digits left. Log and fall back to the input.
        print("替换报错：：", url)
        return url


class WipoSpider(scrapy.Spider):
    """Spider for the WIPO Global Design Database (designdb.wipo.int).

    POSTs an lz-string-compressed Solr query to the select endpoint,
    pages through the result list, and follows each hit's detail page
    to collect its image URLs.
    """

    x = lzstring.LZString()  # shared compressor for the 'qz' form field
    name = "wipoSpider"
    allowed_domains = ["designdb.wipo.int", "www.wipo.int", "www3.wipo.int", "branddb.wipo.int"]
    page_index = 1   # 第几页 (current page, 1-based)
    total = 0        # 好多数据 (total hit count reported by the server)
    page_size = 50   # 每页多少 (hits per page)
    total_page = 0   # 好多页 (total page count, computed from `total`)
    KeyWord = 'sunglasses'  # default search term; overridable via -a keyWord=...

    # Query template; filled with (keyword, rows, 0-based start offset).
    DataQiJson = '''{"p":{"search":{"sq":[{"te":"%s","fi":"PROD"}]},"rows":%d,"start":%d},"type":"design","la":"en","qi":"1-/n+pivZ3SVr+IwKq0ci5cqvjQEgk6rGxDuWMCVxrf7c="}'''

    post_url = 'https://designdb.wipo.int/designdb/jsp/select.jsp'

    def __init__(self, keyWord=None, *args, **kwargs):
        super(WipoSpider, self).__init__(*args, **kwargs)
        self.query = keyWord  # kept for backward compatibility
        if keyWord:
            # Honour the spider argument instead of silently ignoring it.
            self.KeyWord = keyWord

    def _build_form(self):
        """Build the 'qz' form data for the current page_index.

        Solr's `start` is a 0-based offset, so page 1 starts at 0
        (the original `page_size * page_index` skipped the first page).
        """
        qz = self.x.compressToBase64(
            self.DataQiJson % (self.KeyWord, self.page_size,
                               self.page_size * (self.page_index - 1)))
        # Debug: show the decompressed query that will be sent.
        print(self.x.decompressFromBase64(qz))
        return {'qz': qz}

    def start_requests(self):
        """Issue the initial POST search request instead of Scrapy's default GET."""
        print('下载器开始请求网络.....')
        data = self._build_form()
        print(data)
        # Scrapy uses Request for GET and FormRequest for POST.
        yield scrapy.FormRequest(url=self.post_url, formdata=data, callback=self.parse_post)

    def getDataInfoPage(self, response):
        """Parse a design's detail page and normalise its image URLs."""
        item = response.meta['item']
        item['images'] = response.xpath(
            "//img[not(contains(@src, 'data:image')) and not(contains(@src, '.gif'))]/@src").getall()
        for index, imgitem in enumerate(item['images']):
            # Already excluded by the XPath above; kept as a safeguard.
            if imgitem.find('data:image') > -1:
                continue
            if imgitem.find('../jsp/') > -1:
                # Make the path absolute and normalise the KEY parameter.
                fixed = GetRealImagePathUrl(
                    imgitem.replace("../jsp/", "https://designdb.wipo.int/designdb/jsp/"))
                if fixed is not None:  # helper may fail on malformed URLs
                    imgitem = fixed
            if imgitem.find('.high') > -1:
                # Swap the high-resolution suffix for the thumbnail one.
                imgitem = imgitem.replace(".high", "-th")
            item['images'][index] = imgitem
        yield item

    def parse_post(self, response):
        """Parse one search-results page, request detail pages, paginate."""
        data = response.json()
        result = data['response']['docs']
        print(result)
        self.total = data['response']['numFound']
        if self.page_index == 1:
            # Ceiling division: a partial last page still counts as a page.
            self.total_page = -(-self.total // self.page_size)

        for DataItem in result:
            item = DesigndbSpiderItem()
            item['ID'] = DataItem['ID']
            item['name'] = DataItem['SOURCE'] + " " + DataItem['ID']
            item['source'] = DataItem['SOURCE']
            item['holder'] = DataItem['HOL'][0] if "HOL" in DataItem else ''
            item['regtime'] = DataItem['RD']
            if 'IMG' in DataItem:
                item['imgcout'] = len(DataItem['IMG'])
            else:
                item['imgcout'] = 0
                item['images'] = []
            item['code'] = DataItem['ID']
            item['type'] = self.KeyWord
            item['image_paths'] = []
            # Second-level request: the detail page for this design.
            getDateInfoUrl = '''https://designdb.wipo.int/designdb/jsp/getData.jsp?qi=%s&SOURCE=%s&LANG=en&XML=%s&KEY=%s'''
            yield scrapy.Request(
                url=(getDateInfoUrl % (
                    '1-yMz6VFsC7YkmYonCfWW4heHdfWx/AZUGs3iQqnSsqbg=',
                    DataItem['SOURCE'],
                    DataItem['DOC'],
                    item['ID'].split(".")[1])),
                callback=self.getDataInfoPage, meta={'item': item})

        # Request the next page; `<` (not `<=`) so we stop after the last
        # page instead of requesting one empty page past the end. The form
        # uses self.KeyWord (the original hard-coded 'sunglasses' here).
        if self.page_index < self.total_page:
            self.page_index += 1
            data = self._build_form()
            print("翻页：", self.page_index, data)
            yield scrapy.FormRequest(url=self.post_url, formdata=data, callback=self.parse_post)
