import json
import os
import re

from lxml import etree

from PageObjectRequests import base_url
from settings import materials_path
from util import http_utils
from util.pdfSplit.compress_pdf import CompressPdf
from util.pdfSplit.pdf_split import get_file_oss
from util.selenium.webdriver.common.by import By

from base.BaseMethod import BM
from util.DataQuery.FormDataDecorator import FormDataDecorator

# Fill in the "知识产权汇总表" (Intellectual Property summary form)
from util.g import g
from util.sys_dict import iprType, iprGetway


class WriteIPForm():
    """Fill in the "知识产权汇总表" (intellectual-property summary form).

    Workflow (all driven by :meth:`main`):
      1. On first use, scrape every IP/NIP record already uploaded to the
         site (both paged lists) into a list cached on the global ``g``.
      2. Skip the record if it is already present; otherwise fetch the
         "add" page for its struts token and hidden fields, look up the
         IP's validity, optionally download/compress an attachment PDF,
         and POST the record to the save endpoint.
    """

    # Link element that opens the IP summary form page.
    form_btn_loc = (By.LINK_TEXT, '知识产权汇总表')

    # The iframe hosting the form content.
    content_iframe_loc = (By.CSS_SELECTOR, "#innocomFrame")

    delete = False

    # Convenience hint while writing code; remove once base methods are done.
    # driver = driver
    def __init__(self, driver=None):
        """Store the webdriver and reset this class's record cache on ``g``."""
        self.driver = driver
        # Max attachment size used by check_file/CompressPdf — presumably MB; TODO confirm.
        self.size = 2
        # Becomes True after the already-uploaded records have been scraped once.
        self.flag = False
        # Cache of "IPxx:name" / "NIPxx" signatures keyed by class name on g.
        setattr(g, self.class_name, list())

    @property
    def class_name(self):
        """Name of this class; used as the cache attribute key on the global ``g``."""
        return self.__class__.__name__

    def handler_version(self, s):
        """Strip version-number fragments (e.g. ``V1.2``) from *s* and return it.

        Used so that records differing only by a version suffix compare equal.
        """
        # NOTE(review): the class [\d+/.] also matches literal '+' and '/';
        # presumably only digits and dots were intended — confirm before changing.
        # NOTE(review): non-raw string with \d works but should be r"..." style.
        p = ".*?([vV]+[\d+/.]*\d+).*?"
        res = re.findall(p, s)
        for i in res:
            s = s.replace(i, "")
        return s

    @FormDataDecorator
    def main(self, ipr_id='', ipr_type='', auth_num='', ipr_name='', ipr_getway='', auth_date='', is_nip=False,
             oss_path=None):
        """Upload one intellectual-property record to the summary form.

        :param ipr_id: record serial number; zero-padded to two digits.
        :param ipr_type: IP category display name, mapped through ``iprType``.
        :param auth_num: authorization/patent number ("CN" prefix is rewritten to "ZL").
        :param ipr_name: IP name.
        :param ipr_getway: acquisition method display name, mapped through ``iprGetway``.
        :param auth_date: authorization date string, submitted as-is.
        :param is_nip: True for NIP (not-audited IP) records — changes the signature prefix.
        :param oss_path: OSS path of the attachment PDF; downloaded/compressed when needed.
        :raises Exception: any failure is re-raised with file/line context appended.
        """
        try:
            class_name = self.class_name
            li_list = getattr(g, class_name)
            # Resolve the URL of the IP summary page (first call only).
            if not self.flag:
                ele = BM.get_ele(driver=self.driver, ele_loc=self.form_btn_loc, time=6)
                detail_url = ele.get_attribute("href")
                BM.get_cookie(driver=self.driver)
                if not g.all_optimized:
                    BM.switch_to_frame(driver=self.driver, ele_loc=self.content_iframe_loc)
                # Normalise relative hrefs against base_url.
                detail_url = detail_url if detail_url.startswith("https://") else \
                    base_url + (detail_url if detail_url.startswith("/") else "/" + detail_url)
            if not self.flag:
                # Collect the already-uploaded IP records (paged list).
                res = None
                text = None
                self.flag = True
                while True:
                    if not res:
                        res = http_utils.send_get(detail_url, driver=self.driver)
                    text = res.get("text")
                    # Strip the random strings the site injects into the HTML.
                    random_insert_chars_p = ".*?([\r\n].[0-9a-zA-Z]{4,}[\r\n].).*?"
                    res = re.findall(random_insert_chars_p, text, re.S)
                    for i in res:
                        text = text.replace(i, "")
                    tree = etree.HTML(text)
                    # Parse the uploaded records on this page into "id:name" signatures.
                    tr_list = tree.xpath(
                        '//form[@id="DataEprIntellectualPropert_list"]//div[@class="tableRegionDivBody"]//tbody/tr')
                    for tr in tr_list:
                        ip_id_str = BM.get_first(tr.xpath("./td[2]/text()"))
                        ip_name_str = BM.get_first(tr.xpath("./td[3]/text()"))
                        sign_str = "{}:{}".format(ip_id_str, ip_name_str)
                        li_list.append(sign_str)

                    # Locate the next-page link; absence means this was the last page.
                    next_list = tree.xpath(
                        "//form[@id='DataEprIntellectualPropert_list']//div[@class='page']/ul/li[4]/a")
                    next_a = BM.get_first(next_list)
                    next_onclick = None if not next_a else next_a.attrib.get("onclick")
                    if not next_onclick:
                        # Strip version numbers from the signatures before comparison.
                        li_list = [self.handler_version(li) for li in li_list]
                        setattr(g, class_name, li_list)
                        break
                    else:
                        # Extract the next-page URL and page number from the onclick JS.
                        next_url_pattern = """.*?setAttribute\(['|"]action['|"],'(.*?)'.*?"""
                        next_res = re.findall(next_url_pattern, next_onclick, re.S)
                        next_url = BM.get_first(next_res)
                        next_page_pattern = """.*?value=['|"](\d+)['|"];.*?"""
                        next_page_res = re.findall(next_page_pattern, next_onclick, re.S)
                        next_page = BM.get_first(next_page_res)
                        if not next_url or not next_page:
                            break
                        next_url = (base_url + next_url) if not next_url.startswith("https://") else next_url
                        # Hidden-field values the paging POST must echo back.
                        query_data = {
                            "ec_i": "",
                            "DataEprIntellectualPropert_list_crd": "10",
                            "DataEprIntellectualPropert_list_p": next_page,
                            "DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_pzscqbh": "",
                            "DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_psqxmmc": "",
                            "DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_plb": "",
                            "DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_psqh": "",
                            "dataInnocomId": "",
                            "type": "",
                            "DataEprIntellectualPropert_list_rd": "10"
                        }
                        ec_i_pattern = '.*?"ec_i".*?value="(.*?)".*?'
                        ec_i_res = re.findall(ec_i_pattern, text, re.S)
                        query_data["ec_i"] = "".join(ec_i_res)
                        pxmbh_pattern = '.*?"DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_pzscqbh".*?value="(.*?)".*?'
                        pxmbh_res = re.findall(pxmbh_pattern, text, re.S)
                        query_data["DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_pzscqbh"] = "".join(pxmbh_res)
                        pxmmc_pattern = '.*?"DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_psqxmmc".*?value="(.*?)".*?'
                        pxmmc_res = re.findall(pxmmc_pattern, text, re.S)
                        query_data["DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_psqxmmc"] = "".join(pxmmc_res)
                        pjsly_pattern = '.*?"DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_plb".*?value="(.*?)".*?'
                        pjsly_res = re.findall(pjsly_pattern, text, re.S)
                        query_data["DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_plb"] = "".join(pjsly_res)
                        psqh_pattern = '.*?"DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_psqh".*?value="(.*?)".*?'
                        psqh_res = re.findall(psqh_pattern, text, re.S)
                        query_data["DataEprIntellectualPropert_list_a_dataEprIntellectualPropert_psqh"] = "".join(psqh_res)
                        dataInnocomId_pattern = '.*?"dataInnocomId".*?value="(.*?)".*?'
                        dataInnocomId_res = re.findall(dataInnocomId_pattern, text, re.S)
                        query_data["dataInnocomId"] = BM.get_first(dataInnocomId_res)
                        type_pattern = '.*?"type".*?value="(.*?)".*?'
                        type_res = re.findall(type_pattern, text, re.S)
                        query_data["type"] = BM.get_first(type_res)
                        # res is reused by the top of the loop to read the next page.
                        res = http_utils.send_post(next_url, data=query_data, driver=self.driver)
                # Collect the already-uploaded NIP (not-audited IP) records.
                while True:
                    # text may still hold the last page of the IP loop; only refetch if empty.
                    if not text:
                        res = http_utils.send_get(detail_url, driver=self.driver)
                        text = res.get("text")
                    # Strip the random strings the site injects into the HTML.
                    random_insert_chars_p = ".*?([\r\n].[0-9a-zA-Z]{4,}[\r\n].).*?"
                    res = re.findall(random_insert_chars_p, text, re.S)
                    for i in res:
                        text = text.replace(i, "")
                    tree = etree.HTML(text)
                    # Parse uploaded NIP records — only the id column is used here.
                    tr_list = tree.xpath(
                        '//form[@id="DataNotAuditIp_list"]//div[@class="tableRegionDivBody"]//tbody/tr')
                    for tr in tr_list:
                        ip_id_str = BM.get_first(tr.xpath("./td[2]/text()"))
                        # print(ip_id_str)
                        li_list.append(ip_id_str)

                    # Locate the next-page link; absence means this was the last page.
                    next_list = tree.xpath(
                        "//form[@id='DataNotAuditIp_list']//div[@class='page']/ul/li[4]/a")
                    next_a = BM.get_first(next_list)
                    next_onclick = None if not next_a else next_a.attrib.get("onclick")
                    if not next_onclick:
                        # Strip version numbers from the signatures before comparison.
                        li_list = [self.handler_version(li) for li in li_list]
                        setattr(g, class_name, li_list)
                        break
                    else:
                        # Extract the next-page URL and page number from the onclick JS.
                        next_url_pattern = """.*?setAttribute\(['|"]action['|"],'(.*?)'.*?"""
                        next_res = re.findall(next_url_pattern, next_onclick, re.S)
                        next_url = BM.get_first(next_res)
                        next_page_pattern = """.*?value=['|"](\d+)['|"];.*?"""
                        next_page_res = re.findall(next_page_pattern, next_onclick, re.S)
                        next_page = BM.get_first(next_page_res)
                        if not next_url or not next_page:
                            break
                        next_url = (base_url + next_url) if not next_url.startswith("https://") else next_url
                        # Hidden-field values the paging POST must echo back.
                        query_data = {
                            "ec_i": "",
                            "DataNotAuditIp_list_crd": "10",
                            "DataNotAuditIp_list_p": next_page,
                            "DataNotAuditIp_list_a_dataNotAuditIp_pzscqbh": "",
                            "DataNotAuditIp_list_a_dataNotAuditIp_psqxmmc": "",
                            "DataNotAuditIp_list_a_dataNotAuditIp_plb": "",
                            "DataNotAuditIp_list_a_dataNotAuditIp_psqrq": "",
                            "DataNotAuditIp_list_a_dataNotAuditIp_psqh": "",
                            "DataNotAuditIp_list_a_dataNotAuditIp_phdfs": "",
                            "dataInnocomId": "",
                            "type": "",
                            "DataNotAuditIp_list_rd": "10"
                        }
                        # Unlike the IP loop above, hidden fields are read via XPath here.
                        tree = etree.HTML(text)
                        query_data["ec_i"] = BM.get_first(tree.xpath("//input[@name='ec_i']/@value"))
                        query_data["DataNotAuditIp_list_a_dataNotAuditIp_pzscqbh"] = BM.get_first(tree.xpath(
                            "//input[@name='DataNotAuditIp_list_a_dataNotAuditIp_pzscqbh']/@value"))
                        query_data["DataNotAuditIp_list_a_dataNotAuditIp_psqxmmc"] = BM.get_first(tree.xpath(
                            "//input[@name='DataNotAuditIp_list_a_dataNotAuditIp_psqxmmc']/@value"))
                        query_data["DataNotAuditIp_list_a_dataNotAuditIp_plb"] = BM.get_first(tree.xpath(
                            "//input[@name='DataNotAuditIp_list_a_dataNotAuditIp_plb']/@value"))
                        query_data["DataNotAuditIp_list_a_dataNotAuditIp_psqrq"] = BM.get_first(tree.xpath(
                            "//input[@name='DataNotAuditIp_list_a_dataNotAuditIp_psqrq']/@value"))
                        query_data["DataNotAuditIp_list_a_dataNotAuditIp_psqh"] = BM.get_first(tree.xpath(
                            "//input[@name='DataNotAuditIp_list_a_dataNotAuditIp_psqh']/@value"))
                        query_data["DataNotAuditIp_list_a_dataNotAuditIp_phdfs"] = BM.get_first(tree.xpath(
                            "//input[@name='DataNotAuditIp_list_a_dataNotAuditIp_phdfs']/@value"))
                        query_data["dataInnocomId"] = BM.get_first(tree.xpath("//input[@name='dataInnocomId']/@value"))
                        query_data["type"] = BM.get_first(tree.xpath("//input[@name='type']/@value"))
                        res = http_utils.send_post(next_url, data=query_data, driver=self.driver)
                        text = res.get("text")

            # Build this record's signature: "IPxx:name" (or "NIPxx:name").
            ipr_id = str(ipr_id).zfill(2)
            ipr_id_sign = "IP" + ipr_id
            if is_nip:
                ipr_id_sign = "NIP" + ipr_id
            sign = "{}:{}".format(ipr_id_sign, ipr_name)
            # Compare version-stripped, matching how li_list entries were stored.
            if self.handler_version(sign) in li_list:
                g.logger.info("知识产权({}) 已存在".format(sign))
                return

            # Fetch the "add IP" page — supplies the struts token and hidden fields.
            add_url = "https://gqqy.chinatorch.org.cn/xonlinereport/inforeport/DataInnocom/addDataEprIntellectualPropert.do"
            if not li_list:
                BM.get_cookie(driver=self.driver)
            add_res = http_utils.send_post(url=add_url, driver=self.driver)
            token = add_res.get("token")
            text = add_res.get("text")

            if not li_list:
                if not g.all_optimized and BM.isElementExist(element=self.content_iframe_loc, driver=self.driver):
                    # This step can be skipped once everything is fully optimized.
                    BM.switch_to_frame(driver=self.driver, ele_loc=self.content_iframe_loc)

            # Normalise patent numbers: CN prefix becomes ZL; patents must start with ZL.
            if auth_num.startswith('CN'):
                auth_num = auth_num.replace('CN', 'ZL')
            if ipr_type in ["发明专利（非国防专利）","实用新型专利","外观设计专利","发明专利（国防专利）"]:
                if not auth_num.startswith("ZL"):
                    auth_num = "ZL{}".format(auth_num)

            # IP validity data returned by the lookup service.
            ip_data = None
            # Query IP validity by authorization number.
            findIpBySqh_url = "https://gqqy.chinatorch.org.cn/xonlinereport/inforeport/DataInnocom/findIpBySqh.do"
            find_data = {
                "dataEprIntellectualPropert.Plb": iprType.get(ipr_type),
                "dataEprIntellectualPropert.Psqh": auth_num.strip()
            }
            find_res = http_utils.send_post(url=findIpBySqh_url, data=find_data, driver=self.driver)
            find_text = find_res.get("text")
            # print(find_text)
            find_json = json.loads(find_text)
            g.logger.info("find_json: {}".format(find_json))
            # Whether a supporting attachment is mandatory for this record.
            must_file = False
            msg = ""
            # On success, later replace authorization date / fromIpId / propertyNameyc.
            if find_text and find_json and find_json.get("msg") == "success":
                ip = find_json.get("ip")
                ip_data = ip
                isAdd = ip.get("isAdd")
                if isAdd is False:
                    # Case 4: owner name mismatch — attachment required.
                    msg = "{}-贵企业名称与权利人名称({})不一致，请核对专利号/著作权号是否输入正确。如果输入正确，请继续输入该知识产权的所有信息后，点击上传附件（必须包括知识产权证明材料和市场监督管理局提供的企业名称变更证明材料），地方高新技术企业认定机构将会重点核查本知识产权的归属。".format(g.entname, ip.get("cpa"))
                    g.logger.error(msg)
                    must_file = True
                if ip.get("addStatus") == "无效":
                    # Case 0: IP marked invalid — attachment required.
                    msg = "该知识产权无效，请核对专利号/著作权号，如果输入正确，请上传证明材料，科技主管部门会重点审查本知识产权。"
                    g.logger.error(msg)
                    must_file = True
            if find_json.get("msg") == "failure_null":
                # Case 2: lookup service returned no data.
                must_file = True
                g.logger.error("佰腾未返回数据")
            if find_json.get("msg") == "failure":
                # Case 3: lookup service responded abnormally.
                must_file = True
                g.logger.error("接口响应异常")

            # Resolve the attachment path (placeholder PDF when mandatory but absent).
            file_path = None
            if must_file:
                file_path = os.path.join(materials_path, "replace.pdf")
            if g.need_file:
                if oss_path:
                    file_path = BM.get_material_path() + "\\IP\\{}_{}.pdf".format("ip", ipr_id)
                    if is_nip:
                        file_path = BM.get_material_path() + "\\IP\\{}_{}.pdf".format("nip", ipr_id)
                    is_exist = BM.check_file(file_path=file_path, size=self.size)
                    if is_exist is False:
                        # Download from OSS, then compress to the size limit.
                        oss_file = get_file_oss(oss_path=oss_path, local_path=file_path + ".pdf")
                        BM.wait_file(file_path=oss_file)
                        CompressPdf(file_path=oss_file, max_size=self.size).compress()
                        BM.wait_file(file_path=file_path)

            # IP save/update endpoint.
            save_url = "https://gqqy.chinatorch.org.cn/xonlinereport/inforeport/DataInnocom/saveOrUpdateDataEprIntellectualPropert.do"
            data = {
                "struts.token.name": "token",
                "token": token,
                "dataEprIntellectualPropert.Pzscqbh": ipr_id,
                "dataEprIntellectualPropert.Plb": iprType.get(ipr_type),
                "dataEprIntellectualPropert.Psqh": auth_num.strip(),
                "dataEprIntellectualPropert.Psqxmmc": ipr_name.strip(),
                "dataEprIntellectualPropert.Phdfs": iprGetway.get(ipr_getway),
                "dataEprIntellectualPropert.Psqrq": auth_date,
                # Attachment goes as a separate multipart field:
                # "upload: (binary)",
                "dataEprIntellectualPropert.id": "",
                "dataInnocom.id": "",
                "dataInnocomId": "",
                "dataId": "4da10a8fc1fb11eba90a00163e827061",
                "dataEprIntellectualPropert.dataId": "",
                "dataEprIntellectualPropert.entId": "",
                "dataEprIntellectualPropert.createDate": "",
                "dataEprIntellectualPropert.fromIp": "",
                "dataEprIntellectualPropert.pssxmbh": "",
                "dataEprIntellectualPropert.cpa": "",
                "curYear": "0"
            }

            # Hidden request parameters scraped from the "add" page HTML.
            dataEprIntellectualPropert_id_pattern = '.*?"dataEprIntellectualPropert.id".*?value="(.*?)".*?'
            dataEprIntellectualPropert_id_res = re.findall(dataEprIntellectualPropert_id_pattern, text, re.S)
            data["dataEprIntellectualPropert.id"] = "".join(dataEprIntellectualPropert_id_res)
            dataInnocom_id_pattern = '.*?"dataInnocom.id".*?value="(.*?)".*?'
            dataInnocom_id_res = re.findall(dataInnocom_id_pattern, text, re.S)
            data["dataInnocom.id"] = BM.get_first(dataInnocom_id_res)
            dataInnocomId_pattern = '.*?"dataInnocomId".*?value="(.*?)".*?'
            dataInnocomId_res = re.findall(dataInnocomId_pattern, text, re.S)
            data["dataInnocomId"] = "".join(dataInnocomId_res)
            dataId_pattern = '.*?"dataId".*?value="(.*?)".*?'
            dataId_res = re.findall(dataId_pattern, text, re.S)
            data["dataId"] = "".join(dataId_res)
            dataEprIntellectualPropert_dataId_pattern = '.*?"dataEprIntellectualPropert.dataId".*?value="(.*?)".*?'
            dataEprIntellectualPropert_dataId_res = re.findall(dataEprIntellectualPropert_dataId_pattern, text, re.S)
            data["dataEprIntellectualPropert.dataId"] = "".join(dataEprIntellectualPropert_dataId_res)
            dataEprIntellectualPropert_entId_pattern = '.*?"dataEprIntellectualPropert.entId".*?value="(.*?)".*?'
            dataEprIntellectualPropert_entId_res = re.findall(dataEprIntellectualPropert_entId_pattern, text, re.S)
            data["dataEprIntellectualPropert.entId"] = "".join(dataEprIntellectualPropert_entId_res)
            dataEprIntellectualPropert_createDate_pattern = '.*?"dataEprIntellectualPropert.createDate".*?value="(.*?)".*?'
            dataEprIntellectualPropert_createDate_res = re.findall(dataEprIntellectualPropert_createDate_pattern, text, re.S)
            data["dataEprIntellectualPropert.createDate"] = "".join(dataEprIntellectualPropert_createDate_res)
            dataEprIntellectualPropert_fromIp_pattern = '.*?"dataEprIntellectualPropert.fromIp".*?value="(.*?)".*?'
            dataEprIntellectualPropert_fromIp_res = re.findall(dataEprIntellectualPropert_fromIp_pattern, text, re.S)
            data["dataEprIntellectualPropert.fromIp"] = "".join(dataEprIntellectualPropert_fromIp_res)
            dataEprIntellectualPropert_pssxmbh_pattern = '.*?"dataEprIntellectualPropert.pssxmbh".*?value="(.*?)".*?'
            dataEprIntellectualPropert_pssxmbh_res = re.findall(dataEprIntellectualPropert_pssxmbh_pattern, text, re.S)
            data["dataEprIntellectualPropert.pssxmbh"] = "".join(dataEprIntellectualPropert_pssxmbh_res)
            dataEprIntellectualPropert_cpa_pattern = '.*?"dataEprIntellectualPropert.cpa".*?value="(.*?)".*?'
            dataEprIntellectualPropert_cpa_res = re.findall(dataEprIntellectualPropert_cpa_pattern, text, re.S)
            data["dataEprIntellectualPropert.cpa"] = "".join(dataEprIntellectualPropert_cpa_res)
            curYear_pattern = '.*?"curYear".*?value="(\d+)".*?'
            curYear_res = re.findall(curYear_pattern, text, re.S)
            data["curYear"] = BM.get_first(curYear_res)
            # End of page-carried parameters.
            # Prefer the lookup service's values for these fields when present.
            if ip_data:
                # if ip_data.get("psqrqString"):
                #     data["dataEprIntellectualPropert.Psqrq"] = ip_data.get("psqrqString")
                if ip_data.get("fromIp"):
                    data["dataEprIntellectualPropert.fromIp"] = ip_data.get("fromIp")
                # if ip_data.get("psqxmmc"):
                #     data["dataEprIntellectualPropert.Psqxmmc"] = ip_data.get("psqxmmc")
                if ip_data.get("pssxmbh"):
                    data["dataEprIntellectualPropert.pssxmbh"] = ip_data.get("pssxmbh")
                if ip_data.get("cpa"):
                    data["dataEprIntellectualPropert.cpa"] = ip_data.get("cpa")
                # if ip_data.get("phdfs"):
                #     data["dataEprIntellectualPropert.Phdfs"] = ip_data.get("phdfs")
                # if ip_data.get("psqh"):
                #     data["dataEprIntellectualPropert.Psqh"] = ip_data.get("psqh")

            if file_path:
                g.logger.info("附件: {}".format(file_path))
            g.logger.info("AD_VALUE： {}".format(g.cookie.get("AD_VALUE")))
            # Submit the record (multipart when file_path is set).
            res = http_utils.send_post(url=save_url, data=data, file_path=file_path,
                                       file_key="upload", driver=self.driver)
            text = res.get("text")
            message = res.get("message")
            g.logger.info("知识产权表（{}）上传结果: {}".format(sign, message))
            # TODO: handle the returned result value properly.
            if u"成功" in message or "重复" in message or "加入到已用知识产权列表" in message:
                # Success / duplicate / already-used all count as uploaded.
                li_list.append(sign)
            else:
                g.logger.info("text: {}".format(text))

        except Exception as err:
            # Re-raise with file and line number appended for easier triage.
            # NOTE(review): raising a new Exception drops the original traceback;
            # consider `raise Exception(err_msg) from err`.
            err_msg = "{} file:{} lines:{}".format(
                err, err.__traceback__.tb_frame.f_globals["__file__"],
                err.__traceback__.tb_lineno)
            raise Exception(err_msg)
