# -*- coding: utf-8 -*-
# author:WS
# time:2022/1/4:15:30:10
# ___msg___ = '通过申请人获取《中国及多国》专利数据并存入redis数据库中去重'
import json

from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from quote_folder.Db_connect import DB_connect
from selenium.webdriver.common.by import By
from selenium import webdriver
from bs4 import BeautifulSoup
import time, random, re


class spider_china_other_country:
    """Scrape "China and multi-country" patent records for one applicant from
    the CNIPA query site (http://cpquery.cnipa.gov.cn/) and stage them in
    Redis for deduplication.

    Flow: login_gov (manual captcha window) -> get_html (result-list page) ->
    get_data (parse table rows) -> into_redis (dedup + store).
    """

    def __init__(self, patentee, applicant):
        """
        :param patentee: patentee name; used as the prefix of all Redis keys
        :param applicant: applicant name the query is run for
        """
        self.get_url = "http://cpquery.cnipa.gov.cn/"
        self.develop_con = DB_connect().online_db()  # development DB connection
        self.develop_cur = self.develop_con.cursor()
        self.bigdata_con = DB_connect().online_big_data()  # big-data team DB connection
        self.bigdata_cur = self.bigdata_con.cursor()
        self.redis_con = DB_connect().patents_reids()  # Redis connection
        self.applicant = applicant
        self.patentee = patentee

    def login_gov(self):
        """Open the CNIPA site in Firefox and pre-fill the login form.

        The long random sleep leaves time for the captcha / login to be
        completed manually in the opened browser window.

        :return: tuple (browser, current_url) captured after the pause.
        """
        profile = webdriver.FirefoxOptions()
        profile.set_preference('network.proxy.type', 1)
        browser = webdriver.Firefox(options=profile, executable_path=r'D:\工具\geckodriver.exe')
        browser.get(self.get_url)
        time.sleep(5)
        # Wait for the login form to render before touching it.
        WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.ID, "username1"))).clear()
        # NOTE(review): hard-coded credentials; these should come from config/env.
        # find_element(By.ID, ...) replaces the find_element_by_id API removed in Selenium 4.
        browser.find_element(By.ID, 'username1').send_keys('18071440149')
        password_box = browser.find_element(By.ID, 'password1')
        password_box.clear()
        password_box.send_keys('Xhhg123456!')
        # Random pause: time for manual captcha entry, and less bot-like pacing.
        time.sleep(random.randint(20, 40))
        current_url = browser.current_url
        return browser, current_url

    def get_html(self, browser, current_url):
        """Load the result-list page and parse it.

        Extracts the level-1 page data (patent name, type, applicant, grant
        date, filing date, main IPC, ...) via get_data.

        :param browser: logged-in webdriver returned by login_gov
        :param current_url: URL after login; kept for interface compatibility
            (the query URL below is currently hard-coded)
        """
        browser.get(
            "http://cpquery.cnipa.gov.cn//txnQueryOrdinaryPatents.do?select-key%3Asortcol=&select-key%3Asort=&attribute-node%3Apagerow=10&attribute-node%3Astartrow=991&checkflag%3Aflag=1&select-key%3Ashenqingh=&select-key%3Azhuanlimc=&select-key%3Ashenqingrxm=%E6%AD%A6%E6%B1%89%E5%B8%82%E4%B8%AD%E5%BF%83%E5%8C%BB%E9%99%A2&select-key%3Azhuanlilx=&select-key%3Ashenqingr_from=&select-key%3Ashenqingr_to=&very-code=&captchaNo=&fanyeflag=1&verycode=fanye&attribute-node:record_start-row=1&attribute-node:record_page-row=20&#anchor")
        soup = BeautifulSoup(browser.page_source, 'lxml')
        self.get_data(soup)

    def get_data(self, soup):
        """Parse every row of the result table and push each record to Redis.

        Column layout (by <td> position): 0 patent type, 1 application
        number, 2 patent name, 3 applicant, 4 filing date, 5 grant date,
        6 main IPC classification.

        :param soup: BeautifulSoup of the result-list page
        """
        rows = soup.select('table[class="content_listx_patent"] tr')
        for row in rows:
            td_list = row.select("td")
            # Fields the list page does not provide; kept empty so every
            # record shares one schema downstream.
            dict_info = {
                'law_status': '',         # legal status
                'open_num': '',           # publication number
                'open_day': '',           # publication date
                'inventor': '',
                'agency': '',             # patent agency
                'agent': '',
                'applicant_address': '',
                'postal_code': '',
                'digest': '',             # abstract
            }
            for td_number in range(len(td_list)):
                if td_number == 0:
                    span_tag = td_list[td_number].select("span span[name='record:zhuanlilx']")
                    dict_info["patent_type"] = span_tag[0]["title"]  # patent type
                elif td_number == 1:
                    a_tag = td_list[td_number].select("a[name='record:shenqingh']")
                    dict_info["application_number"] = a_tag[0].text.strip()  # application number
                elif td_number == 2:
                    dict_info["name"] = td_list[td_number].text.strip()  # patent name
                elif td_number == 3:
                    dict_info["applicant"] = td_list[td_number].select("span[name='record:shenqingrxm']")[0]["title"]  # applicant
                elif td_number == 4:
                    dict_info["apply_day"] = td_list[td_number].select("span[name='record:shenqingr']")[0]["title"]  # filing date
                elif td_number == 5:
                    dict_info["power_day"] = td_list[td_number].select("span[name='record:shouquanggr']")[0]["title"]  # grant date
                elif td_number == 6:
                    dict_info["classification_ipc"] = td_list[td_number].select("span[name='record:zhufenlh']")[0]["title"]  # main IPC
            # BUG FIX: store every row, not just the last one (the original
            # called into_redis once after the loop). Header rows carry no
            # <td>, so skip anything that produced no application number.
            if "application_number" in dict_info:
                self.into_redis(dict_info)

    def into_redis(self, data):
        """Deduplicate one record by application number and store it in Redis.

        sadd returns 1 when the member is new (store the full record as JSON
        in the per-patentee hash) and 0 when it is a duplicate (read back and
        print the previously stored record).

        :param data: one parsed patent record (must contain application_number)
        """
        return_flag = self.redis_con.sadd(f'{self.patentee}:application_number', data["application_number"])  # dedup
        if return_flag:
            self.redis_con.hset(f'{self.patentee}:Patent-to-heavy', data["application_number"], json.dumps(data))
            print(return_flag)  # 1 => stored; duplicates never reach this branch
        else:
            # BUG FIX: the original called hset() with only two arguments
            # (a TypeError) and used a key with a stray space that could never
            # match the key written above; hget is the read operation.
            jsondata = json.loads(self.redis_con.hget(f"{self.patentee}:Patent-to-heavy", data['application_number']))
            print(jsondata)

    def mian_fun(self):
        """Entry point: log in, scrape, then always release resources.

        (Method name kept as-is — "mian" typo — for caller compatibility.)
        """
        browser, current_url = self.login_gov()
        try:
            self.get_html(browser, current_url)  # scrape patents for the applicant
        finally:
            # Release DB/browser resources even if scraping raised.
            self.bigdata_con.commit()
            self.bigdata_cur.close()
            self.bigdata_con.close()
            self.develop_cur.close()
            self.develop_con.close()
            # quit() shuts down the whole driver session; close() would leave
            # the geckodriver process running.
            browser.quit()


if __name__ == '__main__':
    # The name strings must stay identical to those used by the <高级查询系统>
    # spider so both feeds deduplicate into the same Redis keys before the
    # combined load into the database.
    spider = spider_china_other_country('武汉市中心医院', '武汉市中心医院')
    spider.mian_fun()
