# coding=utf-8
from playwright.sync_api import sync_playwright
import time
import random
import win32clipboard
import pickle
import json
import requests
import pymysql
from parsel import Selector
# Crawl detailed info of patents handled by patent agencies in 2021-2022; implemented mainly with Playwright.

# Playwright script-recording command: playwright codegen -o scripy.py
port = 'xhhg'
user = r'42264'
passwd = r'Xhhg2019@'
session = requests.session()
conn = pymysql.connect(host="localhost",
                       user='root',
                       port=3306,
                       password='123456',
                       database='big_data')
cur = conn.cursor()


def post_parsel_patent(anid, pnid, vid):
    """Fetch one patent's detail record from CNIPA and insert it into MySQL.

    Parameters
    ----------
    anid, pnid, vid : str
        Application id, publication id and version id taken from one
        search-result record (see parse_parten_list).

    Side effects: reads headers.pkl, performs an HTTP POST via the module
    session, inserts one row into agent_patent_list and commits.
    """
    # Replay the captured request headers (incl. authorization) saved by on_request().
    with open('headers.pkl', 'rb') as f:
        headers = pickle.load(f)
    # Request the patent detail view using the patent ids.
    patent_info_post_url = r'https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-views/view/abstracts'
    patent_post_data = {
        "interfaceId": "2000999",
        "vid": vid,
        "anId": anid,
        "pnId": pnid,
        "dbName": "CNDB"
    }
    patent_info_res = session.post(
        patent_info_post_url, headers=headers, data=json.dumps(patent_post_data))
    # Parse the detailed patent info out of the JSON payload.
    p_info_data = patent_info_res.json()
    p_status = p_info_data.get('status')
    print(f'--专利详细信息p_status：{p_status}')
    patent_info = p_info_data.get('t')
    print(patent_info)

    # commonAbstract is a fixed-order list:
    # [number, date, region, agent person, agency] — TODO confirm against API.
    common_abstract_list = patent_info.get('commonAbstract')
    apply_number = common_abstract_list[0].get('value')  # application number
    apply_date = common_abstract_list[1].get('value')  # application date
    apply_date = apply_date.replace('.', '-')  # normalize 2021.01.01 -> 2021-01-01
    apply_country = common_abstract_list[2].get('value')  # applicant region
    agent_person = common_abstract_list[3].get('value')  # agent (person)
    agent_name_str = common_abstract_list[4].get('value')  # agency as "name code"
    # maxsplit=1 tolerates extra spaces in the trailing part (original raised on them).
    agent_name, agent_code = agent_name_str.split(' ', 1)

    sq_abstract_list = patent_info.get('sqAbstract')
    gk_abstract_list = patent_info.get('gkAbstract')
    # Fall back to the publication abstract when the grant abstract is empty.
    if sq_abstract_list == [] and gk_abstract_list != []:
        sq_abstract_list = gk_abstract_list

    def _field(keyword):
        # Concatenate values of all entries whose CN index name contains keyword.
        return ''.join(item.get('value') for item in sq_abstract_list
                       if keyword in item.get('index').get('indexCNName'))

    sq_gonkaihao = _field('公开号')    # publication number (grant)
    sq_gonkaidate = _field('公开日期')  # publication date (grant)
    sq_name = _field('申请人')         # applicant
    sq_faminren = _field('发明人')     # inventor
    sq_abstract_text = _field('摘要')  # abstract, may contain HTML markup
    ab_soup = Selector(sq_abstract_text)
    absstrct = ab_soup.xpath('//text()').extract_first()  # abstract plain text
    sq_title = patent_info.get('title')  # invention title
    # Parameterized insert: the old f-string SQL broke on quotes in scraped
    # text and was open to SQL injection.
    insert_sql = """INSERT INTO agent_patent_list (apply_number,apply_date,apply_country,agent_person,agent_name,agent_code,sq_gonkaihao,sq_gonkaidate,sq_name,sq_faminren,sq_title,absstrct) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
    params = (apply_number, apply_date, apply_country, agent_person, agent_name,
              agent_code, sq_gonkaihao, sq_gonkaidate, sq_name, sq_faminren,
              sq_title, absstrct)
    print(insert_sql)
    cur.execute(insert_sql, params)
    conn.commit()
    print(f'---专利：{sq_title}_已抓取入库---')


def parse_parten_list(page_data):
    """Walk one page of search results and fetch the detail record of each patent.

    page_data is the parsed JSON of a result-list request; every record
    carries the anId/pnId/vid triple required by post_parsel_patent.
    """
    records = page_data.get('t').get('searchResultRecord')
    for record in records:
        post_parsel_patent(record.get('anId'),
                           record.get('pnId'),
                           record.get('vid'))


def on_response(response):
    """Playwright response hook: watch for the two result-list API responses.

    Parsing the payloads here is disabled; pages are re-fetched later via
    post_parsel_page instead.
    """
    print(response.url)
    search_api = r'https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-search/search/tablesearch/executeTableSearch'
    paging_api = r'https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-search/search/results/getResults'
    if search_api in response.url:
        # First result page.
        res_data = response.json()
        print('@@开始解析第一页数据@@')
        # parse_parten_list(res_data)
        print('@@第一页数据解析完成@@')
    elif paging_api in response.url:
        # Subsequent result pages.
        print('@@开始解析第二页数据@@')
        # res_data = response.json()
        # parse_parten_list(res_data)
        print('@@第二页数据解析完成@@')


def on_request(request):
    """Playwright request hook: capture auth headers and paging body for replay.

    When the browser hits the search API, its headers (which include the
    authorization token) are pickled to headers.pkl; when it hits the paging
    API, the JSON post body is pickled to postdata.pkl. Both files are read
    back by post_parsel_patent / post_parsel_page.
    """
    url = request.url
    if r'https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-search/search/tablesearch/executeTableSearch' in url:
        captured = request.headers  # plain dict
        print(captured)
        # Persist headers + post body locally for later requests.
        with open('headers.pkl', 'wb') as f:
            pickle.dump(captured, f, pickle.HIGHEST_PROTOCOL)
        print('@@成功获取请求headers信息@@')
    elif r'https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-search/search/results/getResults' in url:
        body = request.post_data_json
        with open('postdata.pkl', 'wb') as f:
            pickle.dump(body, f, pickle.HIGHEST_PROTOCOL)
        print('@@成功获取请求post_data信息@@')


def post_parsel_page(page_num):
    """Fetch one page of the result list by replaying the captured request.

    Loads the saved headers and post body, patches in the requested page
    number, POSTs the paging API and hands the JSON to parse_parten_list.
    """
    print(f'@@开始抓取第{page_num}页数据@@')
    with open('headers.pkl', 'rb') as f:
        headers = pickle.load(f)
    with open('postdata.pkl', 'rb') as f:
        postdata = pickle.load(f)
    # Re-target the captured request body at the wanted page.
    postdata['pagination']['page'] = page_num
    paging_url = r'https://pss-system.cponline.cnipa.gov.cn/api/pubsearch-app-search/search/results/getResults'
    response = session.post(paging_url, headers=headers,
                            data=json.dumps(postdata))
    page_data = response.json()
    res_status = page_data.get('status')
    print(f'--请求返回状态：{res_status}--')
    parse_parten_list(page_data)


def parse_page(page):
    """Scrape all publication numbers off the current result page into MySQL.

    Parameters
    ----------
    page : playwright Page
        The popup page showing the result table; its rendered HTML is parsed
        with parsel.

    Side effects: inserts one row per publication number into agent_gonkaihao
    and commits after each insert.
    """
    page_soup = Selector(page.content())
    # All publication numbers (2nd column of the result table).
    gonkai_list = page_soup.xpath(
        '//ul[@class="tablecol tbadyTit"]/li[2]/div/span/text()').extract()
    # Parameterized insert: the old f-string SQL was open to injection and
    # broke on any quote character in the scraped value.
    sql = '''INSERT INTO agent_gonkaihao (gonkaihao) VALUES(%s)'''
    for gonkaihao in gonkai_list:
        cur.execute(sql, (gonkaihao,))
        conn.commit()


def login_get_authorization(user, passwd):
    """Open the CNIPA senior-search page, run the query and crawl every result page.

    Parameters
    ----------
    user, passwd : str
        Account credentials. NOTE(review): currently unused — login happens
        manually in the opened browser window before pressing Enter at the
        prompt; kept for interface compatibility.

    Side effects: drives a visible Firefox browser via Playwright and, through
    parse_page, inserts publication numbers into MySQL.
    """
    import math  # only needed for the page-count ceiling

    with sync_playwright() as p:
        # Reuse a previously saved login state so cookies survive restarts.
        storage_state_path = './auth/XHHG_state.json'
        browser = p.firefox.launch(headless=False)
        context = browser.new_context(
            base_url='https://pss-system.cponline.cnipa.gov.cn', storage_state=storage_state_path)
        # Very generous timeouts (100 min) because login is manual.
        context.set_default_timeout(6000000)
        context.set_default_navigation_timeout(6000000)
        page = context.new_page()
        page.goto('https://pss-system.cponline.cnipa.gov.cn/seniorSearch')
        page.wait_for_load_state()
        # Wait for the operator to log in manually in the browser window.
        input('登录：')
        # Search expression: agency code AND application-date range.
        Text = '代理机构=(42300) AND 申请日=2021-01-01:2023-05-19'
        page.click('xpath=//textarea[@id="textarea"]')
        time.sleep(0.5)
        page.fill('xpath=//textarea[@id="textarea"]', '')
        time.sleep(0.5)
        page.fill('xpath=//textarea[@id="textarea"]', Text)
        time.sleep(1)
        # The search button opens the result list in a popup window.
        with page.expect_popup() as page1_info:
            page.get_by_role("button", name=" 检索").click()
        page1 = page1_info.value
        time.sleep(10)
        page1.wait_for_load_state('networkidle')
        # Read the total hit count to derive the page count (10 hits per page).
        # (Duplicate in-function `from parsel import Selector` removed: the
        # module already imports Selector at the top.)
        soup = Selector(page1.content())
        patent_number = soup.xpath(
            'normalize-space(//div[@class=" rightMainTop large"]/div[@class="text"]/span/text())').extract_first()
        print(f'patent_number:{patent_number}')
        patent_number = int(patent_number) if patent_number.isdigit() else 0
        max_page_number = math.ceil(patent_number / 10)
        print(f'max_page_number:{max_page_number}')
        page_num = 1
        # (Dead `if page_num > 1:` branch removed — page_num was just set to 1,
        # so it could never fire.)
        # Crawl each result page: scrape it, then click the "next page" button.
        while page_num <= max_page_number:
            print('开始解析第%s页' % page_num)
            parse_page(page1)
            page1.click(
                'xpath=//*[@id="app"]/div[1]/section/div/div/div[3]/div/div[1]/div[2]/div[2]/div[4]/div/div/span[3]/div/button[3]')
            time.sleep(3)
            page1.wait_for_load_state('networkidle')
            print('第%s页抓取完成' % page_num)
            page_num += 1

        page1.close()
        browser.close()


if __name__ == '__main__':
    # Run the full crawl, then release the module-level MySQL connection.
    login_get_authorization(user, passwd)
    conn.close()
