import asyncio
import json
import time

import pandas as pd
import pymysql
import redis
from playwright.async_api import async_playwright
from playwright.sync_api import sync_playwright

from connect_db import ConnectMysqldb


# Redis connection holding the work queues (sets of patent application
# numbers); decode_responses=True so spop() returns str rather than bytes.
redis_con = redis.Redis(host='127.0.0.1',
                        port=6379, db=2, decode_responses=True)

# Module-level MySQL connection/cursor, closed in the __main__ block.
# NOTE(review): credentials are hard-coded — consider moving to env/config.
conn = pymysql.connect(host="localhost",
                       user='root',
                       port=3306,
                       password='123456',
                       database='big_data')
cur = conn.cursor()
# Connection settings consumed by ConnectMysqldb in parse_page().
config = {'mysql_host': "localhost",
          'mysql_user': 'root',
          'mysql_port': 3306,
          'mysql_passwd': '123456',
          'mysql_db': 'big_data'
          }


def request_page_list(context, authorization, sqh, page=1, size=10):
    """Query the public patent-search API for one page of patent records.

    Args:
        context: Playwright browser context; its ``request`` attribute is
            used to issue the API call (shares cookies with the page).
        authorization: bearer token string sent in the Authorization header.
        sqh: patent application number (zhuanlisqh) to search for.
        page: 1-based page index to fetch (default 1).
        size: number of records per page (default 10).

    Returns:
        dict: the parsed JSON body of the search response.
    """
    api_request_context = context.request
    headers = {
        "Accept": "application/json, text/plain, */*",
        "Content-Type": "application/json;charset=utf-8",
        "Authorization": 'Bearer ' + authorization,
        "userType": "USER_RZ_DAILIJIG"
    }
    data = {
        "zhuanlilx": "",
        "page": page,
        "size": size,
        "zhuanlisqh": sqh,
        "sortDataName": "",
        "sortType": "",
    }
    # Public search endpoint; timeout=0 disables the per-request timeout
    # because these queries can be very slow.
    page_response = api_request_context.post(
        '/api/search/undomestic/publicSearch', data=data, headers=headers, timeout=0)

    page_json = page_response.json()
    status = page_json.get('status')
    print(f'---{sqh}_status:{status}---')
    return page_json


def parse_page(page_json, sqh):
    """Persist one page of patent records into MySQL.

    Extracts the record list from the API response, tags each row with the
    application number, and appends the rows to the dljg_patent_info table.

    Args:
        page_json: parsed JSON response from the search API.
        sqh: application number; stored in an extra ``sqh`` column.
    """
    # Guard against failed queries: 'data' may be None and 'records' may be
    # missing/empty — previously this raised AttributeError on None.
    records = (page_json.get('data') or {}).get('records') or []
    if not records:
        return

    df = pd.DataFrame(records)
    df['sqh'] = sqh
    with ConnectMysqldb(config) as conn:
        # NOTE(review): DataFrame.to_sql expects an SQLAlchemy connectable;
        # assumes ConnectMysqldb yields one — confirm.
        df.to_sql('dljg_patent_info', conn,
                  if_exists='append', index=False)


def run_gonkai(i, inst, port):
    """Synchronous worker: drain the Redis set *port* of application
    numbers, query each one via the public search API, and store results.

    Args:
        i: worker index, used only in progress logging.
        inst: institution key; selects the saved login state file
            ``./auth/{inst}_state.json``.
        port: name of the Redis set holding pending application numbers.
    """
    # Derive the storage-state path and bearer token from the saved login
    # state (same layout as run()). Previously these names were undefined
    # in this scope and raised NameError at runtime.
    storage_state_path = f'./auth/{inst}_state.json'
    authorization = None
    with open(storage_state_path) as file:
        storage_state = json.load(file)
        for origin in storage_state.get('origins'):
            for item in origin.get('localStorage'):
                if item.get('name') == 'ACCESS_TOKEN':
                    authorization = item.get('value')
    print(f'成功读取{inst}登录信息:{authorization}')

    with sync_playwright() as p:
        browser = p.firefox.launch()
        context = browser.new_context(
            base_url='https://cpquery.cponline.cnipa.gov.cn',
            storage_state=storage_state_path)
        # Generous timeouts (5 min): the site is slow under load.
        context.set_default_timeout(300000)
        context.set_default_navigation_timeout(300000)
        page = context.new_page()
        page.set_default_timeout(300000)
        page.set_default_navigation_timeout(300000)
        page.goto(
            'https://cpquery.cponline.cnipa.gov.cn/chinesepatent/index')
        page.wait_for_load_state('networkidle')
        scraped_nums = 0
        while redis_con.scard(port):
            sqh = redis_con.spop(port)
            scraped_nums += 1
            print(f'@@开始查询申请号:{sqh}_专利状态信息@@')
            first_page_json = request_page_list(
                context, authorization, sqh)
            parse_page(first_page_json, sqh)
            print(f'@@{sqh}_基本信息查询完成@@')
            print(
                f"进程{i}_已抓取专利数：{scraped_nums}，剩余{redis_con.scard(port)}个")
            # Reload to keep the session alive between queries.
            page.reload()
        browser.close()

async def run(playwright):
    """Async worker: drain the Redis set *port* (module global) of
    application numbers, query each via the public search API, and store
    the results.

    Args:
        playwright: the object yielded by ``async_playwright()``.
    """
    # Read the saved login state for the configured institution (module
    # global ``inst``) and pull the bearer token out of localStorage.
    storage_state_path = f'./auth/{inst}_state.json'
    authorization = None
    with open(storage_state_path) as file:
        storage_state = json.load(file)
        origins = storage_state.get('origins')
        local_storage_list = [item.get('localStorage') for item in origins]
        for local_storage in local_storage_list:
            for item in local_storage:
                if item.get('name') == 'ACCESS_TOKEN':
                    authorization = item.get('value')
    print(f'成功读取{inst}登录信息:{authorization}')

    browser = await playwright.firefox.launch()
    # base_url must be passed as a keyword — new_context() takes no
    # positional URL argument.
    context = await browser.new_context(
        base_url='https://cpquery.cponline.cnipa.gov.cn',
        storage_state=storage_state_path)
    page = await context.new_page()
    # These setters are synchronous even in the async API — awaiting their
    # None return value raised TypeError before.
    context.set_default_timeout(300000)
    context.set_default_navigation_timeout(300000)
    page.set_default_timeout(300000)
    page.set_default_navigation_timeout(300000)
    await page.goto(
        'https://cpquery.cponline.cnipa.gov.cn/chinesepatent/index')
    await page.wait_for_load_state('networkidle')
    scraped_nums = 0  # was missing: += on an unbound local raised UnboundLocalError
    while redis_con.scard(port):
        sqh = redis_con.spop(port)
        scraped_nums += 1
        print(f'@@开始查询申请号:{sqh}_专利状态信息@@')
        api_request_context = context.request
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Content-Type": "application/json;charset=utf-8",
            "Authorization": 'Bearer ' + authorization,
            "userType": "USER_RZ_DAILIJIG"
        }
        data = {
            "zhuanlilx": "",
            "page": 1,
            "size": 10,
            "zhuanlisqh": sqh,
            "sortDataName": "",
            "sortType": "",
        }
        # 公众查询 — public search; timeout=0 disables the request timeout.
        page_response = await api_request_context.post(
            '/api/search/undomestic/publicSearch', data=data, headers=headers, timeout=0)

        # APIResponse.json() is a coroutine in the async API.
        page_json = await page_response.json()
        status = page_json.get('status')
        print(f'---{sqh}_status:{status}---')
        # Store the page we just fetched instead of re-issuing the same
        # request through the sync helper (which does not await the async
        # client and would fail / double the traffic).
        parse_page(page_json, sqh)
        print(f'@@{sqh}_基本信息查询完成@@')
        print(
            f"已抓取专利数：{scraped_nums}，剩余{redis_con.scard(port)}个")
        # page.reload() is a coroutine — it must be awaited.
        await page.reload()
    await browser.close()



async def main():
    """Entry coroutine: start Playwright's async driver and run the scraper."""
    # Local import: the module top imports only sync_playwright; the async
    # variant lives in playwright.async_api and was previously undefined here.
    from playwright.async_api import async_playwright
    async with async_playwright() as playwright:
        await run(playwright)

if __name__ == '__main__':
    # Map each institution key to the Redis set that holds its pending
    # application numbers (set names are in Chinese).
    port_dict = {'XHHG': 'tmp_信合红谷接口查询',
                 'XH': 'tmp_协和端口查询', 'WB': "tmp_外部接口查询"}
    inst = 'XHHG'  # institution whose saved login state / queue is used
    port = port_dict.get(inst)  # Redis set name consumed by the worker loop
    asyncio.run(main())
    conn.close()  # release the module-level MySQL connection
