import base64
import csv
import re
import time
import json
from os import system

import psycopg2
import pandas as pd

from playwright.sync_api import sync_playwright


def handle_json(json, csv_path="./data/detail/storeC_20230206.csv"):
    """Append one detail record from an NMPA queryDetail response to a CSV.

    Expects ``json['data']['detail']`` to be a mapping with keys ``f0``..``f14``
    (one value per CSV column).  When the detail payload is ``None`` the raw
    response is printed instead of written.

    :param json: decoded JSON body of a queryDetail response.
                 (Name kept for backward compatibility even though it shadows
                 the stdlib ``json`` module inside this function.)
    :param csv_path: CSV file to append to; default preserves the original
                     hard-coded path for existing callers.
    """
    # ``with`` guarantees the handle is closed even if writerow raises
    # (original opened/closed manually around a try/finally).
    with open(csv_path, 'a', encoding='utf-8', newline='') as csv_file:
        writer = csv.writer(csv_file)
        results = json['data']['detail']
        if results is not None:
            # Emit fields f0..f14 in order as a single row.
            writer.writerow(tuple(results[f'f{i}'] for i in range(15)))
        else:
            # BUG FIX: original ``print('error' + json)`` raised TypeError
            # (str + dict); print the payload as a separate argument instead.
            print('error', json)


def on_response(response):
    """Playwright response hook: forward detail payloads to ``handle_json``.

    Only successful (HTTP 200) responses from the queryDetail endpoint are
    processed; every other response is ignored.
    """
    is_detail_call = '/data/nmpadata/queryDetail' in response.url
    if is_detail_call and response.status == 200:
        payload = response.json()
        handle_json(payload)


def getData():
    """Build the global ``urls`` work list of detail pages to crawl.

    Loads every record id from ``yjj_storec_clean_20230203`` (newest first),
    resumes from the checkpoint id saved in ``storeC_index.csv`` (written by
    ``run`` before each fetch, so the interrupted id is retried), and wraps
    each remaining id in an :class:`IdModel` holding its base64-encoded
    search-info URL.

    Raises ``ValueError`` (unchanged behavior) if the checkpoint id is not
    present in the id list — clear the checkpoint file in that case.
    """
    # NOTE(review): credentials are hard-coded; move them to env vars/config.
    conn = psycopg2.connect(
        dbname="data_original",
        user="postgres",
        password="j3IA6rebQ9NG",
        host="192.168.6.226",
        port="5432"
    )
    try:
        # Read-only query, so no commit is needed before closing.
        df = pd.read_sql("select * from yjj_storec_clean_20230203 order by id desc", con=conn)
    finally:
        # FIX: original leaked the connection if read_sql raised.
        conn.close()
    idList = list(df['id'])

    # Read the checkpoint: the id that was last being processed.
    # .strip() guards against a trailing newline in a hand-edited file
    # (run() itself writes the bare id with no newline).
    with open("./data/detail/storeC_index.csv", 'r', encoding='utf-8') as index:
        strLine = index.readline().strip()
    print(strLine)
    # Resume from the checkpoint id, inclusive (it is fetched again).
    idList = idList[idList.index(strLine):]

    global urls
    urls = []
    item_id = 'ff808081817f00dc01818e18c7c80bde'
    for item in idList:
        # The site expects the query string base64-encoded in the ``nmpa`` param.
        s = 'id=' + item + '&itemId=' + item_id
        encode = base64.b64encode(s.encode('utf-8'))
        model = IdModel(item, 'https://www.nmpa.gov.cn/datasearch/search-info.html?nmpa=' + encode.decode('utf-8'))
        urls.append(model)


class IdModel:
    """Pairing of a database record id with the detail-page URL to fetch."""

    def __init__(self, id, url):
        # ``id`` shadows the builtin; name kept for backward compatibility.
        self.id = id
        self.url = url

    def __repr__(self):
        # Added for debuggability when the work list is printed/inspected.
        return f"IdModel(id={self.id!r}, url={self.url!r})"


def run():
    """Visit every URL in the global ``urls`` list with a visible Firefox
    browser, letting the ``on_response`` hook persist matching payloads.

    Side effects:
      * writes the current id to ``storeC_index.csv`` before each page load
        so ``getData`` can resume from the last attempted record;
      * appends scraped rows to the detail CSV (via the response hook);
      * kills every ``cmd.exe`` process at the end — Windows only,
        presumably to stop an external watchdog task; confirm before reuse.
    """
    with sync_playwright() as p:
        for browser_type in [p.firefox]:
            # (original note) a baidu.com entry was appended to the end of
            # the list to avoid a listener error on the final page
            print(len(urls))
            browser = browser_type.launch(headless=False)
            page = browser.new_page()

            def cancel_request(route, request):
                # Abort image downloads to save bandwidth.
                route.abort()

            page.route(re.compile(r"(\.png)|(\.jpg)"), cancel_request)
            page.on('response', on_response)
            for index, item in enumerate(urls, start=1):
                # try:
                    # region checkpoint: record the id currently being fetched
                    with open("./data/detail/storeC_index.csv", 'w', encoding='utf-8') as hello:
                        hello.write(item.id)
                    print(item.id + ' ' + item.url)
                    # endregion checkpoint

                    # print(index + ":" + item)
                    # Throttle: pause 4 s between pages to avoid rate limiting.
                    page.wait_for_timeout(4000)
                    # page.locator('#submit').press('Enter')
                    page.goto(item.url)
                    #page.reload('networkidle')
                    # page.locator('#submit').press('Enter')
                    # Wait until network traffic settles so on_response has fired.
                    page.wait_for_load_state('networkidle')
                # except:
                #     continue
            # Stop the (Windows) monitoring task before closing the browser.
            system('taskkill /F /IM cmd.exe')
            page.wait_for_timeout(2000)
            browser.close()


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Build the URL work list from the database, then crawl it.
    getData()
    run()
