import requests

from bs4 import BeautifulSoup
import subprocess
import re
import tempfile
import os
import time
import json


class GgExplain(object):
    """Extract the ``FB_PUBLIC_LOAD_DATA_`` payload from a Google Forms page.

    The page embeds its form definition as a JS assignment
    (``var FB_PUBLIC_LOAD_DATA_ = [...];``). This class fetches the page,
    pulls out that JSON-ish array, and hands it to an external Node.js
    script (``js_file/gg/retrieve2invoke.js``) for conversion into a
    common structure.
    """

    def __init__(self, link):
        # Target form URL.
        self.link = link
        # Raw JS payload extracted by get_initial_state_obj(); empty until then.
        self.json_explained_param = ""

    def get_initial_state_obj(self):
        """Fetch the page and store the FB_PUBLIC_LOAD_DATA_ payload.

        On success, ``self.json_explained_param`` holds the raw array text.
        On any failure (no match, malformed script) it prints a notice and
        leaves the attribute unchanged. Returns None either way.
        """
        header = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
        }
        # allow_redirects=False: a redirect (e.g. to a login page) yields a
        # 3xx body with no payload, which the search below reports as missing.
        response = requests.get(self.link, allow_redirects=False, headers=header, timeout=10)
        soup = BeautifulSoup(response.content, 'html.parser')
        find_result = soup.find_all(string=re.compile("FB_PUBLIC_LOAD_DATA_"), limit=1)
        if len(find_result) < 1:
            print("未找到数据")
            return
        script_text = str(find_result[0])
        # Split on the FIRST '=' only: the payload itself contains '='
        # characters (base64, embedded URLs), so an unbounded split would
        # truncate the data.
        parts = script_text.split('=', 1)
        if len(parts) < 2:
            print("未找到数据")
            return
        payload = parts[1].strip()
        # Drop the trailing ';' statement terminator if present (instead of
        # blindly chopping the last character).
        if payload.endswith(';'):
            payload = payload[:-1]
        self.json_explained_param = payload

    def convert_to_common_struct(self):
        """Convert the extracted payload via the Node.js helper script.

        Writes the payload to a temp file, runs
        ``js_file/gg/retrieve2invoke.js`` on it, and returns the parsed JSON
        result. Returns "" when there is no payload or the script produced
        no output.
        """
        src_data = self.json_explained_param
        if not src_data:
            return ""
        # Hand the payload to Node via a temp file (it may exceed argv limits).
        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
            temp_file.write(src_data.encode())
        try:
            js_file = os.path.join(os.getcwd(), 'js_file', 'gg', 'retrieve2invoke.js')
            # subprocess.run waits for completion and cleans up the pipe,
            # unlike a bare Popen whose handle was previously leaked.
            completed = subprocess.run(
                ['node', js_file, temp_file.name],
                stdout=subprocess.PIPE,
            )
            res = completed.stdout.decode().strip()
        finally:
            # Always remove the temp file, even if node fails to launch.
            os.remove(temp_file.name)
        if not res:
            return ""
        return json.loads(res)

    def do_explain(self):
        """Run the full pipeline: fetch, extract, convert. Returns the result."""
        self.get_initial_state_obj()
        return self.convert_to_common_struct()
