import os
import re
import sys


class Parser:
    """
    Parser for HTTP request parameters contained in a browser
    "Copy as Node.js fetch" snippet or a raw request copied from Burp Suite.
    """
    def __init__(self):
        self.method = ""        # HTTP method (GET, POST, ...)
        self.host = ""          # host: IP address or domain name
        self.uri = ""           # request URI (path, starting with "/")
        self.url = ""           # full request URL
        self.body = ""          # request body (None when absent in a fetch snippet)
        self.proto = "http"     # scheme, "http" or "https"
        self.headers = {}       # request headers, keys lower-cased

    def __str__(self):
        return f"method: {self.method}\nhost: {self.host}\nuri: {self.uri}\nurl: {self.url}\n" \
               f"body: {self.body}\nproto: {self.proto}\nheaders: {self.headers}"

    def read(self, file):
        """
        description: read a text file.
        arguments:
            file(str): path of the file to read
        return:
            description: the file content on success, otherwise None
            type: any
        modify_records:
            - 2022-08-18, jetmie, create the action word.
        status: enable
        """
        if os.path.isfile(file):
            # "utf-8-sig" transparently strips an optional UTF-8 BOM,
            # replacing the manual b"\xef\xbb\xbf" check.
            with open(file, encoding="utf-8-sig") as f:
                return f.read()
        return None

    def parse_file(self, file="fetch/admin_fetch.js"):
        """
        description: parse the given file.
        arguments:
            file(str): path of the file to parse (falls back to "fetch.js"
                       when an empty value is passed explicitly)
        return:
            description: this parser instance (fluent style)
            type: any
        modify_records:
            - 2022-07-23, jetmie, create the action word.
        status: enable
        remarks:
            where the data comes from:
                a) browser devtools: "Copy" / "Copy as Node.js fetch"
                b) copied directly from Burp Suite
        """
        file = file or "fetch.js"
        content = self.read(file)
        if content:
            self.parse(content)

        return self

    def parse(self, data):
        """
        description: parse raw request data.
        arguments:
            data(str): the data to parse
        return:
            description: this parser instance (fluent style)
            type: any
        modify_records:
            - 2022-07-23, jetmie, create the action word.
        status: enable
        remarks:
            where the data comes from:
                a) browser devtools: "Copy" / "Copy as Node.js fetch"
                b) copied directly from Burp Suite
        """
        # Strip any mix of CR/LF from both ends in a single pass
        # (the old strip("\r").strip("\n") missed trailing "...\n\r").
        data = data.strip("\r\n")
        if data.startswith("fetch(") and data.endswith("});"):
            self._parse_fetch(data)     # browser "Copy as fetch" snippet
        else:
            self._parse_raw(data)       # raw request (Burp Suite)

        return self

    def _parse_fetch(self, data):
        """Parse a "Copy as Node.js fetch" snippet into this instance."""
        self.url = re.findall(r'fetch\("(.*?)",\s+\{', data, re.S)[0]
        options_src = re.findall(r'(\{.*\})', data, re.S)[0]
        # SECURITY: the options object is evaluated as a Python expression.
        # Builtins are blocked and the JS literals null/true/false are mapped
        # so that real-world snippets (e.g. "body": null) parse instead of
        # raising NameError — still, only feed trusted snippets here.
        options = eval(options_src, {"__builtins__": {}},
                       {"null": None, "true": True, "false": False})
        # Normalize all header keys to lower case.
        self.headers = {key.lower(): value
                        for key, value in options.get("headers", {}).items()}

        if options.get("referrer"):
            self.headers["referrer"] = options["referrer"]
        # The body is re-extracted from the raw text (not from the evaluated
        # dict) so escape sequences survive exactly as written.
        body_match = re.findall(r'"body": "(.*)",', data)
        self.body = body_match[0] if body_match else None
        self.method = options.get("method", "GET")

        # Split "https://host/path/..." into scheme, host and URI.
        parts = self.url.split("/")
        self.proto = parts[0].strip(":")
        self.host = parts[2]
        self.uri = "/" + "/".join(parts[3:])
        self.headers.setdefault("origin", f"{self.proto}://{self.host}")

        if self.body and not self.headers.get("content-length"):
            self.headers["content-length"] = str(len(self.body))
        self.headers.setdefault("connection", "close")
        self.headers.setdefault("user-agent",
                                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) " \
                                "AppleWebKit/537.36 (KHTML, like Gecko) " \
                                "Chrome/103.0.5060.114 Safari/537.36 " \
                                "Edg/103.0.1264.62")

    def _parse_raw(self, data):
        """Parse a raw HTTP request (e.g. copied from Burp Suite)."""
        # Burp copies typically use CRLF line endings, so accept both
        # separators; split(sep, 1) keeps a body that itself contains
        # blank lines intact (split("\n\n")[:2] used to truncate it).
        for sep in ("\r\n\r\n", "\n\n"):
            if sep in data:
                head, self.body = data.split(sep, 1)
                break
        else:
            print("Invalid data")
            sys.exit()

        self.headers = {}
        for line in head.splitlines():
            if ": " in line:
                key, _, value = line.partition(": ")
                key = key.lower()
                self.headers[key] = value

                if key == "host":
                    self.host = value
            elif line.count(" ") >= 2 and "HTTP/" in line:
                # Request line "METHOD URI HTTP/x.y" — the URI may itself
                # contain spaces, so keep everything between the first and
                # last token instead of a fixed 3-way unpack.
                tokens = line.split(" ")
                self.method = tokens[0]
                self.uri = " ".join(tokens[1:-1])

        # Derive the scheme from referer/origin when present, else https.
        url = self.headers.get("referer") or self.headers.get("origin")
        self.proto = url.split(":")[0] if url else "https"
        self.url = f"{self.proto}://{self.host}{self.uri}"


class ExParser:
    """Batch parser: extracts every fetch(...) snippet from a single file."""

    def parse_file(self, file="fetch/queue_fetch.js"):
        """Return a list of Parser objects, one per fetch snippet in *file*."""
        with open(file, encoding="utf-8") as handle:
            source = handle.read()
        # One complete snippet: from "fetch(" up to the closing "});",
        # matched non-greedily across line boundaries.
        snippet_re = re.compile(r"(fetch\(.*?\}\);)", re.S)
        return [Parser().parse(snippet) for snippet in snippet_re.findall(source)]


if __name__ == "__main__":
    # Parser demo:
    # parser = Parser()
    # parser.parse_file()
    # print(parser)
    # print(parser.headers)
    # print(parser.proto)

    # ExParser demo: parse every fetch snippet in the default file.
    for parsed in ExParser().parse_file():
        print(parsed)
