import argparse
import json
import math
import os
import base64
import time

from lib import sheet

import requests


class Paser:
    """Command-line client for the Hunter (qianxin) search API.

    Reads credentials from ``config/config.ini`` (a JSON file), runs one or
    more searches, and optionally writes the results to ``./outs/`` as a
    ``.txt`` URL list plus an ``.xlsx`` sheet.
    """

    parser = None               # lazily-built argparse parser (see getPaser)
    username = None             # loaded from config in __init__
    apiKey = None               # loaded from config in __init__
    grammar = "/openApi/search"           # single-search endpoint
    mul = "/openApi/search/batch"         # batch endpoint (currently unused)
    outputPath = "./outs/"                # directory for result files
    result = {}                 # sequential index -> raw JSON response text
    page = None                 # raw -p value: None, "all"/"*", or a number string
    page_size = 10              # results per page (bumped to 100 for "all" mode)

    def __init__(self):
        """Load username/apikey from the JSON config file.

        Opened read-only ("r") — the original "r+" requested write access
        that was never used.
        """
        with open(os.path.dirname(__file__) + "/../config/config.ini", "r") as config:
            jsonObject = json.loads(config.read())
            self.username = jsonObject.get("username")
            self.apiKey = jsonObject.get("apikey")

    def getPaser(self):
        """Build (once) and return the argparse parser for the CLI."""
        if self.parser is None:
            self.parser = argparse.ArgumentParser(description="Hunter search.")
            self.parser.add_argument('-k', '--key', dest='key', type=str, help="what are you want to search?",
                                     action='store')
            self.parser.add_argument('-f', '--file', dest='file', type=str,
                                     help="what are you want to search search search~")
            self.parser.add_argument('-o', '--output', dest='output', type=str,
                                     help="save result to file.support type as follow: .txt, .xlsx")
            self.parser.add_argument('-p', '--page', dest='page', type=str,
                                     help="default search page 1 and page_size 10, you can set -p to get more page result.example: -p 1/2/3/4/5/6/7... -p all can get all page.")
        return self.parser

    def run(self):
        """Parse CLI args, run the search, and optionally save the output."""
        p = self.getPaser().parse_args()

        if self.username is None or self.apiKey is None:
            print("username and apiKey can't be empty, check your config file :config/config.ini.")
            exit(0)

        # -k and -f are mutually exclusive sources of search keys.
        keys = []
        if p.key is not None and p.file is not None:
            print("Only one of -k/-f can and must be used at the same time.")
            exit(0)
        if p.file is not None:
            keys = self.pFile(p.file)
        if p.key is not None:
            keys.append(p.key)
        self.page = p.page
        self.search(keys)

        if p.output is not None:
            self.wFile(p.output)

    def pFile(self, filename):
        """Read search keys from *filename*, one per line.

        Returns a list of non-empty stripped lines; [] for a falsy filename.

        BUG FIX: the previous implementation looped ``while t != None`` — but
        ``readline()`` returns "" at EOF, never None, so it hung forever
        appending empty strings. Iterating the file object terminates at EOF.
        """
        if not filename:
            return []
        with open(filename) as f:
            return [line.strip() for line in f if line.strip()]

    def wFile(self, filename):
        """Write collected results to <name>_urls.txt and <name>_all.xlsx.

        Any extension on *filename* is stripped; outputs always use the
        fixed .txt/.xlsx extensions under self.outputPath.
        """
        content = self.result
        filename = str(filename)
        # Strip a user-supplied extension; output extension is always .txt.
        name = filename[:filename.rindex(".")] if "." in filename else filename
        ext = ".txt"

        os.makedirs(self.outputPath, exist_ok=True)

        # BUG FIX: quota counters are pre-initialized so the summary row
        # below no longer raises NameError when there are no results.
        consume_quota = None
        rest_quota = None

        with open(self.outputPath + name + "_urls" + ext, "w+") as f:
            rows = []
            for k in content:
                jsonObject = json.loads(content[k])
                if jsonObject.get("code") != 200:
                    print("search error.")
                data = jsonObject.get("data")
                if data is None:
                    continue
                arr = data.get("arr")
                consume_quota = data.get("consume_quota")
                rest_quota = data.get("rest_quota")
                if arr is None:
                    break
                for t in arr:
                    f.write(t["url"] + "\n")
                    rows.append({
                        "URL": t["url"],
                        "IP": t["ip"],
                        "DOMAIN": t["domain"],
                        "PORT": t["port"],
                        "PROTOCOL": t["protocol"],
                        "STATUS": t["status_code"],
                    })
            rows.append({"消耗积分": consume_quota, "剩余积分": rest_quota})
            sheet.writeToxlsx(self.outputPath + name + "_all.xlsx", rows)

        print(
            "Result saved in :" + self.outputPath + name + "_urls" + ext + " and " + self.outputPath + name + "_all.xlsx")

    def search(self, keys):
        """Query the Hunter API for every key in *keys* (consumed via pop()).

        Stores each page's raw JSON response in self.result and returns all
        found URLs as a newline-terminated string.

        BUG FIXES vs the previous version:
          * ``return result`` was inside the key loop, so only the FIRST key
            was ever searched; it now runs every key.
          * the page counter ``i`` is reset for each key.
          * ``self.result`` is keyed by a running index so pages from later
            keys no longer overwrite earlier ones.
        """
        url = "https://hunter.qianxin.com/"
        header = {"User-Agent": "aksdfhaskldfnspvogslkdnfvapsgdks;adlk;c"}
        result = ""
        while keys:
            search_b64 = base64.urlsafe_b64encode(keys.pop().encode("utf-8"))
            if self.page is None:
                page = 1
            elif self.page == "all" or self.page == "*":
                # Probe with a single small request to learn the total hits.
                req = requests.get(url=url + self.grammar, params={
                    "username": self.username,
                    "api-key": self.apiKey,
                    "search": search_b64,
                    "page": 1,
                    "page_size": 10,
                    "is_web": 3,
                    "status": "",
                    "start_time": "",
                    "end_time": ""
                }, headers=header)
                total = json.loads(req.text).get("data")["total"]
                if total >= 300:
                    yes = input(
                        str(total) + " results can be take ,are you sure want to take all?if no we'll take 3 pages [Y/n]").strip()
                    if yes == "" or yes.lower() in ("y", "yes"):
                        # Large pull: bigger pages to reduce request count.
                        self.page_size = 100
                        page = int(math.ceil(total / self.page_size))
                    else:
                        page = 3
                else:
                    page = int(math.ceil(total / self.page_size))
            else:
                page = int(self.page)

            post = {
                "username": self.username,
                "api-key": self.apiKey,
                "search": search_b64,
                "page": page,
                "page_size": self.page_size,
                "is_web": 3,
                "status": "",
                "start_time": "",
                "end_time": ""
            }
            i = 1  # page counter, restarted for every search key
            while i <= page:
                post["page"] = i
                req = requests.get(url=url + self.grammar, params=post, headers=header)
                print("taking result from page:" + str(i))
                page_res = req.text
                body = json.loads(page_res)
                if body.get("code") == 400:
                    print(body.get("message"))
                data = body.get("data")
                time.sleep(1.5)  # stay under the API rate limit
                if data is None or data["arr"] is None:
                    print("no more page, searching end.")
                    break
                self.result[len(self.result) + 1] = page_res
                i += 1
                for k in data["arr"]:
                    result += k["url"] + "\n"
                    print(k["url"])
        return result
