import argparse
import os
import subprocess
import time

import pandas as pd

# Path to the compiled httpx binary (projectdiscovery/httpx HTTP prober).
HTTPX_PATH = "/home/chuen/workspace/Encrypted-DNS-Traffic-Generation/toolkit/httpx/httpx"
# Tranco top-sites list; every line is "<rank>,<domain>" (e.g. "1,google.com").
TRANCO_LIST_PATH = "data/tranco/XVWN-DEFAULT.csv"
# Default number of domains to probe.  NOTE(review): "DOAMIN" is a typo, but
# the name is referenced by the CLI setup below, so it is kept as-is here.
NUMBER_OF_DOAMIN_TO_PROBE = 100
# Per-request httpx timeout, in seconds (passed to `httpx -timeout`).
PROBE_TIMEOUT = 10

class Probe:
    """Probe the top-n Tranco domains for HTTPS reachability using httpx."""

    def name(self, origin_name: str, include_time: bool = True, annotation: str = "") -> str:
        """Generate derived name from origin name.

        For example,
        run name("a.csv", True, "text1") will get "a-text1-20220101000000.csv",
        run name("a.csv", False, "text2") will get "a-text2.csv".

        Args:
            origin_name: Origin name (may include a directory path).
            include_time: Whether include the time the name was generated.
            annotation: For comment use.

        Returns:
            derived name: Derived name, in the same directory as origin_name.
        """
        # splitext() splits on the LAST dot only, so dotted path components
        # (e.g. "dir.v1/a.csv") keep their full stem; the previous
        # split(".")[0] truncated such names to "dir".
        stem, ext = os.path.splitext(origin_name)
        timestamp = "-" + time.strftime("%Y%m%d%H%M%S", time.localtime()) if include_time else ""
        tag = "" if annotation == "" else "-" + annotation

        return stem + tag + timestamp + ext

    def generate_urls_file(self, n: int, input_file: str) -> str:
        """Generate a urls file based on input file.

        Every line in input file should consist of index, comma and domain name,
        e.g. "1,google.com".
        Every line in output file is just a URL, e.g. "https://google.com".

        Args:
            n: Read top n lines of input file.
            input_file: Input file path.

        Returns:
            output_file: Output file path.
        """
        output_file = self.name(input_file, annotation="URL-{}".format(n))

        with open(input_file) as f_in, open(output_file, "w") as f_out:
            for _ in range(n):
                line = f_in.readline().strip()
                if not line:
                    break  # input has fewer than n lines; stop instead of raising IndexError
                f_out.write("https://" + line.split(",")[1] + "\n")

        return output_file

    def get_reachable_domains(self, n: int, probed_file: str, input_file: str) -> str:
        """Keep reachable domains, preserving the input file's ranking order.

        If a domain's URL is reachable, its final status code should be 2xx.

        Args:
            n: Read top n lines of input file.
            probed_file: httpx CSV output path (must contain "url" and
                "status-code" columns).
            input_file: Input file path ("index,domain" lines).

        Returns:
            reachable_domains_file: Reachable domains file path.
        """
        # Read probed file; httpx output may contain undecodable bytes.
        df = pd.read_csv(probed_file, encoding_errors="ignore")

        # Preserve URLs whose final status code is 2xx.
        ok_urls = df[(df["status-code"] >= 200) & (df["status-code"] < 300)]["url"]

        # Strip prefix ("https://", 8 chars) and postfix (":443", 4 chars).
        # Assumes every probed URL has exactly that shape — they are produced
        # by generate_urls_file and probed over HTTPS.
        domains = set(ok_urls.str[8:-4])

        # Walk the input file again so the original order is kept.
        reachable_domains = []
        with open(input_file) as f_in:
            for _ in range(n):
                line = f_in.readline().strip()
                if not line:
                    break  # fewer than n input lines
                domain = line.split(",")[1]
                if domain in domains:
                    reachable_domains.append(domain)

        reachable_domains_file = self.name(
            input_file, True, "reachable-{}".format(len(reachable_domains)))

        with open(reachable_domains_file, "w") as f_out:
            for domain in reachable_domains:
                f_out.write(domain + "\n")

        return reachable_domains_file

    def run(self, n: int, input_file: "str | None" = None) -> str:
        """Scan URLs via httpx, an open source http probe.

        See "https://github.com/projectdiscovery/httpx" for more information
        about httpx.

        Every line in input file should consist of index, comma and domain name,
        e.g. "1,google.com". This is default tranco list file format.

        This function will generate a temporary urls file.

        Args:
            n: Read top n lines of input file.
            input_file: Input file path; defaults to TRANCO_LIST_PATH.

        Returns:
            probed_file: Probed file path.
        """
        # None-sentinel behaves exactly like the old def-time default
        # `input_file=TRANCO_LIST_PATH` while resolving it at call time.
        if input_file is None:
            input_file = TRANCO_LIST_PATH

        host_file = self.generate_urls_file(n, input_file)
        probed_file = self.name(input_file, True, "probed-{}".format(n))

        cmd = [
            HTTPX_PATH,                  # httpx executable file path
            "-l", host_file,             # hostfile path
            "-timeout", str(PROBE_TIMEOUT),
            "-o", probed_file,           # probed_file
            "-csv",                      # CSV output
            "-s",                        # silent
            "-sc",                       # show status code
            "-fr",                       # follow redirects
        ]

        try:
            # argv list with shell=False: file names never reach a shell,
            # unlike the previous os.system() string command.
            subprocess.run(cmd)
        finally:
            os.remove(host_file)  # the urls file is temporary

        return probed_file

if __name__ == '__main__':
    description = "Detect whether top n web sites from tranco list are reachable via httpx."

    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-n',
                        type=int,
                        default=NUMBER_OF_DOAMIN_TO_PROBE,
                        help="Number of domains to probe.")
    args = parser.parse_args()

    p = Probe()
    probed_file = p.run(args.n)

    # Filter the same top-n slice that was probed.  The previous hard-coded
    # 50000 silently truncated results whenever n exceeded it and scanned
    # 50000 input lines even for small n.
    p.get_reachable_domains(args.n, probed_file, TRANCO_LIST_PATH)