import binascii
import getpass
import glob
import os
import pathlib
import re
import shlex
import shutil
import stat
import subprocess
import time
from tempfile import mkstemp, mkdtemp

from cbuild.core import logger, paths, errors
from cbuild.apk import cli as apki

_chroot_checked = False
_chroot_ready = False

def host_cpu():
    """Return the CPU architecture the build root is set up for."""
    return _host

def set_host(tgt):
    """Record tgt as the CPU architecture of the build root."""
    global _host
    _host = tgt

def chroot_check(force = False):
    """Check (and cache) whether the build root chroot is initialized.

    When the init stamp file exists, the host CPU recorded inside it
    is restored via set_host(). Pass force=True to redo the check even
    if a cached result exists.
    """
    global _chroot_checked, _chroot_ready

    if not force and _chroot_checked:
        return _chroot_ready

    _chroot_checked = True

    stamp = paths.bldroot() / ".cbuild_chroot_init"
    if stamp.is_file():
        _chroot_ready = True
        # the stamp stores the host cpu the chroot was made for
        set_host(stamp.read_text().strip())
    else:
        _chroot_ready = False

    return _chroot_ready

def _subst_in(pat, rep, src, dest = None):
    inf = open(src, "r")
    if dest:
        outf = open(dest, "w")
    else:
        fd, nm = mkstemp()
        outf = open(nm, "w")

    for line in inf:
        out = re.sub(pat, rep, line)
        outf.write(out)

    inf.close()
    outf.close()

    if not dest:
        shutil.move(nm, src)

def _remove_ro(f, path, _):
    os.chmod(path, stat.S_IWRITE)
    f(path)

def _prepare_passwd():
    """Seed /etc/passwd and /etc/group inside the build root.

    Copies the stock files shipped by base-files and appends the
    dedicated "cbuild" user and group (uid/gid 1337) used inside the
    sandbox.
    """
    srcdir = paths.distdir() / "main/base-files/files"
    etcdir = paths.bldroot() / "etc"

    for fn in ("passwd", "group"):
        shutil.copy(srcdir / "etc" / fn, etcdir)

    with open(etcdir / "passwd", "a") as pwf:
        pwf.write("cbuild:x:1337:1337:cbuild user:/tmp:/bin/nologin\n")

    with open(etcdir / "group", "a") as grf:
        grf.write("cbuild:x:1337:\n")

def _init():
    """Populate basic runtime configuration in the fresh build root."""
    apkdir = paths.bldroot() / "etc" / "apk"
    apkdir.mkdir(parents = True, exist_ok = True)

    shutil.copy("/etc/resolv.conf", paths.bldroot() / "etc")

    # generate machine-id: 16 random bytes, hex-encoded, newline-terminated
    midv = binascii.b2a_hex(os.urandom(16))
    (paths.bldroot() / "etc/machine-id").write_bytes(midv + b"\n")

def _prepare():
    """One-time setup of the build root after the bootstrap install.

    Creates the UTC localtime link, refreshes CA certificates when the
    tool is present, seeds passwd/group and finally writes the init
    stamp containing the host cpu.
    """
    stamp = paths.bldroot() / ".cbuild_chroot_init"
    if stamp.is_file():
        return

    if not (paths.bldroot() / "usr" / "bin" / "sh").is_file():
        raise errors.CbuildException("bootstrap not installed, can't continue")

    (paths.bldroot() / "etc" / "localtime").symlink_to(
        "../usr/share/zoneinfo/UTC"
    )

    if (paths.bldroot() / "usr/bin/update-ca-certificates").is_file():
        enter("update-ca-certificates", "--fresh")

    _prepare_passwd()

    # record the host cpu so later runs can restore it via chroot_check()
    with open(stamp, "w") as sf:
        sf.write(host_cpu() + "\n")

def setup_keys(rootp):
    """Recreate etc/apk/keys under rootp with all distribution pubkeys."""
    keydir = rootp / "etc/apk/keys"

    # start from a clean slate every time
    shutil.rmtree(keydir, ignore_errors = True)
    keydir.mkdir(parents = True, exist_ok = True)

    for srcd in ("etc/apk/keys", "etc/keys"):
        for kf in (paths.distdir() / srcd).glob("*.pub"):
            shutil.copy2(kf, keydir)

_crepos = None

def get_confrepos():
    """Return the configured repository templates, cached on first read.

    Each line of every *.conf file in etc/apk/repositories.d is one
    template entry.
    """
    global _crepos

    if _crepos:
        return _crepos

    repos = []
    for cfile in (paths.distdir() / "etc/apk/repositories.d").glob("*.conf"):
        with open(cfile) as cf:
            repos += [ln.strip() for ln in cf]

    _crepos = repos
    return _crepos

def repo_init():
    """Reset apk repository config in the build root.

    Reinstalls the signing keys and removes any stale repositories
    file and cache link; returns the (repositories, cache) paths so
    the caller can recreate them.
    """
    setup_keys(paths.bldroot())

    apkdir = paths.bldroot() / "etc/apk"

    repof = apkdir / "repositories"
    cachef = apkdir / "cache"

    for p in (repof, cachef):
        p.unlink(missing_ok = True)

    return repof, cachef

def shell_update(rnet):
    """Write the build root's repository list and refresh the pkg database.

    Local repositories come first (then the alternative repository when
    configured); remote repositories are appended only when rnet is
    true. Also sets up the shared apk cache symlink. Raises
    CbuildException when the in-chroot apk update fails.
    """
    hcpu = host_cpu()
    rfile, cfile = repo_init()

    def _write_local(rfh, repo, mntpt):
        # one line per section whose APKINDEX exists for the host cpu;
        # factored out as the same walk is done for the main and the
        # alternative repository
        for rd in repo.iterdir():
            for cr in get_confrepos():
                if not cr.startswith("/"):
                    continue
                cr = cr.lstrip("/").replace("@section@", rd.name)
                idxp = rd.parent / cr / hcpu / "APKINDEX.tar.gz"
                if idxp.is_file():
                    rfh.write(f"{mntpt}/{cr}\n")

    with rfile.open("w") as rfh:
        _write_local(rfh, paths.repository(), "/binpkgs")
        if paths.alt_repository():
            _write_local(rfh, paths.alt_repository(), "/altbinpkgs")
        # remote repos come last
        if rnet:
            from cbuild.core import profile
            for rd in profile.get_profile(hcpu).repos:
                for cr in get_confrepos():
                    if cr.startswith("/"):
                        continue
                    rfh.write(cr.replace("@section@", rd))
                    rfh.write("\n")

    # ensure any local apk commands can write into cache
    (paths.cbuild_cache() / "apk" / hcpu).mkdir(
        parents = True, exist_ok = True
    )
    cfile.symlink_to(f"/cbuild_cache/apk/{hcpu}")

    if apki.call_chroot(
        "update", [], None, full_chroot = True, allow_network = rnet
    ).returncode != 0:
        raise errors.CbuildException("failed to update pkg database")

def initdb(path = None):
    """Create an empty apk database layout under path.

    Defaults to the build root when path is not given. Also creates the
    usrmerge /lib symlink plus the empty installed/world files apk
    expects to find.
    """
    # we init the database ourselves
    if not path:
        path = paths.bldroot()

    for sub in (
        "tmp", "dev", "etc/apk", "usr/lib/apk/db",
        "var/cache/apk", "var/cache/misc", "var/log",
    ):
        (path / sub).mkdir(parents = True, exist_ok = True)

    # largely because of custom usrmerge
    libp = path / "lib"
    if not libp.is_symlink():
        libp.symlink_to("usr/lib")

    for reg in ("usr/lib/apk/db/installed", "etc/apk/world"):
        (path / reg).touch()

def install():
    """Install base-cbuild into the build root unless already set up."""
    if chroot_check():
        return

    log = logger.get()
    log.out("cbuild: installing base-cbuild...")

    initdb()
    setup_keys(paths.bldroot())

    if apki.call(
        "add", ["--no-chown", "--no-scripts", "base-cbuild"],
        "main", arch = host_cpu(),
    ).returncode != 0:
        raise errors.CbuildException("failed to install base-cbuild")

    log.out("cbuild: installed base-cbuild successfully!")

    paths.prepare()
    _prepare()
    # re-run the check so the cached state reflects the fresh chroot
    chroot_check(True)
    _init()

def get_fakeroot(bootstrap):
    """Return the fakeroot wrapper path usable for the current stage.

    During bootstrap the host-side script is used directly; otherwise
    the script is copied into the build root (once) and the in-chroot
    path is returned.
    """
    srcp = paths.cbuild() / "misc/fakeroot.sh"

    if bootstrap:
        return srcp

    dstp = paths.bldroot() / ".cbuild_fakeroot.sh"

    if not dstp.is_file():
        # drop anything stale (e.g. a dangling symlink) and refresh
        dstp.unlink(missing_ok = True)
        shutil.copyfile(srcp, dstp)

    return "/.cbuild_fakeroot.sh"

def _setup_dummy(rootp, archn):
    """Build and install a virtual provider package into a cross sysroot.

    Creates a throwaway local repository under a temp dir containing a
    meta package that "provides" exact versions of the core target
    packages (musl, libcxx, libunwind, ...) for archn, then installs it
    into rootp with apk. Raises CbuildException when packaging,
    indexing or installation fails; the temp repository is removed in
    all cases.
    """
    tmpd = mkdtemp()
    tmpd = pathlib.Path(tmpd)

    pkgn = "base-cross-target-meta"
    pkgv = "0.1-r0"
    # apk expects per-arch subdirectories in a repository
    repod = tmpd / archn
    repod.mkdir()

    epoch = int(time.time())

    logger.get().out(f"cbuild: installing virtual provider for {archn}...")

    # generate exact provided versions
    #
    # this is necessary because if any other versions are provided, it will
    # cause problems with some makedepends (e.g. static libraries for musl,
    # libunwind and so on depend on exact versions of their devel packages)

    from cbuild.core import template

    def _get_ver(pkgn):
        # read the template to learn the current pkgver/pkgrel of pkgn
        tobj = template.read_pkg(
            f"main/{pkgn}", archn, True, False, (1, 1), False, False, None,
            ignore_missing = True, ignore_errors = True
        )
        return f"{tobj.pkgver}-r{tobj.pkgrel}"

    fortify_ver = _get_ver("fortify-headers")
    atomic_ver = _get_ver("libatomic-chimera")
    files_ver = _get_ver("base-files")
    musl_ver = _get_ver("musl")
    llvm_ver = _get_ver("llvm")

    # exact-version provides plus the shared-library names the target
    # toolchain links against
    provides = [
        f"base-files={files_ver}",
        f"fortify-headers={fortify_ver}",
        f"libatomic-chimera={atomic_ver}",
        f"libatomic-chimera-devel={atomic_ver}",
        f"musl={musl_ver}",
        f"musl-devel={musl_ver}",
        f"libcxx={llvm_ver}",
        f"libcxx-devel={llvm_ver}",
        f"libcxxabi={llvm_ver}",
        f"libcxxabi-devel={llvm_ver}",
        f"libunwind={llvm_ver}",
        f"libunwind-devel={llvm_ver}",
        "so:libc.so=0",
        "so:libc++abi.so.1=1.0",
        "so:libc++.so.1=1.0",
        "so:libatomic.so.1=1.69.0",
        "so:libunwind.so.1=1.0",
    ]

    try:
        # assemble the .apk via apk mkpkg; allow_untrusted since the
        # throwaway package is not signed
        ret = apki.call(
            "mkpkg",
            [
                "--output", repod / f"{pkgn}-{pkgv}.apk",
                "--info", f"name:{pkgn}",
                "--info", f"version:{pkgv}",
                "--info", f"description:Target sysroot virtual provider",
                "--info", f"arch:{archn}",
                "--info", f"origin:{pkgn}",
                "--info", f"url:https://chimera-linux.org",
                "--info", f"build-time:{int(epoch)}",
                "--info", f"provides:{' '.join(provides)}",
            ],
            None, root = rootp, capture_output = True, arch = archn,
            allow_untrusted = True
        )
        if ret.returncode != 0:
            # surface apk's stderr before failing
            outl = ret.stderr.strip().decode()
            if len(outl) > 0:
                logger.get().out_plain(">> stderr:")
                logger.get().out_plain(outl)
            raise errors.CbuildException(f"failed to create virtual provider for {archn}")

        if not apki.build_index(repod, epoch, None):
            raise errors.CbuildException(f"failed to index virtual provider for {archn}")

        # install the freshly built meta package from the temp repository
        ret = apki.call(
            "add", ["--no-scripts", "--no-chown", "--repository", tmpd, pkgn],
            None, root = rootp, capture_output = True, arch = archn,
            allow_untrusted = True
        )

        if ret.returncode != 0:
            outl = ret.stderr.strip().decode()
            if len(outl) > 0:
                logger.get().out_plain(">> stderr:")
                logger.get().out_plain(outl)
            raise errors.CbuildException(f"failed to install virtual provider for {archn}")
    finally:
        # the temp repository is no longer needed once installed (or on error)
        shutil.rmtree(tmpd)

def _prepare_arch(prof):
    """(Re)create the cross sysroot for profile prof from scratch."""
    sysp = paths.bldroot() / prof.sysroot.relative_to("/")
    log = logger.get()

    # drop the whole thing
    if sysp.exists():
        log.out(f"cbuild: clearing sysroot for {prof.arch}...")
        shutil.rmtree(sysp)

    log.out(f"setting up sysroot for {prof.arch}...")
    initdb(sysp)
    setup_keys(sysp)
    _setup_dummy(sysp, prof.arch)

def prepare_arch(arch):
    """Prepare the cross sysroot for arch; no-op for native profiles."""
    paths.prepare()

    if not arch:
        return

    from cbuild.core import profile

    prof = profile.get_profile(arch)

    # only cross targets need a sysroot
    if prof.cross:
        _prepare_arch(prof)

def remove_autodeps(bootstrapping, prof = None):
    """Remove the autodeps-host/autodeps-target metapackages.

    When bootstrapping (stage 0, or unknown and no chroot stamp), the
    whole build root except builddir is wiped and reinitialized
    instead. When prof is a cross profile, its sysroot is recreated as
    well. Raises CbuildException when any removal fails.
    """
    if bootstrapping is None:
        bootstrapping = not (paths.bldroot() / ".cbuild_chroot_init").is_file()

    log = logger.get()

    log.out("cbuild: removing autodeps...")

    # best way to ensure everything is clean in stage 0
    if bootstrapping:
        # we need to keep builddir as that holds our state (logs etc)
        # everything else is handled by paths.prepare() and others
        for d in paths.bldroot().iterdir():
            if d.name == "builddir":
                continue
            if d.is_dir() and not d.is_symlink():
                shutil.rmtree(d)
            else:
                d.unlink()
        paths.prepare()
        initdb()
        repo_init()
        return

    paths.prepare()

    def _del_pkg(pkgn, kind):
        # remove pkgn if installed; returns True on failure
        # (a package that is not installed is not an error)
        if apki.call("info", [
            "--installed", pkgn
        ], None, capture_output = True, allow_untrusted = True).returncode != 0:
            return False

        del_ret = apki.call_chroot(
            "del", [pkgn], None, capture_output = True
        )

        if del_ret.returncode != 0:
            log.out_plain(f">> stderr ({kind}):")
            log.out_plain(del_ret.stderr.decode())
            return True

        return False

    # collect failures but attempt both removals before bailing
    failed = _del_pkg("autodeps-host", "host")
    failed = _del_pkg("autodeps-target", "target") or failed

    if prof and prof.cross:
        _prepare_arch(prof)

    if failed:
        raise errors.CbuildException("failed to remove autodeps")

def update(pkg):
    """Upgrade all packages inside the build root container.

    For cross templates, the package index inside the cross sysroot is
    refreshed as well; raises CbuildException when that refresh fails.
    """
    if not chroot_check():
        return

    logger.get().out(
        "cbuild: updating software in %s container..." % str(paths.bldroot())
    )

    paths.prepare()
    repo_init()

    # reinit passwd/group
    _prepare_passwd()

    for acmd, aargs in (("update", ["-q"]), ("upgrade", ["--available"])):
        apki.call_chroot(acmd, aargs, pkg, check = True, use_stage = True)

    # this is bootstrap-update
    if isinstance(pkg, str):
        return

    prof = pkg.profile()

    # not cross, so we don't care
    if not prof.cross:
        return

    sysp = paths.bldroot() / prof.sysroot.relative_to("/")

    # otherwise also update indexes in cross root
    if apki.call(
        "update", ["-q"], pkg, root = sysp, arch = prof.arch
    ).returncode != 0:
        raise errors.CbuildException("failed to update cross pkg database")

def enter(cmd, *args, capture_output = False, check = False,
          env = {}, stdout = None, stderr = None, wrkdir = None,
          bootstrapping = False, ro_root = False, ro_build = False,
          ro_dest = True, unshare_all = False, mount_binpkgs = False,
          mount_cbuild_cache = False, mount_cports = False,
          fakeroot = False, new_session = True, binpkgs_rw = False,
          signkey = None, wrapper = None, lldargs = None):
    defpath = "/usr/bin"
    if bootstrapping:
        defpath = os.environ["PATH"]

    from cbuild.core import profile

    hprof = profile.get_profile(host_cpu())

    envs = {
        "PATH": defpath,
        "SHELL": "/bin/sh",
        "HOME": "/tmp",
        "LC_COLLATE": "C",
        "LANG": "C.UTF-8",
        "UNAME_m": hprof.machine,
        **env
    }

    if hprof.wordsize == 32:
        kpers = "linux32"
    else:
        kpers = "linux64"

    if not unshare_all:
        if "NO_PROXY" in os.environ:
            envs["NO_PROXY"] = os.environ["NO_PROXY"]
        if "FTP_PROXY" in os.environ:
            envs["FTP_PROXY"] = os.environ["FTP_PROXY"]
        if "HTTP_PROXY" in os.environ:
            envs["HTTP_PROXY"] = os.environ["HTTP_PROXY"]
        if "HTTPS_PROXY" in os.environ:
            envs["HTTPS_PROXY"] = os.environ["HTTPS_PROXY"]
        if "SOCKS_PROXY" in os.environ:
            envs["SOCKS_PROXY"] = os.environ["SOCKS_PROXY"]
        if "FTP_RETRIES" in os.environ:
            envs["FTP_RETRIES"] = os.environ["FTP_RETRIES"]
        if "HTTP_PROXY_AUTH" in os.environ:
            envs["HTTP_PROXY_AUTH"] = os.environ["HTTP_PROXY_AUTH"]

    # if running from template, ensure wrappers are early in executable path
    if "CBUILD_STATEDIR" in envs:
        envs["PATH"] = envs["CBUILD_STATEDIR"] + "/wrappers:" + envs["PATH"]

    if new_session:
        envs["PYTHONUNBUFFERED"] = "1"

    # ccache path is searched first
    #
    # this has the implication of having ccache invoke whatever cc wrapper
    # we have at the time, rather than the other way around, which means
    # the wrappers don't have to account for ccache explicitly
    if "CCACHEPATH" in envs:
        envs["PATH"] = envs["CCACHEPATH"] + ":" + envs["PATH"]

    if ro_root:
        root_bind = "--ro-bind"
    else:
        root_bind = "--bind"

    if ro_build:
        build_bind = "--ro-bind"
    else:
        build_bind = "--bind"

    if ro_dest:
        dest_bind = "--ro-bind"
    else:
        dest_bind = "--bind"

    if bootstrapping:
        return subprocess.run(
            [cmd, *args], env = envs,
            capture_output = capture_output, check = check,
            stdout = stdout, stderr = stderr,
            cwd = os.path.abspath(wrkdir) if wrkdir else None
        )

    bcmd = [
        "bwrap",
        "--unshare-all",
        "--hostname", "cbuild",
        root_bind, paths.bldroot(), "/",
        build_bind, paths.builddir(), "/builddir",
        dest_bind, paths.bldroot() / "destdir", "/destdir",
        "--ro-bind", paths.sources(), "/sources",
        "--dev", "/dev",
        "--proc", "/proc",
        "--tmpfs", "/tmp",
        "--tmpfs", "/var/tmp",
    ]

    if new_session:
        bcmd += ["--new-session", "--die-with-parent"]

    if mount_binpkgs:
        bcmd += [
            "--ro-bind" if not binpkgs_rw else "--bind",
            paths.repository(),
            "/binpkgs",
            "--ro-bind" if not binpkgs_rw else "--bind",
            paths.stage_repository(),
            "/stagepkgs"
        ]
        if paths.alt_repository():
            bcmd += ["--ro-bind", paths.alt_repository(), "/altbinpkgs"]

    if mount_cbuild_cache:
        bcmd += ["--bind", paths.cbuild_cache(), "/cbuild_cache"]

    # always bubblewrap as cbuild user
    # root-needing things are done through fakeroot so we can chown
    bcmd += ["--uid", "1337"]
    bcmd += ["--gid", "1337"]

    if not unshare_all:
        bcmd += ["--share-net"]

    if wrkdir:
        bcmd.append("--chdir")
        bcmd.append(wrkdir)

    # extra file descriptors to pass to sandbox and bind to a file
    fdlist = []

    if signkey:
        # reopen as file descriptor to pass
        signfd = os.open(signkey, os.O_RDONLY)
        fdlist.append(signfd)
        bcmd += ["--ro-bind-data", str(signfd), f"/tmp/{signkey.name}"]

    if wrapper:
        rfd, wfd = os.pipe()
        os.write(wfd, wrapper.encode())
        os.close(wfd)
        fdlist.append(rfd)
        bcmd += ["--ro-bind-data", str(rfd), "/tmp/cbuild-chroot-wrapper.sh"]

    if lldargs:
        rfd, wfd = os.pipe()
        os.write(wfd, "\n".join(lldargs).encode())
        os.close(wfd)
        fdlist.append(rfd)
        bcmd += ["--ro-bind-data", str(rfd), "/tmp/cbuild-lld-args"]

    if fakeroot:
        bcmd += [
            "--setenv", "FAKEROOTDONTTRYCHOWN", "1", "--", kpers, "sh",
            get_fakeroot(False)
        ]
    else:
        bcmd += [kpers, "--"]

    if wrapper:
        bcmd += ["sh", "/tmp/cbuild-chroot-wrapper.sh"]

    bcmd.append(cmd)
    bcmd += args

    try:
        return subprocess.run(
            bcmd, env = envs, capture_output = capture_output, check = check,
            stdout = stdout, stderr = stderr, pass_fds = tuple(fdlist)
        )
    finally:
        for fd in fdlist:
            os.close(fd)
