# keep X minimal
x11-base/xorg-server                          -elogind                        # why is it enabled by default?
x11-base/xorg-server						  -suid -udev -xorg


            # miscellaneous file systems
            # FIXME
            mc.select_by_symbol("MSDOS_FS", "m")
            mc.select_tree_by_symbol("MSDOS_FS", "m,y", value_priority=mc.ValuePriority.USER_POLICY)
            mc.select_by_symbol("CIFS", "m")
            mc.select_tree_by_symbol("CIFS", "m,y", value_priority=mc.ValuePriority.USER_POLICY)



    @staticmethod
    def src_arch(bootEntry):
        # FIXME: what's the difference with arch?

        if bootEntry.spec.arch.value == "i386" or bootEntry.spec.arch.value == "x86_64":
            return "x86"
        elif bootEntry.spec.arch.value == "sparc32" or bootEntry.spec.arch.value == "sparc64":
            return "sparc"
        elif bootEntry.spec.arch.value == "sh":
            return "sh64"
        else:
            return bootEntry.spec.arch.value



# context.select_tree_by_symbol("AGP", "m,y", value_priority=context.ValuePriority.USER_POLICY)


# return [
#     ## tty ##############################################
#     "SERIAL_NONSTANDARD=n",
#     "TTY_PRINTK=n",
#     "IPMI_HANDLER=n",                                                      # unfamiliar
#     "IPMB_DEVICE_INTERFACE=n",                                             # unfamiliar
#     "TCG_TPM=n",                                                           # unfamiliar
#     "TELCLOCK=n",                                                          # unfamiliar
#     "XILLYBUS=n",                                                          # unfamiliar
#     "XILLYUSB=n",                                                          # unfamiliar
#     "NOZOMI=n",                                                            # not support PCMCIA hardware
#     "MWAVE=n",                                                             # unfamiliar

#     # serial device is under tty subsystem, not a good design
#     "SERIAL_8250_16550A_VARIANTS=n",                                       # enable it takes additional time at boot
#     "SERIAL_8250_RUNTIME_UARTS=0",                                         # only support PNP serial device
#     "SERIAL_8250_EXTENDED=n",
#     "[symbols:SERIAL_8250]=m,y",
#     "[symbols:SERIAL_8250_PCI]=m,y",
#     #[symbols:/Device drivers/Character devices/Serial drivers]=n"             # pylkc can not find this menu?!


#     ## sound ############################################

#     "SND_USB_6FIRE=n",                                                                                                                     # no firmware, we think it's rare
#     "SND_HDSP=n",                                                                                                                          # no firmware, we think it's rare
#     "SND_HDSPM=n",                                                                                                                         # no firmware, we think it's rare
#     "SND_CS46XX=n",                                                                                                                        # no firmware, we think it's rare
#     "SND_MIXART=n",                                                                                                                        # no firmware, we think it's rare
#     "SND_ASIHPI=n",                                                                                                                        # no firmware, we think it's rare
#     "SND_PCXHR=n",                                                                                                                         # no firmware, we think it's rare
#     "[prompt-regex-symbols:\\(Echoaudio\\) .*:/Device Drivers/Sound card support/Advanced Linux Sound Architecture/PCI sound devices]=n",  # no firmware, we think it's rare

#     #SND_LAYLA24=n                                               # no firmware, we think it's rare
#     #SND_INDIGOIO
#     #SND_INDIGO
#     #SND_INDIGODJ
#     #SND_ECHO3G
#     #SND_GINA20
#     #SND_MONA

#     "SOUND=m",
#     "SND_SOC=n",
#     "SND_PCSP=n",
#     "SND_SEQ_DUMMY=n",
#     "[symbols:SND_DRIVERS]=n",    # disable all generic sound drivers, which are only used for testing purpose
#     "[symbols:SOUND]=m,y",


#     ## graphics #########################################

#     "[symbols:AGP]=m,y",

#     "DRM_NOUVEAU=n",              # it has compiling error

#     "[symbols:DRM]=m,y",
#     "DRM_LOAD_EDID_FIRMWARE=n",
#     "[regex-symbols:DRM_.*:/Device Drivers/Graphics support]=m,y",
#     "HSA_AMD=y",

#     "[symbols:/Device Drivers/Graphics support/Backlight & LCD device support]=m,y",


#     ## usb ##############################################

#     "USBIP_CORE=n",                                             # USB/IP seems to be an advanced feature
#     "MUSB_PIO_ONLY=n",

#     "USB_XHCI_DBGCAP=n",                                        # seems debug?
#     "USB_HCD_TEST_MODE=n",                                      # seems debug?
#     "USB_DWC2_TRACK_MISSED_SOFS=n",                             # seems experiment?

#     "USB_ISIGHTFW=n",                                           # no firmware, we think it's rare

#     "USB_OTG_DISABLE_EXTERNAL_HUB=n",
#     "[symbols:USB_SUPPORT]=m,y",


#     ## other ############################################           # FIXME: belongs here?

#     "[symbols:/Device Drivers/Character devices]=m,y",

#     # menu: LED Support
#     "[symbols:NEW_LEDS]=m,y",

#     # menu: Real Time Clock
#     "RTC_HCTOSYS=n",                                            # no service
#     "RTC_SYSTOHC=n",                                            # no service
#     "[symbols:RTC_CLASS]=m,y",

#     # menu: DMA Engine support
#     "[symbols:DMADEVICES]=m,y",

#     # menu: Multimedia support
#     "V4L_PLATFORM_DRIVERS=n",                                   # no rare
#     "[symbols:MEDIA_SUPPORT]=m,y",

#     # menu: X86 Platform Specific Device Drivers
#     "X86_PLATFORM_DEVICES=y",
#     "ACPI_WMI=m,y",


#     ## other ############################################           # FIXME: belongs here?

#     #	CONFIG_CRYPTO_USER_API_SKCIPHER			m
#     #	CONFIG_CRYPTO_ECB				M
#     #	CONFIG_CRYPTO_CBC				OK
#     "CRYPTO_DES=m",                            # iwd need it
#     "CRYPTO_DES3_EDE_X86_64=m",                # iwd need it

#     "[prompt-regex-symbols:Userspace .*:/Cryptographic API]=m,y",
#     "[prompt-regex-symbols:User-space interface for .*:/Cryptographic API]=m,y",
#     "[symbols:/Cryptographic API]=n",                                                  # remove all unnecessary options

#     "[symbols:/Library routines]=n",                                                   # remove all unnecessary options
# ]


    @staticmethod
    def githubGetFileContent(user, repo, filepath):
        """Fetch a single file from a GitHub repository (via "svn export") and return its text."""
        with TempCreateFile() as outFile:
            svnUrl = "https://github.com/%s/%s/trunk/%s" % (user, repo, filepath)
            Util.cmdCall("svn", "export", "-q", "--force", svnUrl, outFile)
            # read while the temporary file still exists
            return pathlib.Path(outFile).read_text()


    @staticmethod
    def isInChroot():
        """Return True when the current process runs inside a chroot.

        Compares the device:inode pair of our root directory with that of
        PID 1's root; they differ inside a chroot.  This technique is used
        in a few maintenance scripts in Debian.
        """
        # FIX: cmdCall() passes arguments verbatim (no %-formatting happens),
        # so the stat format must be "%d:%i", not the escaped "%%d:%%i".  The
        # escaped form made stat print the same literal string for both paths,
        # so this function always returned False.
        out1 = Util.cmdCall("/usr/bin/stat", "-c", "%d:%i", "/")
        out2 = Util.cmdCall("/usr/bin/stat", "-c", "%d:%i", "/proc/1/root/.")
        return out1 != out2

    @staticmethod
    def cmdListPtyExec(cmdList, envDict=None):
        """Run an argument list in a pseudo terminal, mirroring its output to stdout."""
        ptyProc = ptyprocess.PtyProcessUnicode.spawn(cmdList, env=envDict)
        Util._communicateWithPty(ptyProc)

    @staticmethod
    def cmdListPtyExecWithStuckCheck(cmdList, envDict=None, bQuiet=False):
        """Run an argument list in a pseudo terminal, terminating it if it stalls.

        Raises ProcessStuckError when the child produces no output for the
        stuck timeout, subprocess.CalledProcessError when it exits non-zero.
        """
        # FIX: envDict previously defaulted to a mutable {} (an anti-pattern,
        # and it meant "empty environment"); None (inherit our environment)
        # matches every other *PtyExec* helper in this file.
        proc = ptyprocess.PtyProcessUnicode.spawn(cmdList, env=envDict)
        Util._communicateWithPtyStuckCheck(proc, bQuiet)

    @staticmethod
    def shellPtyExec(cmd, envDict=None):
        """Run a shell command string in a pseudo terminal, mirroring its output to stdout."""
        argv = ["/bin/sh", "-c", cmd]
        Util._communicateWithPty(ptyprocess.PtyProcessUnicode.spawn(argv, env=envDict))

    @staticmethod
    def shellPtyExecWithStuckCheck(cmd, envDict=None, bQuiet=False):
        """Run a shell command string in a pseudo terminal, terminating it if it stalls."""
        argv = ["/bin/sh", "-c", cmd]
        proc = ptyprocess.PtyProcessUnicode.spawn(argv, env=envDict)
        Util._communicateWithPtyStuckCheck(proc, bQuiet)

    @staticmethod
    def getMountDeviceForPath(pathname):
        """Return the device mounted at *pathname*, or None when nothing is mounted there.

        Parses /bin/mount output, whose lines look like
        "<device> on <mountpoint> type <fstype> (...)".
        """
        for line in Util.cmdCall("/bin/mount").split("\n"):
            m = re.search("^(.*) on (.*) type ", line)
            if m is None:
                continue
            if m.group(2) == pathname:
                return m.group(1)
        return None



    @staticmethod
    def _communicateWithPty(ptyProc):
        # Mirror a pty child's output to our stdout while collecting it, then
        # wait for the child and raise on failure.
        #
        # ptyProc: a started ptyprocess.PtyProcessUnicode instance.
        # Raises subprocess.CalledProcessError (carrying the collected output)
        # when the child exits non-zero.
        if hasattr(selectors, 'PollSelector'):
            pselector = selectors.PollSelector       # poll() scales better than select()
        else:
            pselector = selectors.SelectSelector     # fallback for platforms without poll()

        # redirect proc.stdout/proc.stderr to stdout/stderr
        # make CalledProcessError contain stdout/stderr content
        sStdout = ""
        with pselector() as selector:
            selector.register(ptyProc, selectors.EVENT_READ)
            while selector.get_map():
                # TIMEOUT is a module-level constant (seconds); on timeout we
                # simply poll again -- this variant has no stuck detection
                res = selector.select(TIMEOUT)
                for key, events in res:
                    try:
                        data = key.fileobj.read(1)   # one character at a time
                    except EOFError:
                        # child closed its end; emptying the selector map ends the loop
                        selector.unregister(key.fileobj)
                        continue
                    sStdout += data
                    sys.stdout.write(data)
                    sys.stdout.flush()

        ptyProc.wait()
        if ptyProc.signalstatus is not None:
            # child died from a signal; PARENT_WAIT is a module-level constant
            # (seconds) -- presumably a grace period for the terminal, confirm
            time.sleep(PARENT_WAIT)
        if ptyProc.exitstatus:
            raise subprocess.CalledProcessError(ptyProc.exitstatus, ptyProc.argv, sStdout, "")

    @staticmethod
    def _communicateWithPtyStuckCheck(ptyProc, bQuiet):
        """Mirror a pty child's output to stdout, terminating the child when it stalls.

        ptyProc: a started ptyprocess.PtyProcessUnicode instance.
        bQuiet:  suppress the "Process stuck" message on stderr when True.
        Raises ProcessStuckError when the child produces no output for TIMEOUT
        seconds, subprocess.CalledProcessError when it exits non-zero.
        """
        if hasattr(selectors, 'PollSelector'):
            pselector = selectors.PollSelector       # poll() scales better than select()
        else:
            pselector = selectors.SelectSelector     # fallback for platforms without poll()

        # redirect proc.stdout/proc.stderr to stdout/stderr
        # make CalledProcessError contain stdout/stderr content
        # terminate the process and raise exception if they stuck
        sStdout = ""
        bStuck = False
        with pselector() as selector:
            selector.register(ptyProc, selectors.EVENT_READ)
            while selector.get_map():
                res = selector.select(TIMEOUT)
                if res == []:
                    # no output for TIMEOUT seconds: consider the child stuck
                    bStuck = True
                    if not bQuiet:
                        sys.stderr.write("Process stuck for %d second(s), terminated.\n" % (TIMEOUT))
                    ptyProc.terminate()
                    break
                for key, events in res:
                    try:
                        data = key.fileobj.read(1)
                    except EOFError:
                        # child closed its end; emptying the selector map ends the loop
                        selector.unregister(key.fileobj)
                        continue
                    sStdout += data
                    sys.stdout.write(data)
                    sys.stdout.flush()

        ptyProc.wait()
        if ptyProc.signalstatus is not None:
            time.sleep(PARENT_WAIT)
        if bStuck:
            # FIX: PtyProcess stores its command line as .argv (see the
            # CalledProcessError below); ".args" does not exist and raised
            # AttributeError here instead of ProcessStuckError
            raise ProcessStuckError(ptyProc.argv, TIMEOUT)
        if ptyProc.exitstatus:
            raise subprocess.CalledProcessError(ptyProc.exitstatus, ptyProc.argv, sStdout, "")






class _InterProcessCounter:
    # Skeleton of a counter shared between processes via POSIX shared memory.
    # incr()/decr() are not implemented yet, and the shm/rwlock plumbing below
    # is still commented out.

    def __init__(self, name):
        # name: identifier for the shared-memory object backing this counter
        self.name = name
        # librt provides shm_open()/shm_unlink(); use_errno lets us inspect
        # errno after a failed call
        self.librt = ctypes.CDLL("librt.so", use_errno=True)

        # # https://github.com/erikhvatum/py_interprocess_shared_memory_blob
        # self.shm_open_argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_unit32]

        # self.pthread_rwlockattr_t = ctypes.c_byte * 8
        # self.pthread_rwlockattr_t_p = ctypes.POINTER(self.pthread_rwlockattr_t)

        # self.pthread_rwlock_t = ctypes.c_byte * 56
        # self.pthread_rwlock_t_p = ctypes.POINTER(self.pthread_rwlock_t)

        # API = [
        #     ('pthread_rwlock_destroy', [pthread_rwlock_t_p], 'pthread'),
        #     ('pthread_rwlock_init', [pthread_rwlock_t_p, pthread_rwlockattr_t_p], 'pthread'),
        #     ('pthread_rwlock_unlock', [pthread_rwlock_t_p], 'pthread'),
        #     ('pthread_rwlock_wrlock', [pthread_rwlock_t_p], 'pthread'),
        #     ('pthread_rwlockattr_destroy', [pthread_rwlockattr_t_p], 'pthread'),
        #     ('pthread_rwlockattr_init', [pthread_rwlockattr_t_p], 'pthread'),
        #     ('pthread_rwlockattr_setpshared', [pthread_rwlockattr_t_p, ctypes.c_int], 'pthread'),
        #     ('shm_open', shm_open_argtypes, 'os'),
        #     ('shm_unlink', [ctypes.c_char_p], 'os')
        # ]

    def incr(self):
        # TODO: not implemented yet
        pass

    def decr(self):
        # TODO: not implemented yet
        pass






        with self.infoPrinter.printInfoAndIndent("- Checking file systems"):
            # if True:
            #     # what we can check is very limited:
            #     # 1. no way to fsck ext4 root partition when it's on-line
            #     # 2. fscking vfat partition when it's on-line always finds dirty-bit
            #     if self.bAutoFix:
            #         fatFsckCmd = "fsck.vfat -a"
            #     else:
            #         fatFsckCmd = "fsck.vfat -n"

            #     if isinstance(layout, FmStorageLayoutBiosSimple):
            #         pass
            #     elif isinstance(layout, FmStorageLayoutEfiSimple):
            #         Util.shellExec("%s %s" % (fatFsckCmd, layout.hddEspParti))
            #     elif isinstance(layout, FmStorageLayoutEfiLvm):
            #         for hdd in layout.lvmPvHddList:
            #             Util.shellExec("%s %s" % (fatFsckCmd, Util.devPathDiskToPartition(hdd, 1)))
            #     elif isinstance(layout, FmStorageLayoutEfiBcacheLvm):
            #         if layout.ssd is not None:
            #             Util.shellExec("%s %s" % (fatFsckCmd, layout.ssdEspParti))
            #         for hdd in layout.lvmPvHddDict:
            #             Util.shellExec("%s %s" % (fatFsckCmd, Util.devPathDiskToPartition(hdd, 1)))
            #     else:
            #         assert False
            pass



    @staticmethod
    def getBlkDevCapacity(devPath):
        """Return (total, used) capacity of a mounted block device, in MB.

        Parses the "df -BM" output line belonging to *devPath*.
        """
        ret = Util.cmdCall("/bin/df", "-BM", devPath)
        # FIX: the device path must be interpolated into the pattern; the
        # original searched for the literal text "%s" (the "% (devPath)" was
        # missing), never matched, and crashed on m.group below.
        m = re.search("%s +(\\d+)M +(\\d+)M +\\d+M" % (devPath), ret, re.M)
        total = int(m.group(1))
        used = int(m.group(2))
        return (total, used)        # unit: MB


    @staticmethod
    def getBlkDevLvmInfo(devPath):
        """Returns (vg-name, lv-name)
           Returns None if the device is not lvm"""

        # Ask device-mapper for the canonical name of this device; the name is
        # "vg.lv" (newer style) or "vg-lv" (older style).
        rc, out = Util.shellCallWithRetCode("/sbin/dmsetup info %s" % (devPath))
        if rc == 0:
            m = re.search("^Name: *(\\S+)$", out, re.M)
            assert m is not None
            ret = m.group(1).split(".")
            if len(ret) == 2:
                return ret
            ret = m.group(1).split("-")         # compatible with old lvm version
            if len(ret) == 2:
                return ret

        # NOTE(review): "%s-%s" % (m.group(1), m.group(2)) reconstructs
        # devPath unchanged, so if dmsetup keeps failing for a
        # /dev/mapper/...-... path this recursion never terminates -- confirm
        # what transformation was intended here.
        m = re.fullmatch("(/dev/mapper/\\S+)-(\\S+)", devPath)          # compatible with old lvm version
        if m is not None:
            return Util.getBlkDevLvmInfo("%s-%s" % (m.group(1), m.group(2)))

        return None




def fetchPkg(self, pkgName):
    """Fetch (download only) the given package and its dependencies.

    Repeatedly does a pretend emerge and updates USE flags until they
    converge, then runs "emerge --fetchonly".
    """
    with _FakeUsrSrcLinuxDirectory():
        # update USE flags until a pretend run requires no further change
        while True:
            self._pretendEmerge(pkgName)
            if not self._updateUseFlag():
                break

        # do fetch
        # FIX: check_call() takes the argument vector as a single list;
        # passing the strings as separate positional arguments sent
        # "--fetchonly" into Popen's bufsize parameter and crashed.
        subprocess.check_call(["emerge", "--fetchonly", pkgName])
        if os.path.exists(self._fnBreakCycle):
            Util.forceDelete(self._fnBreakCycle)

    self._pretendOutput = ""



                # atop
                # b43-fwcutter
                # borg
                # chntpw
                # clonezilla
                # crda
                # darkhttpd
                # ddrescue
                # dhclient
                # dialog
                # dmraid
                # dnsmasq
                # dnsutils
                # elinks
                # ethtool
                # # featherpad
                # # firefox-esr-bin
                # fsarchiver
                # geany
                # gnu-netcat
                # gpm
                # grml-zsh-config
                # # growpart
                # grsync
                # iftop
                # iotop
                # irssi
                # iwd
                # # joe                       # this package disappears
                # keepassxc
                # lftp
                # lightdm
                # linux-atm
                # lzip
                # ncdu
                # ndisc6
                # network-manager-applet
                # networkmanager-openvpn
                # networkmanager-vpnc
                # # nwipe
                # openconnect
                # openssh
                # openvpn
                # partclone
                # partimage
                # ppp
                # pptpclient
                # pv
                # rdesktop
                # # refind-efi                    # this package disappears
                # rkhunter
                # rp-pppoe
                # sudo
                # sysstat
                # testdisk
                # tigervnc
                # ttf-dejavu
                # ttf-droid
                # usb_modeswitch
                # vim-minimal
                # vpnc
                # wipe
                # wireless-regdb
                # wireless_tools
                # wvdial
                # xarchiver
                # xfce4
                # xfce4-battery-plugin
                # xfce4-taskmanager
                # xkbsel
                # xkeyboard-config
                # xl2tpd
                # xorg-apps
                # xorg-drivers
                # xorg-server
                # xorg-xinit
                # yubikey-manager-qt
                # yubikey-personalization-gui
                # # zerofree
                # zile



class ArchLinuxBasedOsBuilder:

    def __init__(self, mirrorList, cacheDir, tmpDir):
        self.mirrorList = mirrorList
        self.cacheDir = cacheDir
        self.pkgCacheDir = os.path.join(cacheDir, "pkg")
        self.tmpDir = tmpDir

    def bootstrapPrepare(self):
        # Determine the newest archlinux-bootstrap tarball, comparing the
        # local cache against the first mirror.
        #
        # Returns True when the mirror offers a tarball different from the
        # cached one (i.e. a download is needed), False otherwise.
        # Side effect: binds self.dataFile / self.signFile / self.bootstrapDir /
        # self.rootfsDir for the subsequent bootstrap*/createRootfs steps.
        try:
            # get cached file
            cachedDataFile = None
            if os.path.exists(self.cacheDir):
                for fn in sorted(os.listdir(self.cacheDir)):
                    if re.fullmatch("archlinux-bootstrap-(.*)-x86_64.tar.gz", fn) is None:
                        continue
                    if not os.path.exists(os.path.join(self.cacheDir, fn + ".sig")):
                        continue
                    cachedDataFile = fn         # sorted() order: ends on the newest match

            # select mirror
            mr = None
            if len(self.mirrorList) == 0:
                if cachedDataFile is not None:
                    # no mirror available: fall back to the cached tarball
                    dataFile = cachedDataFile
                    signFile = cachedDataFile + ".sig"
                    return False
                else:
                    raise Exception("no Arch Linux mirror")
            else:
                mr = self.mirrorList[0]

            # get remote file
            dataFile = None
            signFile = None
            if True:
                url = "%s/iso/latest" % (mr)
                with urllib.request.urlopen(url, timeout=robust_layer.TIMEOUT) as resp:
                    # scrape the mirror's directory listing for the tarball name
                    root = lxml.html.parse(resp)
                    for link in root.xpath(".//a"):
                        fn = os.path.basename(link.get("href"))
                        if re.fullmatch("archlinux-bootstrap-(.*)-x86_64.tar.gz", fn) is not None:
                            dataFile = fn
                            signFile = fn + ".sig"

            # changed?
            return (cachedDataFile != dataFile)
        finally:
            # NOTE(review): if an exception is raised before dataFile/signFile
            # are bound (e.g. while scanning the cache), the assignments below
            # raise NameError and mask the original exception -- confirm
            # whether that is intended.
            self.dataFile = dataFile
            self.signFile = signFile
            self.bootstrapDir = os.path.join(self.tmpDir, "bootstrap")
            self.rootfsDir = os.path.join(self.tmpDir, "airootfs")

    def bootstrapDownload(self):
        """Download the bootstrap tarball and its signature into the cache directory."""
        os.makedirs(self.cacheDir, exist_ok=True)
        mirror = self.mirrorList[0]
        for fn in (self.dataFile, self.signFile):
            Util.wgetDownload("%s/iso/latest/%s" % (mirror, fn), os.path.join(self.cacheDir, fn))

    def bootstrapExtract(self):
        """Unpack the cached bootstrap tarball and move its tree to self.bootstrapDir."""
        os.makedirs(self.tmpDir, exist_ok=True)
        tarball = os.path.join(self.cacheDir, self.dataFile)
        Util.cmdCall("/bin/tar", "-xzf", tarball, "-C", self.tmpDir)
        Util.forceDelete(self.bootstrapDir)
        os.rename(os.path.join(self.tmpDir, "root.x86_64"), self.bootstrapDir)

    def createRootfs(self, initcpioHooksDir=None, pkgList=None, localPkgFileList=None, fileList=None, cmdList=None):
        """Populate self.rootfsDir with an Arch Linux system via pacstrap.

        initcpioHooksDir: optional directory containing custom mkinitcpio
                          "hooks" and "install" files to copy into the rootfs.
        pkgList:          extra repository packages to install.
        localPkgFileList: package files from the local disk to install.
        fileList:         (srcPath, mode, dstDir) tuples copied into the rootfs.
        cmdList:          shell commands executed chrooted into the new rootfs.
        """
        # FIX: these parameters previously used mutable default arguments
        # ([]), which are shared across calls in Python
        pkgList = [] if pkgList is None else pkgList
        localPkgFileList = [] if localPkgFileList is None else localPkgFileList
        fileList = [] if fileList is None else fileList
        cmdList = [] if cmdList is None else cmdList

        Util.forceDelete(self.rootfsDir)
        os.mkdir(self.rootfsDir)

        os.makedirs(self.pkgCacheDir, exist_ok=True)

        # copy resolv.conf so name resolution works inside the chroot
        Util.cmdCall("/bin/cp", "-L", "/etc/resolv.conf", os.path.join(self.bootstrapDir, "etc"))

        # modify mirror
        with open(os.path.join(self.bootstrapDir, "etc", "pacman.d", "mirrorlist"), "w") as f:
            for mr in self.mirrorList:
                f.write("Server = %s/$repo/os/$arch\n" % (mr))

        # initialize, add packages
        mountList = DirListMount.standardDirList(self.bootstrapDir)
        tstr = os.path.join(self.bootstrapDir, "var", "cache", "pacman", "pkg")
        mountList.append((tstr, "--bind %s %s" % (self.pkgCacheDir, tstr)))   # share the host-side package cache
        tstr = os.path.join(self.bootstrapDir, "mnt")
        mountList.append((tstr, "--bind %s %s" % (self.rootfsDir, tstr)))     # mount rootfs directory as /mnt
        with DirListMount(mountList):
            # prepare pacman
            Util.cmdCall("/usr/bin/chroot", self.bootstrapDir,  "/sbin/pacman-key", "--init")
            Util.cmdCall("/usr/bin/chroot", self.bootstrapDir,  "/sbin/pacman-key", "--populate", "archlinux")

            # install basic system files
            Util.cmdExec("/usr/bin/chroot", self.bootstrapDir,  "/sbin/pacstrap", "-c", "/mnt", "base")
            Util.cmdExec("/usr/bin/chroot", self.bootstrapDir,  "/sbin/pacstrap", "-c", "/mnt", "lvm2")

            # install mkinitcpio and modify its configuration
            Util.cmdExec("/usr/bin/chroot", self.bootstrapDir,  "/sbin/pacstrap", "-c", "/mnt", "mkinitcpio")
            if initcpioHooksDir is not None:
                # copy /etc/mkinitcpio/hooks files
                for fullfn in glob.glob(os.path.join(initcpioHooksDir, "hooks", "*")):
                    dstFn = os.path.join(self.rootfsDir, "etc", "initcpio", "hooks", os.path.basename(fullfn))
                    shutil.copy(fullfn, dstFn)
                    os.chmod(dstFn, 0o644)

                # record "insert after which hook" information from *.after files
                afterDict = dict()
                for fullfn in glob.glob(os.path.join(initcpioHooksDir, "install", "*.after")):
                    fn = os.path.basename(fullfn)
                    name = fn.split(".")[0]
                    afterDict[name] = pathlib.Path(fullfn).read_text().rstrip("\n")

                # copy /etc/mkinitcpio/install files
                # add hook to /etc/mkinitcpio.conf
                confFile = os.path.join(self.rootfsDir, "etc", "mkinitcpio.conf")
                self._removeMkInitcpioHook(confFile, "fsck")
                self._addMkInitcpioHook(confFile, "lvm2", "block")
                for fullfn in glob.glob(os.path.join(initcpioHooksDir, "install", "*")):
                    if fullfn.endswith(".after"):
                        continue
                    name = os.path.basename(fullfn)
                    dstFn = os.path.join(self.rootfsDir, "etc", "initcpio", "install", name)
                    shutil.copy(fullfn, dstFn)
                    os.chmod(dstFn, 0o644)
                    self._addMkInitcpioHook(confFile, name, afterDict.get(name))

            # install linux kernel
            Util.cmdExec("/usr/bin/chroot", self.bootstrapDir,  "/sbin/pacstrap", "-c", "/mnt", "linux-lts")

            # install packages
            for pkg in pkgList:
                Util.cmdExec("/usr/bin/chroot", self.bootstrapDir,  "/sbin/pacstrap", "-c", "/mnt", pkg)

            # install packages from local repository: stage each file into the
            # bootstrap chroot's package cache so pacstrap -U can see it
            for fullfn in localPkgFileList:
                fn = os.path.basename(fullfn)
                dstFn = os.path.join(self.bootstrapDir, "var", "cache", "pacman", "pkg", fn)
                shutil.copy(fullfn, dstFn)
                try:
                    fn2 = os.path.join("/var", "cache", "pacman", "pkg", fn)
                    Util.cmdExec("/usr/bin/chroot", self.bootstrapDir,  "/sbin/pacstrap", "-c", "-U", "/mnt", fn2)
                finally:
                    os.remove(dstFn)

        # add files
        for fullfn, mode, dstDir in fileList:
            assert dstDir.startswith("/")
            dstDir = self.rootfsDir + dstDir
            dstFn = os.path.join(dstDir, os.path.basename(fullfn))
            os.makedirs(dstDir, exist_ok=True)
            shutil.copy(fullfn, dstFn)
            os.chmod(dstFn, mode)

        # exec custom script
        for cmd in cmdList:
            Util.shellCall("/usr/bin/chroot %s %s" % (self.rootfsDir, cmd))

    def squashRootfs(self, rootfsDataFile, rootfsMd5File, kernelFile, initcpioFile):
        """Move kernel/initcpio out of the rootfs, squash the rest, and record its sha512."""
        for p in (rootfsDataFile, rootfsMd5File, kernelFile, initcpioFile):
            assert p.startswith("/")

        bootDir = os.path.join(self.rootfsDir, "boot")
        Util.cmdCall("/bin/mv", os.path.join(bootDir, "vmlinuz-linux-lts"), kernelFile)
        Util.cmdCall("/bin/mv", os.path.join(bootDir, "initramfs-linux-lts-fallback.img"), initcpioFile)
        shutil.rmtree(bootDir)

        Util.cmdExec("/usr/bin/mksquashfs", self.rootfsDir, rootfsDataFile, "-no-progress", "-noappend", "-quiet")
        with TempChdir(os.path.dirname(rootfsDataFile)):
            Util.shellExec("/usr/bin/sha512sum \"%s\" > \"%s\"" % (os.path.basename(rootfsDataFile), rootfsMd5File))

    def clean(self):
        """Delete the work directories and drop the per-build attributes."""
        Util.forceDelete(self.rootfsDir)
        Util.forceDelete(self.bootstrapDir)
        for attrName in ("rootfsDir", "bootstrapDir", "signFile", "dataFile"):
            delattr(self, attrName)

    def _addMkInitcpioHook(self, confFile, name, after=None):
        buf = pathlib.Path(confFile).read_text()
        hookList = re.search("^HOOKS=\\((.*)\\)", buf, re.M).group(1).split(" ")
        assert name not in hookList
        if after is not None:
            try:
                i = hookList.index(after)
                hookList.insert(i + 1, name)
            except ValueError:
                hookList.append(name)
        else:
            hookList.append(name)
        with open(confFile, "w") as f:
            f.write(re.sub("^HOOKS=\\(.*\\)", "HOOKS=(%s)" % (" ".join(hookList)), buf, 0, re.M))

    def _removeMkInitcpioHook(self, confFile, name):
        buf = pathlib.Path(confFile).read_text()
        hookList = re.search("^HOOKS=\\((.*)\\)", buf, re.M).group(1).split(" ")
        if name in hookList:
            hookList.remove(name)
            with open(confFile, "w") as f:
                f.write(re.sub("^HOOKS=\\(.*\\)", "HOOKS=(%s)" % (" ".join(hookList)), buf, 0, re.M))


    def get_stats(self, name):
        """Return the average bcache cache-hit ratio (0.0-1.0) over all cache devices.

        name: one of "cache_hit_ratio_five_minute", "cache_hit_ratio_hour",
              "cache_hit_ratio_day", "cache_hit_ratio_total".
        Raises ValueError for any other name.
        """
        validNames = ["cache_hit_ratio_five_minute", "cache_hit_ratio_hour", "cache_hit_ratio_day", "cache_hit_ratio_total"]
        if name not in validNames:
            # FIX: was "assert False", which is stripped under "python -O" and
            # then silently returned None for invalid names
            raise ValueError("invalid stats name %s" % (name))
        period = name.replace("cache_hit_ratio_", "")
        total = 0
        for cacheDev in self._cacheDevSet:
            fullfn = os.path.join("/sys", "fs", "bcache", BcacheUtil.getSetUuid(cacheDev), "stats_%s" % (period), "cache_hit_ratio")
            total += int(pathlib.Path(fullfn).read_text().rstrip("\n"))
        # sysfs reports a percentage; divide by 100 to get a ratio
        return total / len(self._cacheDevSet) / 100




    @staticmethod
    def createEnvBlkFile(name):
        DEFAULT_ENVBLK_SIZE = 1024
        GRUB_ENVBLK_SIGNATURE = "# GRUB Environment Block\n"
        GRUB_ENVBLK_MESSAGE = "# WARNING: Do not edit this file by tools other than grub-editenv!!!\n"

        tmpName = name + ".new"

        with open(tmpName, "w") as f:
            f.write(GRUB_ENVBLK_SIGNATURE)
            f.write(GRUB_ENVBLK_MESSAGE)
            f.write("#" * (DEFAULT_ENVBLK_SIZE - len(GRUB_ENVBLK_SIGNATURE) - len(GRUB_ENVBLK_MESSAGE)))

        os.rename(tmpName, name)


class StructUtil:
    """Helpers for reading binary structures from a stream."""

    class Exception(Exception):
        """Raised when the stream ends before a complete structure was read."""
        pass

    @staticmethod
    def readStream(f, fmt):
        """Read exactly struct.calcsize(fmt) bytes from *f* and unpack them.

        Raises StructUtil.Exception when the stream is exhausted first.
        """
        size = struct.calcsize(fmt)
        buf = bytes()
        while len(buf) < size:
            chunk = f.read(size - len(buf))
            # FIX: file-like objects signal EOF with b"" (never None); the
            # original "is None" check made this loop spin forever at EOF
            if not chunk:
                raise StructUtil.Exception("not enough data")
            buf += chunk
        return struct.unpack(fmt, buf)




class Resolution:
    """A screen resolution (x, y); hashable and partially ordered.

    __lt__/__gt__ compare component-wise, so two resolutions can be mutually
    incomparable (e.g. 1024x768 vs 800x1200).  Comparison with a non-Resolution
    raises AssertionError.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __hash__(self):
        # FIX: hash() takes exactly one argument; the original
        # hash(self.x, self.y) raised TypeError.  Hash the coordinate tuple,
        # which is consistent with __eq__.
        return hash((self.x, self.y))

    def __eq__(self, other):
        assert isinstance(other, self.__class__)
        return self.x == other.x and self.y == other.y

    def __lt__(self, other):
        assert isinstance(other, self.__class__)
        return self.x < other.x and self.y < other.y

    def __gt__(self, other):
        assert isinstance(other, self.__class__)
        return self.x > other.x and self.y > other.y



    @staticmethod
    def portageIsSimplePkgAtom(pkgAtom):
        if ":" in pkgAtom:
            return False
        for op in [">", ">=", "<", "<=", "=", "~", "!"]:
            if pkgAtom.startswith(op):
                return False
        return True



    @staticmethod
    def portageGetSameSlotPkgAtom(pkgAtom):
        """Return all package atoms sharing *pkgAtom*'s package name and SLOT."""
        dbapi = portage.db[portage.root]["porttree"].dbapi
        slot = dbapi.aux_get(pkgAtom, ["SLOT"])[0]
        baseName = portage.versions.pkgsplit(pkgAtom)[0]
        return dbapi.match("%s:%s" % (baseName, slot))



    @staticmethod
    def portageIsPkgMultiSlot(porttree, pkgName):
        cpvList = porttree.dbapi.match(pkgName)
        assert len(cpvList) > 0

        slot = None
        for cpv in cpvList:
            nslot = porttree.dbapi.aux_get(cpv, ["SLOT"])[0]
            if slot is not None and slot != nslot:
                return True
            slot = nslot

        return False

    @staticmethod
    def portageGetVcsTypeAndUrlFromReposConfFile(reposConfFile):
        with open(reposConfFile, "r") as f:
            buf = f.read()
            m = re.search("^sync-type *= *(.*)$", buf, re.M)
            if m is None:
                return None
            vcsType = m.group(1)
            url = re.search("^sync-uri *= *(.*)$", buf, re.M).group(1)
            return (vcsType, url)


            with ParallelRunSequencialPrint() as prspObj:
                if buildServer is not None:
                    startCoro = buildServer.asyncStartSshExec
                    waitCoro = buildServer.asyncWaitSshExec
                else:
                    startCoro = Util.asyncStartCmdExec
                    waitCoro = Util.asyncWaitCmdExec
                for repo in pkgwh.layman.getOverlayList():
                    if pkgwh.layman.getOverlayType(repo) == "static":
                        continue
                    prspObj.add_task(
                        startCoro, [self.opSync, self.param.runMode, "sync-overlay", repo],
                        waitCoro,
                        pre_func=lambda x=repo: self.infoPrinter.printInfo(">> Synchronizing overlay \"%s\"..." % (x)),
                        post_func=lambda: print(""),
                    )
            # FIXME: there should be no sync down after realtime network filesystem support is done
            if buildServer is not None:
                buildServer.syncDownDirectory(FmConst.portageDataDir)




    @staticmethod
    async def asyncStartCmdExec(cmd, *kargs, loop=None):
        assert loop is not None
        proc = await asyncio.create_subprocess_exec(cmd, *kargs, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT, loop=loop)
        return (proc, proc.stdout)

    @staticmethod
    async def asyncWaitCmdExec(proc):
        retcode = await proc.wait()
        if retcode != 0:
            raise subprocess.CalledProcessError(retcode, [])      # use subprocess.CalledProcessError since there's no equivalent in asyncio




# H3C CAS 2.0 still use legacy virtio device, so it is needed
# buf += "VIRTIO_PCI_LEGACY=y\n"
# buf += "\n"



class NewMountNamespace:

    """Move the calling process into a fresh, private mount namespace.

    open() unshares the mount namespace and makes all mounts private
    (mirroring what util-linux's unshare(1) does); close() re-enters the
    original namespace saved from /proc/<pid>/ns/mnt.  Requires sufficient
    privileges (CAP_SYS_ADMIN) for unshare/mount to succeed.
    """

    _CLONE_NEWNS = 0x00020000               # <linux/sched.h>
    _MS_REC = 16384                         # <sys/mount.h>
    _MS_PRIVATE = 1 << 18                   # <sys/mount.h>
    # NOTE(review): these are intended as a class-level cache, but __init__
    # assigns to self._libc etc., which creates *instance* attributes and
    # leaves the class attributes None -- every instance loads its own libc
    # handle.  Confirm whether sharing was actually intended.
    _libc = None
    _mount = None
    _setns = None
    _unshare = None

    def __init__(self):
        if self._libc is None:
            self._libc = ctypes.CDLL('libc.so.6', use_errno=True)
            self._mount = self._libc.mount
            self._mount.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p]
            self._mount.restype = ctypes.c_int
            self._setns = self._libc.setns
            self._unshare = self._libc.unshare

        # file object keeping the original mount namespace alive/reachable
        self.parentfd = None

    def open(self):
        """Unshare into a new mount namespace and make all mounts private."""
        assert self.parentfd is None

        # keep a handle on the current (parent) namespace so close() can
        # switch back via setns()
        self.parentfd = open("/proc/%d/ns/mnt" % (os.getpid()), 'r')

        # copied from unshare.c of util-linux
        try:
            if self._unshare(self._CLONE_NEWNS) != 0:
                e = ctypes.get_errno()
                raise OSError(e, errno.errorcode[e])

            # remount "/" recursively as private so mount events no longer
            # propagate to the parent namespace
            srcdir = ctypes.c_char_p("none".encode("utf_8"))
            target = ctypes.c_char_p("/".encode("utf_8"))
            if self._mount(srcdir, target, None, (self._MS_REC | self._MS_PRIVATE), None) != 0:
                e = ctypes.get_errno()
                raise OSError(e, errno.errorcode[e])
        except BaseException:
            self.parentfd.close()
            self.parentfd = None
            raise

    def close(self):
        """Return to the original mount namespace and drop the saved handle."""
        assert self.parentfd is not None

        # NOTE(review): the setns() return value is not checked, so a failed
        # switch-back goes unnoticed -- confirm this is acceptable here.
        self._setns(self.parentfd.fileno(), 0)
        self.parentfd.close()
        self.parentfd = None

    def __enter__(self):
        # NOTE(review): __enter__ does not call open(); the caller must call
        # open() explicitly before using this as a context manager, otherwise
        # __exit__ -> close() trips its assertion.  Confirm this is intended.
        return self

    def __exit__(self, *_):
        self.close()

class FakeChroot:

    """
    This class use a mounted ext4-fs image, mount/pid/user container to create a chroot environment
    """

    @staticmethod
    def create_image(imageFilePath, imageSize):
        """Create a sparse ext4 image file of imageSize bytes (1 MiB multiple)."""
        assert imageSize % (1024 * 1024) == 0
        Util.shellCall("dd if=/dev/zero of=%s bs=%d count=%d conv=sparse" % (imageFilePath, 1024 * 1024, imageSize // (1024 * 1024)))
        Util.shellCall("/sbin/mkfs.ext4 -O ^has_journal %s" % (imageFilePath))

    def __init__(self, imageFilePath, iAmRoot, mountDir):
        """Mount imageFilePath onto mountDir.

        Uses mount(8) when running as root, fuse2fs (userspace) otherwise.
        """
        self._imageFile = imageFilePath
        self._mntdir = mountDir
        self._iAmRoot = iAmRoot
        # BUGFIX: _fuseProc must be initialized before the try-block; in the
        # original code a failing subprocess.Popen() (e.g. fuse2fs missing)
        # left the attribute unset, so the dispose() call in the except
        # handler raised AttributeError and masked the real error.
        self._fuseProc = None

        try:
            if self._iAmRoot:
                Util.shellCall("mount -t ext4 %s %s" % (self._imageFile, self._mntdir))
            else:
                self._fuseProc = subprocess.Popen(["fuse2fs", "-f", self._imageFile, self._mntdir])
        except BaseException:
            self.dispose()
            raise

    def dispose(self):
        """Tear down the mount; safe to call multiple times."""
        if self._iAmRoot:
            if Util.ismount(self._mntdir):
                Util.shellCall("umount %s" % (self._mntdir))
        else:
            if self._fuseProc is not None:
                self._fuseProc.terminate()
                self._fuseProc.wait()
                self._fuseProc = None

    def run_cmd(self):
        # not implemented yet
        pass

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.dispose()



    def _nonEmptySplit(theStr, delimiter):
        ret = []
        for i in theStr.split(delimiter):
            if i != "":
                ret.append(i)
        return ret


class UsrMerge:

    """
    Apply the UsrMerge scheme started by Fedora
    (https://fedoraproject.org/wiki/Features/UsrMove), which merges:
    /bin   -> /usr/bin
    /sbin  -> /usr/sbin
    /lib   -> /usr/lib
    /lib64 -> /usr/lib64

    Gentoo follows the same scheme, though it is not officially supported yet.

    Arch Linux performs one additional optimization on top of it, which I
    think is a good one:
    /usr/sbin -> /usr/bin

    This patchset is for whoever wants to make that happen on their own
    system.

    Reference:
    [1] https://leo3418.github.io/2021/01/16/gentoo-merge-usr.html
    [2] https://fedoraproject.org/wiki/Features/UsrMove
    [3] https://bugs.gentoo.org/690294
    """

    def __init__(self, merge_sbin=False):
        # merge_sbin: additionally merge /usr/sbin into /usr/bin
        self._mergeSbin = merge_sbin

    def update_target_settings(self, host_info, target_settings):
        """Mask the split-usr USE flag and register the merge-sbin patch directory."""
        if "split-usr" not in target_settings.use_mask:
            target_settings.use_mask.append("split-usr")

        if self._mergeSbin:
            patchDir = os.path.join(host_info.repo_postsync_patch_source_dir, "merge-sbin")
            target_settings.repo_postsync_patch_directories.append(patchDir)

    def get_custom_action(self):
        """Return the custom action that performs the merge inside the target system."""
        script = self._scriptFileContent
        if self._mergeSbin:
            script = script + self._scriptMergeSbin
        return SimpleCustomAction(ScriptFromBuffer(script),
                                  after=["init_confdir", "create_overlays"],
                                  before=["update_world"])

        # UNINSTALL_IGNORE="/bin /lib /lib64 /sbin /usr/sbin"

    _scriptFileContent = """
#!/usr/bin/python

import os
import glob
import shutil
import subprocess

# fix /bin/awk
os.unlink("/bin/awk")

# copy root directories to /usr counterparts and create
# the /usr merge compatibility symlinks
for dir in ["/bin", "/sbin"] + glob.glob("/lib*"):
    subprocess.run("cp -a --remove-destination %s/* /usr/%s" % (dir, dir[1:]), shell=True)
    shutil.rmtree(dir)
    os.symlink("usr/%s" % (dir[1:]), dir)
"""

    _scriptMergeSbin = """
# merge /usr/sbin into /usr/bin
if True:
    subprocess.run("cp -a --remove-destination /usr/sbin/* /usr/bin", shell=True)
    shutil.rmtree("/usr/sbin")
    os.symlink("bin", "/usr/sbin")
"""



    def write_use_mask(self):
        """Write self._ts.use_mask to <self._dir>/profile/use.mask (chroot path), one flag per line.

        Nothing is written (and no directory is created) when the mask list
        is empty.
        """
        if not self._ts.use_mask:
            return
        fpath = os.path.join(self._dir, "profile", "use.mask")
        os.makedirs(os.path.dirname(fpath), exist_ok=True)
        with open(fpath, "w") as myf:
            myf.writelines("%s\n" % (flag) for flag in self._ts.use_mask)


    # check /etc/portage/profile/use.mask file
    # (fragment: the enclosing function is not visible in this chunk; it
    # reads ts.use_mask and self.bAutoFix and either fixes or reports
    # mismatches between the expected and on-disk use.mask file)
    if True:
        fpath = os.path.join(FmConst.portageCfgDir, "profile", "use.mask")
        if len(ts.use_mask) > 0:
            # expected content exists: (re)write the file if needed
            os.makedirs(os.path.dirname(fpath), exist_ok=True)
            self.__checkAndFixEtcFile(fpath, "".join([x + "\n" for x in ts.use_mask]))
        else:
            # no masks expected: neither the file nor its directory should exist
            if os.path.exists(fpath):
                if not self.bAutoFix:
                    raise FmCheckException("\"%s\" should not exist" % (fpath))
                else:
                    os.unlink(fpath)
            # NOTE(review): os.listdir() returns a list, so "== 0" is always
            # False and this empty-directory cleanup can never trigger; this
            # almost certainly should be len(os.listdir(...)) == 0 -- confirm
            # and fix.
            if os.listdir(os.path.dirname(fpath)) == 0:
                if not self.bAutoFix:
                    raise FmCheckException("\"%s\" should not exist" % (os.path.dirname(fpath)))
                else:
                    os.rmdir(os.path.dirname(fpath))



    # this function is not used
    def smartEmergePkg(self, pretendCmd, realCmd, cfgProtect=True, quietFail=False, pkgName=None):
        """Run an emerge operation with automatic failure recovery.

        pretendCmd: argument string appended to "emerge -p" for the dry run.
        realCmd:    argument string appended to "/usr/bin/emerge" for the real run.
        cfgProtect: when False, CONFIG_PROTECT is disabled for the real run.
        quietFail:  when True, return silently if the dry run fails.
        pkgName:    name of the primary target package; used in one-by-one
                    mode to tell the target apart from its dependencies.
        """
        # features:
        # 1. auto resolve circular dependencies
        # 2. (one-by-one + try-version) mode

        if not cfgProtect:
            cpStr = "CONFIG_PROTECT=\"-* /.fpemud-refsystem\""          # FIXME
        else:
            cpStr = ""
        pretendCmd2 = "emerge -p %s" % (pretendCmd)
        realCmd2 = "%s /usr/bin/emerge %s" % (cpStr, realCmd)

        # dry run first to classify the failure mode
        rc, out = Util.shellCallWithRetCode(pretendCmd2)
        if rc != 0:
            smartEmergeUseFile = os.path.join(FmConst.portageCfgUseDir, "smart-emerge")

            if quietFail:
                return

            if "possible to break this cycle" in out:
                # change use flag temporaryly according to the suggestion
                if not os.path.exists(smartEmergeUseFile):
                    info = None     # (pkg-atom, change-use)
                    for m in re.finditer(r"- (\S+/\S+) \(Change USE: (\S+)\)(\n \(.*?\))?", out):
                        if m.group(3) is None:
                            info = (m.group(1), m.group(2))
                            break
                    if info is not None:
                        try:
                            with open(smartEmergeUseFile, "w") as f:
                                f.write("=%s %s" % (info[0], info[1]))
                            self.smartEmergePkg(pretendCmd, realCmd, cfgProtect=cfgProtect, pkgName=pkgName)
                        finally:
                            Util.forceDelete(smartEmergeUseFile)
                        # NOTE(review): this is the *second* recursive call --
                        # the operation was already re-run inside the try block
                        # above; confirm running it again after the temporary
                        # USE file is deleted is intentional and not a
                        # copy/paste duplication.
                        self.smartEmergePkg(pretendCmd, realCmd, cfgProtect=cfgProtect, pkgName=pkgName)
            elif "Multiple package instances within a single package slot" in out:
                # use (strict one-by-one) mode when slot conflict occured
                for line in out.split("\n"):
                    m = re.search("^\\[ebuild(.*?)\\] (\\S+)", line)
                    if m is not None:
                        pkgAtom = m.group(2)
                        if pkgName is not None and pkgName == Util.portageGetPkgNameFromPkgAtom(pkgAtom):
                            # this is the target package
                            Util.shellExec(realCmd2)
                        else:
                            rc2, out2 = Util.shellCallWithRetCode("emerge -p -uN -1 =%s" % (pkgAtom))
                            if rc2 != 0:
                                if "Multiple package instances within a single package slot" in out2:
                                    # ignore slot conflict package currently
                                    continue
                            Util.shellExec("emerge -uN -1 =%s" % (pkgAtom))
            else:
                # we need user intervention
                Util.shellExec(realCmd2)
        else:
            try:
                Util.shellExec(realCmd2)
            except subprocess.CalledProcessError as e:
                # terminated by signal, no further processing needed
                if e.returncode > 128:
                    raise

                # use (one-by-one + try-version) mode when failure
                out = Util.shellCall(pretendCmd2)
                complete = False
                while not complete:
                    complete = True
                    for line in out.split("\n"):
                        m = re.search("^\\[ebuild.*?\\] (\\S+)", line)
                        if m is None:
                            continue
                        pkgAtom = m.group(1)

                        if pkgName is not None and pkgName == Util.portageGetPkgNameFromPkgAtom(pkgAtom):
                            # this is the target package
                            Util.shellExec(realCmd2)
                        else:
                            rc2 = None
                            out2 = None
                            try:
                                # NOTE(review): the mask file is literally named
                                # "temp-" (trailing dash) -- presumably a
                                # package-specific suffix was intended; confirm.
                                tempMaskFile = os.path.join(FmConst.portageCfgMaskDir, "temp-")
                                with open(tempMaskFile, "w") as f:
                                    f.write("=%s" % (pkgAtom))
                                # NOTE(review): cmdCallWithRetCode here, while
                                # shellCallWithRetCode is used everywhere else
                                # in this function -- confirm the difference is
                                # intentional.
                                rc2, out2 = Util.cmdCallWithRetCode(pretendCmd2)
                            finally:
                                Util.forceDelete(tempMaskFile)

                            if rc2 != 0:
                                # we need this specific package version
                                Util.shellExec("emerge -uN -1 =%s" % (pkgAtom))
                            else:
                                # alternative package version exists
                                try:
                                    Util.shellExec("emerge -uN -1 =%s" % (pkgAtom))
                                except subprocess.CalledProcessError as e:
                                    # terminated by signal, no further processing needed
                                    if e.returncode > 128:
                                        raise

                                    # mask the current version, fall back to alternative
                                    bugfixMaskFile = os.path.join(FmConst.portageCfgMaskDir, "bugfix")
                                    buf = ""
                                    if os.path.exists(bugfixMaskFile):
                                        with open(bugfixMaskFile, "r") as f:
                                            buf = f.read()
                                    if buf == "" or buf[-1] == "\n":
                                        buf += "=%s\n" % (pkgAtom)
                                    else:
                                        buf += "\n=%s\n" % (pkgAtom)
                                    with open(bugfixMaskFile, "w") as f:
                                        f.write(buf)
                                    # restart the one-by-one scan with the
                                    # refreshed pretend output
                                    out = out2
                                    complete = False
                                    break





    def _exportToIsoFile(self):
        """Write all built target systems into a UDF image at self._devPath.

        Layout: /README.TXT at the root, plus one /os/<dirname> directory per
        arch holding kernel, initramfs, memtest binaries and the squashed
        rootfs files.
        """
        # all targets have been built
        assert all([x["completed"] for x in self._archInfoDict.values()])

        iso = pycdlib.PyCdlib()
        iso.new(udf="2.60")
        try:
            # add README.TXT
            buf = ""
            buf += 'This disc contains a "UDF" file system and requires an operating system\n'
            buf += 'that supports the ISO-13346 "UDF" file system specification.\n'
            buf = buf.encode("iso8859-1")
            # BUGFIX: io.ByteIO does not exist (AttributeError at runtime);
            # the in-memory binary stream class is io.BytesIO.
            iso.add_fp(io.BytesIO(buf), len(buf), iso_path="/README.TXT")

            # add files
            f = iso.get_udf_facade()
            f.add_directory("/os")
            for arch, v in self._archInfoDict.items():
                tmpRootfsDir = self.__tmpRootfsDir(arch)
                dstOsDir = os.path.join("/os", self._archInfoDict[arch]["dirname"])

                f.add_directory(dstOsDir)
                f.add_file(os.path.join(tmpRootfsDir, "boot", "vmlinuz"), os.path.join(dstOsDir, "vmlinuz"))
                f.add_file(os.path.join(tmpRootfsDir, "boot", "initramfs.img"), os.path.join(dstOsDir, "initramfs.img"))
                f.add_file(os.path.join(tmpRootfsDir, "usr", "share", "memtest86+", "memtest.efi64"), os.path.join(dstOsDir, "memtest.efi64"))
                f.add_file(os.path.join(tmpRootfsDir, "usr", "share", "memtest86+", "memtest64.bios"), os.path.join(dstOsDir, "memtest64.bios"))
                Util.makeSquashedRootfsFiles(tmpRootfsDir, dstOsDir)

            # add boot files
            # TODO: bootloader files are not added yet, so the image is not bootable
            pass

            # write
            iso.write(self._devPath)
        finally:
            iso.close()




# name of the help file placed on the generated boot media
README_FILENAME = "README.txt"

# Help text for the boot media; %DISK_NAME% is substituted by the caller.
# BUGFIX: corrected the user-facing typo "diables" -> "disables" (nogpm).
README_CONTENT = """
This lists the possible command line options that can be used to tweak the boot
process of this %DISK_NAME%.  This list contains a few options that are built-in
to the kernel, but that have been proven very useful. Also, all options that
start with "do" have a "no" inverse, that does the opposite.  For example, "doscsi"
enables SCSI support in the initial ramdisk boot, while "noscsi" disables it.


Hardware options:
acpi=on         This loads support for ACPI and also causes the acpid daemon to
                be started by the CD on boot.  This is only needed if your
                system requires ACPI to function properly.  This is not
                required for Hyperthreading support.
acpi=off        Completely disables ACPI.  This is useful on some older systems
                and is also a requirement for using APM.  This will disable any
                Hyperthreading support of your processor.
console=X       This sets up serial console access for the CD.  The first
                option is the device, usually ttyS0 on x86, followed by any
                connection options, which are comma separated.  The default
                options are 9600,8,n,1.
dmraid=X        This allows for passing options to the device-mapper RAID
                subsystem.  Options should be encapsulated in quotes.
doapm           This loads APM driver support.  This requires you to also use
                acpi=off.
dopcmcia        This loads support for PCMCIA and Cardbus hardware and also
                causes the pcmcia cardmgr to be started by the CD on boot.
                This is only required when booting from PCMCIA/Cardbus devices.
doscsi          This loads support for most SCSI controllers.  This is also a
                requirement for booting most USB devices, as they use the SCSI
                subsystem of the kernel.
hda=stroke      This allows you to partition the whole hard disk even when your
                BIOS is unable to handle large disks.  This option is only used
                on machines with an older BIOS.  Replace hda with the device
                that is requiring this option.
ide=nodma       This forces the disabling of DMA in the kernel and is required
                by some IDE chipsets and also by some CDROM drives.  If your
                system is having trouble reading from your IDE CDROM, try this
                option.  This also disables the default hdparm settings from
                being executed.
noapic          This disables the Advanced Programmable Interrupt Controller
                that is present on newer motherboards.  It has been known to
                cause some problems on older hardware.
nodetect        This disables all of the autodetection done by the CD,
                including device autodetection and DHCP probing.  This is
                useful for doing debugging of a failing CD or driver.
nodhcp          This disables DHCP probing on detected network cards.  This is
                useful on networks with only static addresses.
nodmraid        Disables support for device-mapper RAID, such as that used for
                on-board IDE/SATA RAID controllers.
nofirewire      This disables the loading of Firewire modules.  This should
                only be necessary if your Firewire hardware is causing
                a problem with booting the CD.
nogpm           This disables gpm console mouse support.
nohotplug       This disables the loading of the hotplug and coldplug init
                scripts at boot.  This is useful for doing debugging of a
                failing CD or driver.
nokeymap        This disables the keymap selection used to select non-US
                keyboard layouts.
nolapic         This disables the local APIC on Uniprocessor kernels.
nosata          This disables the loading of Serial ATA modules.  This is used
                if your system is having problems with the SATA subsystem.
nosmp           This disables SMP, or Symmetric Multiprocessing, on SMP-enabled
                kernels.  This is useful for debugging SMP-related issues with
                certain drivers and motherboards.
nosound         This disables sound support and volume setting.  This is useful
                for systems where sound support causes problems.
nousb           This disables the autoloading of USB modules.  This is useful
                for debugging USB issues.
slowusb         This adds some extra pauses into the boot process for slow
                USB CDROMs, like in the IBM BladeCenter.

Volume/Device Management:
doevms          This enables support for IBM's pluggable EVMS, or Enterprise
                Volume Management System.  This is not safe to use with lvm2.
dolvm           This enables support for Linux's Logical Volume Management.
                This is not safe to use with evms2.

Screen reader access:
speakup.synth=synth  starts speakup using a given synthesizer.
                     supported synths are acntpc, acntsa, apollo, audptr, bns,
                     decext, dectlk, dtlk, keypc, ltlk, spkout and txprt.
                     Also, soft is supported for software speech and dummy is
                     supported for testing.
speakup.quiet=1      sets the synthesizer not to speak until a key is pressed.
speakup_SYNTH.port=n sets the port for internal synthesizers.
speakup_SYNTH.ser=n  sets the serial port for external synthesizers.

Other options:
debug           Enables debugging code.  This might get messy, as it displays
                a lot of data to the screen.
docache         This caches the entire runtime portion of the CD into RAM,
                which allows you to umount /mnt/cdrom and mount another CDROM.
                This option requires that you have at least twice as much
                available RAM as the size of the CD.
doload=X        This causes the initial ramdisk to load any module listed, as
                well as dependencies.  Replace X with the module name.
                Multiple modules can be specified by a comma-separated list.
dosshd          Starts sshd on boot, which is useful for unattended installs.
passwd=foo      Sets whatever follows the equals as the root password, which
                is required for dosshd since we scramble the root password.
noload=X        This causes the initial ramdisk to skip the loading of a
                specific module that may be causing a problem.  Syntax matches
                that of doload.
nonfs           Disables the starting of portmap/nfsmount on boot.
nox             This causes an X-enabled LiveCD to not automatically start X,
                but rather, to drop to the command line instead.
scandelay       This causes the CD to pause for 10 seconds during certain
                portions the boot process to allow for devices that are slow to
                initialize to be ready for use.
scandelay=X     This allows you to specify a given delay, in seconds, to be
                added to certain portions of the boot process to allow for
                devices that are slow to initialize to be ready for use.
                Replace X with the number of seconds to pause.
"""


# f.write("    linux %s/vmlinuz root=/dev/ram0 init=/linuxrc dev_uuid=%s looptype=squashfs loop=%s/rootfs.sqfs cdroot dokeymap docache gk.hw.use-modules_load=1\n" % (osArchDir, uuid, osArchDir))            # without gk.hw.use-modules_load=1, squashfs module won't load, sucks


        # ftKernel = gstage4.target_features.UseGenkernel(kernel_sources_pkg_atom=self._targetSystemInfo[arch]["releng-spec"].kernel_sources_pkg_atom,
        #                                                 kernel_config=self._targetSystemInfo[arch]["releng-spec"].kernel_config,
        #                                                 check_kernel_config_version=True)


# distkmerge_get_image_path() {
#     case ${clst_basearch} in
#         amd64|x86)
#             echo arch/x86/boot/bzImage
#             ;;
#         arm64)
#             echo arch/arm64/boot/Image.gz
#             ;;
#         arm)
#             echo arch/arm/boot/zImage
#             ;;
#         hppa|ppc|ppc64)
#             echo ./vmlinux
#             ;;
#         riscv)
#             echo arch/riscv/boot/Image.gz
#             ;;
#         *)
#             die "unsupported ARCH=${clst_basearch}"
#             ;;
#     esac
# }


#   dracut "${DRACUT_ARGS[@]}" || exit 1


#             # USE="-initramfs" run_merge --update "${ksource}"

#   # Kernel already built, let's run dracut to make initramfs
#   distkernel_source_path=$(equery -Cq f ${ksource} | grep "/usr/src/linux-" -m1)
#   distkernel_image_path=$(distkmerge_get_image_path)
#   distkernel_version=${distkernel_source_path##"/usr/src/linux-"}

#   DRACUT_ARGS=(
#     --force
#     --kernel-image="${distkernel_source_path}/${distkernel_image_path}"
#     --kver="${distkernel_version}"
#   )



    @staticmethod
    def systemdIsServiceEnabled(serviceName):
        # FIXME: should not rpc to systemd
        """Return True if the unit file of serviceName is in state "enabled"."""
        manager = Gio.DBusProxy.new_for_bus_sync(Gio.BusType.SYSTEM,
                                                 Gio.DBusProxyFlags.NONE,
                                                 None,
                                                 "org.freedesktop.systemd1",            # bus_name
                                                 "/org/freedesktop/systemd1",           # object_path
                                                 "org.freedesktop.systemd1.Manager")    # interface_name
        state = manager.GetUnitFileState("(s)", serviceName)
        return state == "enabled"


    @staticmethod
    def systemdIsUnitRunningByDbus(unitName):
        """Return True if unitName is currently in ActiveState "active" (via D-Bus)."""

        # small local factory: all proxies share the bus name, only the
        # object path and interface differ
        def proxy(objectPath, interfaceName):
            return Gio.DBusProxy.new_for_bus_sync(Gio.BusType.SYSTEM,
                                                  Gio.DBusProxyFlags.NONE,
                                                  None,
                                                  "org.freedesktop.systemd1",       # bus_name
                                                  objectPath,                       # object_path
                                                  interfaceName)                    # interface_name

        # resolve the unit's object path through the manager, then query it
        unitPath = proxy("/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager").GetUnit("(s)", unitName)
        return proxy(unitPath, "org.freedesktop.systemd1.Unit").ActiveState == "active"

    @staticmethod
    def systemdGetAllServicesEnabled():
        # FIXME: should not rpc to systemd
        """Return the basenames of all unit files whose state is "enabled"."""
        manager = Gio.DBusProxy.new_for_bus_sync(Gio.BusType.SYSTEM,
                                                 Gio.DBusProxyFlags.NONE,
                                                 None,
                                                 "org.freedesktop.systemd1",            # bus_name
                                                 "/org/freedesktop/systemd1",           # object_path
                                                 "org.freedesktop.systemd1.Manager")    # interface_name
        return [os.path.basename(unitFile) for unitFile, unitState in manager.ListUnitFiles() if unitState == "enabled"]

    @staticmethod
    def updateDir(oriDir, newDir, keepList=None):
        """Update oriDir by newDir, meta-data is also merged
           Elements in keepList are glob patterns, and they should not appear in newDir"""

        # BUGFIX: the default used to be a mutable list literal ([]), which is
        # shared across calls; use None as the sentinel instead.  Passing an
        # explicit list still works exactly as before.
        if keepList is None:
            keepList = []

        assert os.path.isabs(oriDir) and os.path.isabs(newDir)
        keepList = Util.getAbsPathList(oriDir, keepList)

        # call assistant
        dirCmpObj = filecmp.dircmp(oriDir, newDir)
        Util._updateDirImpl(oriDir, newDir, keepList, dirCmpObj)

    @staticmethod
    def _updateDirImpl(oriDir, newDir, keepAbsList, dirCmpObj):
        """Recursive worker for updateDir(): make oriDir mirror newDir.

        keepAbsList: absolute glob patterns of entries to preserve in oriDir.
        dirCmpObj:   filecmp.dircmp of (oriDir, newDir) for this level.
        """
        # fixme: should consider acl, sparse file, the above is same

        assert len(dirCmpObj.common_funny) == 0
        assert len(dirCmpObj.funny_files) == 0

        # delete files that exist only in oriDir (unless kept)
        for fb in dirCmpObj.left_only:
            of = os.path.join(oriDir, fb)
            if any(x for x in keepAbsList if fnmatch.fnmatch(of, x)):
                continue
            if os.path.isdir(of):
                shutil.rmtree(of)
            else:
                os.remove(of)

        # add new directories and files
        for fb in dirCmpObj.right_only:
            of = os.path.join(oriDir, fb)
            nf = os.path.join(newDir, fb)
            assert not any(x for x in keepAbsList if fnmatch.fnmatch(of, x))
            assert Util.isTrivalFileOrDir(of)
            # BUGFIX: "of" does not exist yet for right-only entries, so
            # os.path.isdir(of) was always False and directories from newDir
            # were handed to shutil.copy2(), which fails on a directory
            # source; decide based on the source path "nf" instead.
            if os.path.isdir(nf):
                shutil.copytree(nf, of)
            else:
                shutil.copy2(nf, of)
            os.chown(of, os.stat(nf).st_uid, os.stat(nf).st_gid)

        # copy stat info for common directories
        for fb in dirCmpObj.common_dirs:
            of = os.path.join(oriDir, fb)
            nf = os.path.join(newDir, fb)
            assert not any(x for x in keepAbsList if fnmatch.fnmatch(of, x))
            assert Util.isTrivalFileOrDir(of)
            shutil.copystat(nf, of)
            os.chown(of, os.stat(nf).st_uid, os.stat(nf).st_gid)

        # copy common files
        for fb in dirCmpObj.common_files:
            of = os.path.join(oriDir, fb)
            nf = os.path.join(newDir, fb)
            assert not any(x for x in keepAbsList if fnmatch.fnmatch(of, x))
            assert Util.isTrivalFileOrDir(of)
            shutil.copy2(nf, of)
            os.chown(of, os.stat(nf).st_uid, os.stat(nf).st_gid)

        # recursive operation
        for fb2, dirCmpObj2 in list(dirCmpObj.subdirs().items()):
            of2 = os.path.join(oriDir, fb2)
            nf2 = os.path.join(newDir, fb2)
            Util._updateDirImpl(of2, nf2, keepAbsList, dirCmpObj2)


        # mr = mrget.target_urls("mirror://gentoo-portage", protocols=["rsync"])[0]
        # subprocess.check_call(["rsync", "-rlptD", "-z", "-hhh", "--no-motd", "--delete", "--info=progress2", mr, repoDir])   # we use "-rlptD" instead of "-a" so that the remote user/group is ignored


class FmRescueManager:

    """Install and uninstall the on-disk Rescue OS: a kernel, initramfs and
    squashed rootfs placed under /boot, registered with the boot-loader."""

    def __init__(self, param):
        # param: global parameter object; infoPrinter is used for progress output
        self.param = param
        self.infoPrinter = self.param.infoPrinter

    def installRescueOs(self):
        """Build the Rescue OS and install it into /boot, then update the boot-loader."""
        self.infoPrinter.printInfo(">> Preparing...")
        machine = None
        layout = None
        bbkiObj = None
        targetSysBuilder = None
        if True:
            self.param.basicCheck()
            DynCfgModifier.update()
            machine = strict_hwcfg.probe()
            layout = strict_hdds.get_storage_layout()
            bbkiObj = BbkiWrapper(machine, layout)
            targetSysBuilder = AssistOsBuilder(self.infoPrinter, machine, False, "Rescue OS")
            print("")

        self.infoPrinter.printInfo(">> Downloading files...")
        targetSysBuilder.downloadFiles(False)
        print("")

        self.infoPrinter.printInfo(">> Building Rescue OS...")
        tmpRootfsDir = os.path.join(self.param.tmpDirOnHdd, "rescue-os-rootfs")
        targetSysBuilder.build(Util.getCpuArch(), tmpRootfsDir)
        print("")

        # BootDirWriter presumably makes /boot writable for the duration of
        # the block -- confirm against its implementation
        with BootDirWriter(layout):
            self.infoPrinter.printInfo(">> Installing Rescue OS into /boot...")
            if True:
                # create os directory
                Util.forceDelete(bbkiObj.rescue_os_spec.root_dir)
                os.makedirs(bbkiObj.rescue_os_spec.root_dir)

                # place kernel and initramfs
                shutil.copy(os.path.join(tmpRootfsDir, "boot", "vmlinuz"), bbkiObj.rescue_os_spec.kernel_filepath)
                shutil.copy(os.path.join(tmpRootfsDir, "boot", "initramfs.img"), bbkiObj.rescue_os_spec.initrd_filepath)

                # create livecd marker file, genkernel's initrd requires this file
                markerFile = os.path.join(bbkiObj.rescue_os_spec.root_dir, "livecd")
                with open(markerFile, "w") as f:
                    f.write("")

                # generate rootfs squash image
                sqfsFile, _ = Util.makeSquashedRootfsFiles(tmpRootfsDir, bbkiObj.rescue_os_spec.root_dir, exclude=["boot/vmlinuz", "boot/initramfs.img"])

                # generate kernel cmdline file
                # (EFI: paths are relative to the boot partition, so the
                # leading "/boot" is stripped; BIOS: absolute paths are kept)
                if layout.boot_mode == strict_hdds.StorageLayout.BOOT_MODE_EFI:
                    assert sqfsFile.startswith("/boot")
                    assert markerFile.startswith("/boot")
                    kcmdSqfsFile = sqfsFile[len("/boot"):]
                    kcmdMarkerFile = markerFile[len("/boot"):]
                    # devPath = layout.dev_boot
                elif layout.boot_mode == strict_hdds.StorageLayout.BOOT_MODE_BIOS:
                    kcmdSqfsFile = sqfsFile
                    kcmdMarkerFile = markerFile
                    # devPath = layout.dev_rootfs
                else:
                    assert False
                with open(bbkiObj.rescue_os_spec.kernel_cmdline_filepath, "w") as f:
                    f.write(" ".join([
                        "root=/dev/ram0",
                        "init=/linuxrc",
                        "looptype=squashfs",
                        "loop=%s" % (kcmdSqfsFile),
                        "cdroot",
                        "cdroot_marker=%s" % (kcmdMarkerFile),
                        "dokeymap",
                        "gk.hw.use-modules_load=1",            # without gk.hw.use-modules_load=1, squashfs module won't load, sucks
                    ]))
                    # "docache",
                    # "cdroot=UUID=%s" % (Util.getBlkDevUuid(devPath)),
            print("")

            self.infoPrinter.printInfo(">> Updating boot-loader...")
            bbkiObj.updateBootloaderAfterRescueOsChange()
            print("")

    def uninstallRescueOs(self):
        """Remove the installed Rescue OS from /boot and update the boot-loader.

        Returns 1 when no Rescue OS is installed.
        NOTE(review): installRescueOs() returns None on success while this
        method returns 1 on the not-installed path -- confirm callers check
        the return value consistently.
        """
        self.infoPrinter.printInfo(">> Preparing...")
        machine = None
        layout = None
        bbkiObj = None
        if True:
            self.param.basicCheck()
            machine = strict_hwcfg.probe()
            layout = strict_hdds.get_storage_layout()
            bbkiObj = BbkiWrapper(machine, layout)
            print("")

        if not bbkiObj.isRescueOsInstalled():
            print("Rescue OS is not installed.", file=sys.stderr)
            return 1

        with BootDirWriter(layout):
            self.infoPrinter.printInfo(">> Uninstalling Rescue OS...")
            Util.forceDelete(bbkiObj.rescue_os_spec.root_dir)
            print("")

            self.infoPrinter.printInfo(">> Updating boot-loader...")
            bbkiObj.updateBootloaderAfterRescueOsChange()
            print("")


class CloudCacheGentooReleng:
    """Local git cache of Gentoo's releng repository, exposing catalyst spec files.

    Only "livecd-stage2" specs (and only the amd64 arch) are fully supported;
    the other spec classes are placeholders whose constructors intentionally
    raise NotImplementedError.
    """

    class Stage1Spec:

        def __init__(self, baseDir, arch, subarch, fullfn, buf):
            # placeholder; signature matches get_spec()'s constructor call
            # (the old zero-argument __init__ made get_spec() fail with a
            # TypeError before the not-implemented marker was ever reached)
            raise NotImplementedError("stage1 spec files are not supported yet")

    class Stage2Spec:

        def __init__(self, baseDir, arch, subarch, fullfn, buf):
            # placeholder; signature matches get_spec()'s constructor call
            raise NotImplementedError("stage2 spec files are not supported yet")

    class Stage3Spec:

        def __init__(self, baseDir, arch, subarch, fullfn, buf):
            # placeholder; signature matches get_spec()'s constructor call
            raise NotImplementedError("stage3 spec files are not supported yet")

    class LivecdStage1Spec:

        def __init__(self, baseDir, arch, subarch, fullfn, buf):
            # placeholder; signature matches get_spec()'s constructor call
            raise NotImplementedError("livecd-stage1 spec files are not supported yet")

    class LivecdStage2Spec:
        """Parsed "livecd-stage2" spec file.

        Extracts the profile, the dist-kernel flag, the dracut arguments and
        the kernel config (whose @REPO_DIR@-relative path is resolved against
        *baseDir* and read eagerly).  Raises Exception when a mandatory key
        is missing or invalid.
        """

        def __init__(self, baseDir, arch, subarch, fullfn, buf):
            self.arch = arch
            self.subarch = subarch
            self.profile = None
            self.dist_kernel = None
            self.dracut_args = None
            self.kernel_config = None

            for line in buf.split("\n"):
                m = re.fullmatch(r"profile:\s+(.*)", line)
                if m is not None:
                    self.profile = m.group(1)
                    continue
                m = re.fullmatch(r"boot/kernel/gentoo/distkernel:\s+(.*)", line)
                if m is not None:
                    self.dist_kernel = m.group(1)
                    continue
                m = re.fullmatch(r"boot/kernel/gentoo/dracut_args:\s+(.*)", line)
                if m is not None:
                    self.dracut_args = m.group(1)
                    continue
                m = re.fullmatch(r"boot/kernel/gentoo/config:\s+(.*)", line)
                if m is not None:
                    # the spec references the config relative to the repository root
                    fn = m.group(1).replace("@REPO_DIR@/", "")
                    self.kernel_config = pathlib.Path(os.path.join(baseDir, fn)).read_text()
                    continue

            # all four keys are mandatory
            if self.profile is None:
                raise Exception("no key \"profile\" in \"%s\"" % (fullfn))
            if self.dist_kernel != "yes":
                raise Exception("no key or invalid value for key \"boot/kernel/gentoo/distkernel\" in \"%s\"" % (fullfn))
            if self.dracut_args is None:
                raise Exception("no key \"boot/kernel/gentoo/dracut_args\" in \"%s\"" % (fullfn))
            if self.kernel_config is None:
                raise Exception("no key \"boot/kernel/gentoo/config\" in \"%s\"" % (fullfn))

    def __init__(self, cacheDir):
        # cacheDir: local directory holding the releng.git checkout
        self._baseUrl = "https://anongit.gentoo.org/git/proj/releng.git"
        self._dir = cacheDir

    def sync(self):
        """Clone or update the local releng.git checkout."""
        robust_layer.simple_git.pull(self._dir, reclone_on_failure=True, url=self._baseUrl)

    def get_arch_list(self):
        """Return the sorted union of arch directories from specs and specs-qemu."""
        assert self._isSynced()
        list1 = os.listdir(os.path.join(self._dir, "releases", "specs"))
        list2 = os.listdir(os.path.join(self._dir, "releases", "specs-qemu"))
        return sorted(set(list1 + list2))

    def get_subarch_list(self, arch):
        assert self._isSynced()
        # placeholder: an explicit raise (unlike a bare assert) also fails under "python -O"
        raise NotImplementedError()

    def get_spec(self, arch, subarch, name):
        """Load and parse the spec file *name* for arch/subarch.

        Returns one of the *Spec classes depending on the spec's "target" key.
        Raises FileNotFoundError when the spec file is absent or its subarch
        does not match; Exception for an unknown target.
        """
        assert self._isSynced()
        assert arch in self.get_arch_list()

        if arch == "amd64":
            assert subarch == "amd64"
            specFullfn = os.path.join(self._dir, "releases", "specs", arch, "%s.spec" % (name))
            if not os.path.exists(specFullfn):
                raise FileNotFoundError("no spec file found")
        else:
            assert False

        buf = pathlib.Path(specFullfn).read_text()
        ret = self._parse(buf)

        if ret["subarch"] != subarch:
            raise FileNotFoundError("no spec file found")

        if ret["target"] == "stage1":
            return self.Stage1Spec(self._dir, arch, subarch, specFullfn, buf)
        elif ret["target"] == "stage2":
            return self.Stage2Spec(self._dir, arch, subarch, specFullfn, buf)
        elif ret["target"] == "stage3":
            return self.Stage3Spec(self._dir, arch, subarch, specFullfn, buf)
        elif ret["target"] == "livecd-stage1":
            return self.LivecdStage1Spec(self._dir, arch, subarch, specFullfn, buf)
        elif ret["target"] == "livecd-stage2":
            return self.LivecdStage2Spec(self._dir, arch, subarch, specFullfn, buf)
        else:
            raise Exception("unknown target \"%s\" in \"%s\"" % (ret["target"], specFullfn))

    def _isSynced(self):
        # a .git/config is good evidence that sync() has run at least once
        return os.path.exists(os.path.join(self._dir, ".git", "config"))

    def _parse(self, buf):
        """Extract the "subarch" and "target" keys from a spec file's text."""
        ret = {
            "subarch": None,
            "target": None,
        }
        for line in buf.split("\n"):
            for key in ret.keys():
                m = re.fullmatch(r"%s:\s+(.*)" % (key), line)
                if m is not None:
                    ret[key] = m.group(1)
                    break
        return ret




class SwapLvmLv:
    """A swap area backed by the dedicated LVM logical volume (LvmUtil.swapLvName)."""

    @staticmethod
    def proxy(func):
        # Decorator that forwards a method or property of the same name to
        # self._swap.
        # NOTE(review): this class sets self._bSwapLv, never self._swap —
        # proxy() looks vestigial here; confirm against its users.
        if isinstance(func, property):
            def f_get(self):
                return getattr(self._swap, func.fget.__name__)
            f_get.__name__ = func.fget.__name__
            return property(f_get)
        else:
            def f(self, *args):
                return getattr(self._swap, func.__name__)(*args)
            return f

    def __init__(self, bSwapLv):
        # bSwapLv: True when the swap LV currently exists
        self._bSwapLv = bSwapLv

    @property
    def dev_swap(self):
        """Device path of the swap LV, or None when it does not exist."""
        return LvmUtil.swapLvDevPath if self._bSwapLv else None

    def create_swap_lv(self):
        """Create the swap LV sized by system policy (Util.getSwapSizeInGb)."""
        assert not self._bSwapLv
        Util.cmdCall("lvm", "lvcreate", "-L", "%dGiB" % (Util.getSwapSizeInGb()), "-n", LvmUtil.swapLvName, LvmUtil.vgName)
        self._bSwapLv = True

    def remove_swap_lv(self):
        """Remove the swap LV."""
        assert self._bSwapLv
        Util.cmdCall("lvm", "lvremove", LvmUtil.swapLvDevPath)
        self._bSwapLv = False

    def get_swap_size(self):
        """Return the swap LV's size as reported by the block device."""
        assert self._bSwapLv
        return Util.getBlkDevSize(LvmUtil.swapLvDevPath)

    def check(self, auto_fix, error_callback):
        """Verify the swap LV exists and is big enough.

        With auto_fix set, an undersized LV that is not currently in use as
        swap is recreated; otherwise error_callback is invoked.
        """
        if not self._bSwapLv:
            error_callback(errors.CheckCode.SWAP_NOT_ENABLED)
        else:
            if Util.getBlkDevSize(LvmUtil.swapLvDevPath) < Util.getSwapSize():
                if auto_fix:
                    if not Util.isSwapFileOrPartitionBusy(LvmUtil.swapLvDevPath):
                        self.remove_swap_lv()
                        self.create_swap_lv()
                        return
                error_callback(errors.CheckCode.SWAP_SIZE_TOO_SMALL, "LV")

    @staticmethod
    def lvmEnsureVgLvAndGetPvList(storageLayoutName):
        """Ensure the LVM VG and root LV exist; return the VG's PV device paths."""
        # check vg
        if not Util.cmdCallTestSuccess("lvm", "vgdisplay", LvmUtil.vgName):
            raise errors.StorageLayoutParseError(storageLayoutName, errors.LVM_VG_NOT_FOUND(LvmUtil.vgName))

        # get pv list
        pvList = []
        out = Util.cmdCall("lvm", "pvdisplay", "-c")
        for m in re.finditer("(/dev/\\S+):%s:.*" % (LvmUtil.vgName), out, re.M):
            pvList.append(m.group(1))

        # find root lv; build the "/dev/<vg>/<lv>" path from the LvmUtil
        # constants (was hard-coded "/dev/hdd/root") so the two stay consistent
        out = Util.cmdCall("lvm", "lvdisplay", "-c")
        if re.search("/dev/%s/%s:%s:.*" % (LvmUtil.vgName, LvmUtil.rootLvName, LvmUtil.vgName), out, re.M) is None:
            raise errors.StorageLayoutParseError(storageLayoutName, errors.LVM_LV_NOT_FOUND(LvmUtil.rootLvDevPath))

        return pvList

    @staticmethod
    def swapLvDetectAndNew(storageLayoutName):
        """Detect whether the swap LV exists and return a matching SwapLvmLv."""
        out = Util.cmdCall("lvm", "lvdisplay", "-c")
        # build the "/dev/<vg>/<lv>" path from the LvmUtil constants
        # (was hard-coded "/dev/hdd/swap")
        if re.search("/dev/%s/%s:%s:.*" % (LvmUtil.vgName, LvmUtil.swapLvName, LvmUtil.vgName), out, re.M) is not None:
            if Util.getBlkDevFsType(LvmUtil.swapLvDevPath) != Util.fsTypeSwap:
                raise errors.StorageLayoutParseError(storageLayoutName, errors.SWAP_DEV_HAS_INVALID_FS_FLAG(LvmUtil.swapLvDevPath))
            return SwapLvmLv(True)
        else:
            return SwapLvmLv(False)


class LvmUtil:
    """Constants and helpers for the host's single LVM volume group ("hdd")."""

    vgName = "hdd"

    rootLvName = "root"
    rootLvDevPath = "/dev/mapper/hdd.root"

    swapLvName = "swap"
    swapLvDevPath = "/dev/mapper/hdd.swap"

    class Error(Exception):
        pass

    @classmethod
    def getSlaveDevPathList(cls, vgName):
        """Return the PV device paths backing *vgName*.

        Raises Error when any PV's device is absent (pvdisplay reports
        "[unknown]" for it).
        """
        ret = []
        out = Util.cmdCall("lvm", "pvdisplay", "-c")
        for m in re.finditer("^\\s*(\\S+):%s:.*" % (vgName), out, re.M):
            if m.group(1) == "[unknown]":
                raise cls.Error("volume group %s not fully loaded" % (vgName))
            ret.append(m.group(1))
        return ret

    @staticmethod
    def addPvToVg(pvDevPath, vgName, mayCreate=False):
        """Initialize *pvDevPath* as a PV and add it to *vgName*, creating the VG when allowed."""
        Util.cmdCall("lvm", "pvcreate", pvDevPath)
        if mayCreate and not Util.cmdCallTestSuccess("lvm", "vgdisplay", vgName):
            Util.cmdCall("lvm", "vgcreate", vgName, pvDevPath)
        else:
            Util.cmdCall("lvm", "vgextend", vgName, pvDevPath)

    @classmethod
    def removePvFromVg(cls, pvDevPath, vgName):
        """Migrate data off *pvDevPath* and detach it from *vgName*."""
        # NOTE(review): only exit code 5 is treated as success for pvmove —
        # confirm against lvm's exit-code convention
        rc, out = Util.cmdCallWithRetCode("lvm", "pvmove", pvDevPath)
        if rc != 5:
            raise cls.Error("failed")

        if pvDevPath in LvmUtil.getSlaveDevPathList(vgName):
            Util.cmdCall("lvm", "vgreduce", vgName, pvDevPath)

    @staticmethod
    def createLvWithDefaultSize(vgName, lvName):
        """Create *lvName* in *vgName*, sized to half of the VG's free extents."""
        out = Util.cmdCall("lvm", "vgdisplay", "-c", vgName)
        freePe = int(out.split(":")[15])        # colon-field 15 of "vgdisplay -c" is the free-PE count
        Util.cmdCall("lvm", "lvcreate", "-l", "%d" % (freePe // 2), "-n", lvName, vgName)

    @staticmethod
    def activateAll():
        """Activate every logical volume in every volume group."""
        Util.cmdCall("lvm", "vgchange", "-ay")

    @staticmethod
    def getVgList():
        """Return the non-empty output lines of "vgdisplay -s" (one per VG)."""
        out = Util.cmdCall("lvm", "vgdisplay", "-s")
        return [x for x in out.split("\n") if x != ""]

    @staticmethod
    def autoExtendLv(lvDevPath):
        """Grow *lvDevPath* when it is over 90% full, targeting ~70% utilization.

        Capacity figures from getBlkDevCapacity() are assumed to be in MiB
        (TODO confirm); the growth is rounded up to whole GiB for lvextend's
        "-L+<n>G" argument.
        """
        total, used = Util.getBlkDevCapacity(lvDevPath)
        if used / total < 0.9:
            return
        added = int(used / 0.7) - total
        # BUGFIX: the old code multiplied the GiB count back by 1024 and then
        # passed that MiB-scale number to "-L+%dG", over-extending 1024x
        added = added // 1024 + 1               # round up MiB -> GiB
        Util.cmdCall("lvm", "lvextend", "-L+%dG" % (added), lvDevPath)




    def enable(self):
        """Enable this swap unit by linking it into the "swap.target.wants" directory.

        A unit already enabled system-wide (symlink present under the
        lib_system_dir wants-directory) is left untouched.  If the /etc
        wants-directory had to be created and linking then fails, it is
        removed again so no half-finished state remains.
        """
        # FIXME: should call common implementation

        targetFullf, ftype = self._find()
        assert targetFullf is not None

        # already enabled by system
        fullf = os.path.join(self._cfgDir.lib_system_dir, "swap.target.wants", self._unitName)
        if os.path.exists(fullf):
            return

        fulld = os.path.join(self._cfgDir.etc_system_dir, "swap.target.wants")
        fullf = os.path.join(fulld, self._unitName)
        bCreateDir = (not os.path.exists(fulld))    # remember for rollback below

        if bCreateDir:
            os.mkdir(fulld)
        else:
            # "fulld" must be a normal directory
            if os.path.islink(fulld) or not os.path.isdir(fulld):
                raise CfgError('"%s" is invalid' % (fulld))

        try:
            # targetFullf is in /etc: link relative so the wants-entry points
            # at the sibling unit file
            if ftype == 0:
                target = os.path.join("..", self._unitName)
                if os.path.islink(fullf):
                    if os.readlink(fullf) == target:
                        return              # correct link already in place
                Util.forceSymlink(target, fullf)
                return

            # targetFullf is in /usr/lib: link with the absolute path
            if ftype == 1:
                if os.path.islink(fullf):
                    if os.readlink(fullf) == targetFullf:
                        return              # correct link already in place
                Util.forceSymlink(targetFullf, fullf)
                return

            assert False
        except BaseException:
            # roll back the directory we created so a failed enable leaves no trace
            if bCreateDir:
                Util.forceDelete(fulld)
            raise

    def disable(self):
        """Disable this swap unit by removing its wants-symlink under /etc.

        A unit wanted by the system-provided (lib) directory cannot be
        disabled; CfgError is raised for it.
        """
        # FIXME: should call common implementation

        foundPath, foundType = self._find()
        assert foundPath is not None

        # refuse when the unit is enabled via the system-provided directory
        libLink = os.path.join(self._cfgDir.lib_system_dir, "swap.target.wants", self._unitName)
        if os.path.exists(libLink):
            raise CfgError('"%s" can not be disabled' % (libLink))

        # drop our own symlink and prune the wants-directory if now empty
        wantsDir = os.path.join(self._cfgDir.etc_system_dir, "swap.target.wants")
        Util.forceDelete(os.path.join(wantsDir, self._unitName))
        Util.removeEmptyDir(wantsDir)



    @staticmethod
    def portageGetMakeConfList():
        # Return the "make.defaults" files of the active portage profile chain
        # (parents first), starting from whatever /etc/portage/make.profile
        # resolves to.
        # NOTE(review): the name says "MakeConf" but the helper collects
        # make.defaults files — confirm which was intended.
        return Util._portageGetMakeConfListImpl(os.path.realpath("/etc/portage/make.profile"))

    @staticmethod
    def _portageGetMakeConfListImpl(curDir):
        ret = []

        parentFn = os.path.join(curDir, "parent")
        if os.path.exists(parentFn):
            with open(parentFn) as f:
                for line in f.read().split("\n"):
                    if line.strip() != "":
                        ret += Util._portageGetMakeConfListImpl(os.path.realpath(os.path.join(curDir, line)))

        makeConfFn = os.path.join(curDir, "make.defaults")
        if os.path.exists(makeConfFn):
            ret.append(makeConfFn)

        return ret



    async def asyncStartTelnetExec(self, cmd, *kargs, loop=None):
        """Start *cmd* (plus *kargs*) on the remote host via ssh.

        Returns (process, stdout_stream); pair with asyncWaitTelnetExec().
        ``loop`` is accepted for backward compatibility but no longer
        forwarded: asyncio removed the explicit *loop* argument in Python
        3.10 (deprecated since 3.8); the running event loop is used
        implicitly.
        """
        assert self._wTelnetPort is not None

        # "-t" lets Ctrl+C control the remote process; "-e none" disables the escape character
        # FIXME: arguments are joined into a shell string without quoting
        fullCmd = "ssh -t -e none -p %d -F %s %s %s %s" % (self._wTelnetPort, self._cfgFile, self._hostname, cmd, " ".join(kargs))
        proc = await asyncio.create_subprocess_shell(fullCmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT)
        self._asyncJobCount += 1
        return (proc, proc.stdout)

    async def asyncWaitTelnetExec(self, proc):
        """Wait for a process started by asyncStartTelnetExec() to finish.

        Raises subprocess.CalledProcessError on a non-zero exit status
        (asyncio provides no equivalent exception class of its own).
        """
        try:
            rc = await proc.wait()
            if rc == 0:
                return
            raise subprocess.CalledProcessError(rc, proc.args)
        finally:
            # the job is finished either way
            self._asyncJobCount -= 1



        # FIXME: ssh sucks that it must use a shell to execute remote command
        if base64:
            args2 = list(args)
            for i in range(1, len(args2)):
                args2[i] = base64.b64encode(args2[i].encode("ascii")).decode("ascii")
        else:
            args2 = args

        # "-t" can get Ctrl+C controls remote process
        # XXXXX so that we forward signal to remote process, FIXME
        cmd = "ssh -t -e none -p %d -F %s %s %s" % (self._wTelnetPort, self._cfgFile, self._hostname, " ".join(args2))
        Util.shellExec(cmd)


    def _checkDevDirContent(self, devDir, nodeInfoList):
        """Check (and with auto-fix, repair) that *devDir* contains exactly the given device nodes.

        nodeInfoList items are (nodeName, devType, major, minor, mode, owner,
        group) tuples; devType is "b" (block) or "c" (character).  Missing or
        wrong nodes are recreated when self.p._bAutoFix is set, otherwise
        reported via self.p._errCb.  Files not in the list are removed or
        reported, and every surviving path is added to self.p._record.
        """
        assert self.__validPath(devDir)
        # node names must be relative paths within devDir
        assert all([not x[0].startswith("/") and not x[0].endswith("/") for x in nodeInfoList])

        for nodeName, devType, major, minor, mode, owner, group in nodeInfoList:
            fn = os.path.join(devDir, nodeName)
            fullfn = self.__fn2fullfn(fn)

            # check file existence (lexists: a dangling symlink still "exists" here)
            if not os.path.lexists(fullfn):
                if self.p._bAutoFix:
                    _makeDeviceNodeFile(fullfn, devType, major, minor, mode, owner, group)
                else:
                    self.p._errCb("\"%s\" does not exist." % (fn))
                    continue

            # lstat so a symlink is examined itself, not its target
            s = os.lstat(fullfn)

            # check type
            if devType == "b":
                if not stat.S_ISBLK(s.st_mode):
                    if self.p._bAutoFix:
                        os.remove(fullfn)
                        _makeDeviceNodeFile(fullfn, devType, major, minor, mode, owner, group)
                    else:
                        self.p._errCb("\"%s\" is not a block special device file." % (fn))
                        continue
            elif devType == "c":
                if not stat.S_ISCHR(s.st_mode):
                    if self.p._bAutoFix:
                        os.remove(fullfn)
                        _makeDeviceNodeFile(fullfn, devType, major, minor, mode, owner, group)
                    else:
                        self.p._errCb("\"%s\" is not a character special device file." % (fn))
                        continue
            else:
                assert False

            # check major and minor
            if os.major(s.st_rdev) != major or os.minor(s.st_rdev) != minor:
                if self.p._bAutoFix:
                    os.remove(fullfn)
                    _makeDeviceNodeFile(fullfn, devType, major, minor, mode, owner, group)
                else:
                    self.p._errCb("\"%s\" has invalid major and minor number." % (fn))
                    continue

            # check mode, owner and group
            self.__checkMode(fn, fullfn, mode)
            self.__checkOwnerGroup(fn, fullfn, owner, group)

        # redundant files
        keepList = [os.path.join(devDir, x[0]) for x in nodeInfoList]
        for fn in reversed(self._fullListDir(devDir, recursive=True)):
            if fn in keepList:
                continue

            if self.p._bAutoFix:
                fullfn = self.__fn2fullfn(fn)
                if os.path.islink(fullfn) or not os.path.isdir(fullfn):
                    # remove redundant file
                    os.remove(fullfn)
                else:
                    # remove redundant directory
                    # files are iterated before their parent directory using reversed()
                    try:
                        os.rmdir(fullfn)
                    except OSError as e:
                        if e.errno == 39:
                            # OSError: [Errno 39] Directory not empty
                            self.p._errCb("Directory \"%s\" should not exist but has valid file(s) in it." % (fn))
                        else:
                            raise
            else:
                self.p._errCb("\"%s\" should not exist." % (fn))

        # record files
        for fn in self._fullListDir(devDir, recursive=True):
            self.p._record.add(fn)






    @staticmethod
    def portageGetChost():
        # Query portage for the CHOST variable (the build-host triplet);
        # stderr is discarded and the trailing newline stripped.
        return Util.shellCall("portageq envvar CHOST 2>/dev/null").rstrip("\n")





        # install kernel modules, firmwares and executables for debugging, use bash as init
        if self.trickDebug:
            dstdir = os.path.join(self._initramfsTmpDir, self._be.kernel_modules_dirpath[1:])
            if os.path.exists(dstdir):
                shutil.rmtree(dstdir)
            shutil.copytree(self._be.kernel_modules_dirpath, dstdir, symlinks=True)

            dstdir = os.path.join(self._initramfsTmpDir, self._be.firmware_dirpath[1:])
            if os.path.exists(dstdir):
                shutil.rmtree(dstdir)
            shutil.copytree(self._be.firmware_dirpath, dstdir, symlinks=True)

            self._installBin("/bin/bash", self._initramfsTmpDir)
            self._installBin("/bin/cat", self._initramfsTmpDir)
            self._installBin("/bin/cp", self._initramfsTmpDir)
            self._installBin("/bin/dd", self._initramfsTmpDir)
            self._installBin("/bin/echo", self._initramfsTmpDir)
            self._installBin("/bin/ls", self._initramfsTmpDir)
            self._installBin("/bin/ln", self._initramfsTmpDir)
            self._installBin("/bin/mount", self._initramfsTmpDir)
            self._installBin("/bin/ps", self._initramfsTmpDir)
            self._installBin("/bin/rm", self._initramfsTmpDir)
            self._installBin("/bin/touch", self._initramfsTmpDir)
            self._installBin("/usr/bin/basename", self._initramfsTmpDir)
            self._installBin("/usr/bin/dirname", self._initramfsTmpDir)
            self._installBin("/usr/bin/find", self._initramfsTmpDir)
            self._installBin("/usr/bin/sleep", self._initramfsTmpDir)
            self._installBin("/usr/bin/tree", self._initramfsTmpDir)
            self._installBin("/usr/bin/xargs", self._initramfsTmpDir)
            self._installBin("/usr/bin/hexdump", self._initramfsTmpDir)

            self._installBin("/sbin/blkid", self._initramfsTmpDir)
            self._installBin("/sbin/switch_root", self._initramfsTmpDir)

            self._installBin("/bin/lsmod", self._initramfsTmpDir)
            self._installBin("/bin/modinfo", self._initramfsTmpDir)
            self._installBin("/sbin/modprobe", self._initramfsTmpDir)
            shutil.copytree("/etc/modprobe.d", os.path.join(self._initramfsTmpDir, "etc", "modprobe.d"), symlinks=True)

            self._installBin("/sbin/dmsetup", self._initramfsTmpDir)
            self._installBin("/sbin/lvm", self._initramfsTmpDir)

            if os.path.exists("/usr/bin/nano"):
                self._installBin("/usr/bin/nano", self._initramfsTmpDir)

            os.rename(os.path.join(self._initramfsTmpDir, "init"), os.path.join(self._initramfsTmpDir, "init.bak"))
            os.symlink("/bin/bash", os.path.join(self._initramfsTmpDir, "init"))

            with open(os.path.join(self._initramfsTmpDir, ".bashrc"), "w") as f:
                f.write("echo \"<initramfs-debug> Mounting basic file systems\"\n")
                f.write("mount -t sysfs none /sys\n")
                f.write("mount -t proc none /proc\n")
                f.write("mount -t devtmpfs none /dev\n")
                f.write("\n")

                f.write("echo \"<initramfs-debug> Loading all the usb drivers\"\n")
                dstdir = os.path.join(self._be.kernel_modules_dirpath, "kernel", "drivers", "usb")
                f.write("find \"%s\" -name \"*.ko\" | xargs basename -a -s \".ko\" | xargs /sbin/modprobe -a" % (dstdir))
                f.write("\n")

                f.write("echo \"<initramfs-debug> Loading all the hid drivers\"\n")
                dstdir = os.path.join(self._be.kernel_modules_dirpath, "kernel", "drivers", "hid")
                f.write("find \"%s\" -name \"*.ko\" | xargs basename -a -s \".ko\" | xargs /sbin/modprobe -a" % (dstdir))
                f.write("\n")

                f.write("echo \"<initramfs-debug> Loading all the input drivers\"\n")
                dstdir = os.path.join(self._be.kernel_modules_dirpath, "kernel", "drivers", "input")
                f.write("find \"%s\" -name \"*.ko\" | xargs basename -a -s \".ko\" | xargs /sbin/modprobe -a" % (dstdir))
                f.write("\n")


class CoreDumpHandler:
    """Value object describing how the kernel should handle core dumps.

    CUSTOM handlers carry a core_pattern and core_pipe_limit; NONE carries
    nothing; TO_FILE is not implemented yet.
    """

    class Type(enum.Enum):
        NONE = "none"
        TO_FILE = "to_file"
        CUSTOM = "custom"

    def __init__(self, core_dump_handler_type, **kwargs):
        self.core_dump_handler_type = core_dump_handler_type
        t = self.Type
        if core_dump_handler_type == t.NONE:
            # no extra configuration allowed
            assert len(kwargs) == 0
        elif core_dump_handler_type == t.TO_FILE:
            # FIXME: convert kwargs to core_pattern and core_pipe_limit?
            assert False
        elif core_dump_handler_type == t.CUSTOM:
            assert "core_pattern" in kwargs and "core_pipe_limit" in kwargs
            self.core_pattern = kwargs["core_pattern"]
            self.core_pipe_limit = kwargs["core_pipe_limit"]
        else:
            assert False

    def __eq__(self, other):
        """Structural equality: same type and, where applicable, same pattern/limit."""
        if type(other) is not type(self):
            return False
        if other.core_dump_handler_type != self.core_dump_handler_type:
            return False
        t = self.core_dump_handler_type
        if t == self.Type.NONE:
            return True
        if t in (self.Type.TO_FILE, self.Type.CUSTOM):
            return (self.core_pattern == other.core_pattern
                    and self.core_pipe_limit == other.core_pipe_limit)
        assert False

    def __hash__(self):
        # kept consistent with __eq__: hash over exactly the compared fields
        t = self.core_dump_handler_type
        if t == self.Type.NONE:
            return hash(t)
        if t in (self.Type.TO_FILE, self.Type.CUSTOM):
            return hash((t, self.core_pattern, self.core_pipe_limit))
        assert False


        # register coredump handler
        if coreDumpHandler.core_dump_handler_type == coreDumpHandler.Type.NONE:
            pass
        elif coreDumpHandler.core_dump_handler_type == coreDumpHandler.Type.TO_FILE:
            # FIXME
            assert False
        elif coreDumpHandler.core_dump_handler_type == coreDumpHandler.Type.CUSTOM:
            buf += "echo '%s' > /proc/sys/kernel/core_pattern\n" % (coreDumpHandler.core_pattern)
            buf += "echo '%s' > /proc/sys/kernel/core_pipe_limit\n" % (coreDumpHandler.core_pipe_limit)
            buf += "\n"
        else:
            assert False



        boot_sector = f.read(512)
        sector_size = struct.unpack('<H', boot_sector[0xB:0xD])[0]
        mft_cluster = struct.unpack('<Q', boot_sector[0x28:0x30])[0]
        sectors_per_cluster = boot_sector[0x40]
        mft_offset = mft_cluster * sectors_per_cluster * sector_size
        boot_mft_record_offset = mft_offset + 1 * 1024                          # $Boot file is at position 1 in MFT, MFT record default size is 1024 bytes

        f.seek(boot_mft_record_offset)
        boot_mft_record = f.read(1024)
        boot_file_offset = sector_size                                          # $Boot file starts from the 2nd sector
        boot_file_size = struct.unpack('<Q', boot_mft_record[0x30:0x38])[0]
        if boot_file_size != 16 * 512:                                          # $Boot file should occupy 16 sectors
            raise Exception("invalid $Boot file size %d" % (boot_file_size))

        ret = boot_sector[0x54:0x54+426]                                        # read the 426 bytes of boot code in boot sector
        f.seek(boot_file_offset)
        ret += f.read(boot_file_size - sector_size)                             # read the rest content in $Boot file





    @classmethod
    def ntfsReadBootCode(cls, devPath):
        """Read the NTFS boot code: 426 bytes from the boot sector plus the rest of $Boot."""
        with open(devPath, "rb") as f:
            bootSector, _, bootFileSize = cls._ntfsGetBootFileInfo(f)
            # boot code occupies offsets 0x54..0x1FE of the boot sector
            code = bootSector[0x54:0x54 + 426]
            # the remaining sectors of the $Boot file follow the boot sector
            f.seek(512)
            code += f.read(bootFileSize - 512)
        return code

    @classmethod
    def ntfsWriteBootCode(cls, devPath, bootCode):
        """Write boot code (as returned by ntfsReadBootCode) back to an NTFS device.

        *bootCode* is the 426-byte code area of the boot sector followed by
        the remainder of the 16-sector $Boot file.  Raises if its length does
        not match the on-disk layout or if the backup boot sector disagrees
        with the primary one.
        """
        with open(devPath, "rb+") as f:
            boot_sector, sector_size, boot_file_size = cls._ntfsGetBootFileInfo(f)
            if 426 + (boot_file_size - 512) != len(bootCode):
                raise Exception("invalid $Boot file size %d" % (boot_file_size))

            # read and compare backup boot sector
            backup_boot_sector_offset = cls._ntfsGetBackupBootSectorOffset(f, sector_size)
            f.seek(backup_boot_sector_offset)
            if f.read(512) != boot_sector:
                raise Exception("invalid backup boot sector content")

            # write to $Boot file
            # BUGFIX: the boot-code area starts at offset 0x54 — that is where
            # ntfsReadBootCode reads it from and where the backup boot sector
            # is patched below; the old code seeked to 0 and would have
            # clobbered the jump instruction, OEM id and BPB.
            f.seek(0x54)
            f.write(bootCode[:426])
            f.seek(512)
            f.write(bootCode[426:])

            # write to backup boot sector
            f.seek(backup_boot_sector_offset + 0x54)
            f.write(bootCode[:426])

    @staticmethod
    def _ntfsGetBootFileInfo(f):
        boot_sector = f.read(512)

        sector_size = struct.unpack('<H', boot_sector[0xB:0xD])[0]
        if sector_size != 512:
            # FIXME: do we need to support this?
            raise Exception("invalid sector size %d" % (sector_size))

        mft_cluster = struct.unpack('<Q', boot_sector[0x28:0x30])[0]
        sectors_per_cluster = boot_sector[0x40]
        mft_offset = mft_cluster * sectors_per_cluster * sector_size
        boot_mft_record_offset = mft_offset + 1 * 1024                          # $Boot file is at position 1 in MFT, MFT record default size is 1024 bytes

        f.seek(boot_mft_record_offset)
        boot_mft_record = f.read(1024)
        boot_file_size = struct.unpack('<Q', boot_mft_record[0x30:0x38])[0]
        if boot_file_size != 16 * 512:                                          # $Boot file should occupy 16 sectors
            raise Exception("invalid $Boot file size %d" % (boot_file_size))

        return (boot_sector, sector_size, boot_file_size)

    @staticmethod
    def _ntfsGetBackupBootSectorOffset(f, sector_size):
        f.seek(0, 2)
        device_size = f.tell()
        total_sectors = device_size // sector_size
        return (total_sectors - 1) * sector_size







class VendorPluginStandardOfflineComponent(Component):
    """A Component rebuilt from saved ``componentInfo`` for a vendor plugin
    while the component itself is offline.

    All child devices and connectors are reconstructed as "dumb"
    placeholder objects from the serialized info dict.
    """

    def __init__(self, sysSnapshot, vendorPlugin, componentInfo, serial, componentSaveId, offlineReason):
        # create instance
        super().__init__(vendorPlugin.get_id(),
                         componentInfo["model"],
                         ComponentType(componentInfo["component_type"]),
                         serial,
                         None,                  # id
                         componentSaveId,       # saveId
                         offlineReason)         # offlineReason

        # add devices
        for componentDeviceInfo in componentInfo.get("devices", []):
            # all devices in offline component are dumb device
            obj = Device(None,
                         None,
                         DeviceType(componentDeviceInfo["device_type"]),
                         componentDeviceInfo.get("id", "0"))
            self._addDevice(obj)

        # add connectors
        for componentConnectorInfo in componentInfo.get("connectors", []):
            # rebuild connectors from saved info (NOTE(review): the original
            # comment here said "dumb device" — a copy-paste from the loop above)
            obj = Connector(ConnectorType(componentConnectorInfo["connector_type"]),
                            componentConnectorInfo.get("id", "0"))
            self._addConnector(obj)

    def _callEvaluate(self, sys_snapshot, parent):
        """Attach this object to *parent* and run self._evaluate().

        Fills in the inner hints (parent_is_online / parent_offline_reason)
        from *parent*, records the evaluation results, then cross-checks
        them against any user-supplied hints.
        """
        assert self._parent is None

        # copy only to verify the inner hints are not already present
        hint = self._hint.copy()
        assert "parent_is_online" not in hint
        assert "parent_offline_reason" not in hint
        if isinstance(parent, Machine):
            self._hint["parent_is_online"] = True
        elif isinstance(parent, Component):
            # BUGFIX: was self._parent.is_online(), but self._parent is still
            # None at this point (asserted above); use the incoming parent.
            self._hint["parent_is_online"] = parent.is_online()
            if not parent.is_online():
                # BUGFIX: an offline reason only exists for an offline parent;
                # the original recorded it when the parent was online.
                self._hint["parent_offline_reason"] = parent.get_offline_reason()
        else:
            assert False

        self._realId, self._realLocation, self._realPeerConnectorMatcher, self._realOfflineReason = self._evaluate(sys_snapshot, self._hint)
        self._parent = parent

        # evaluation results must agree with any user-supplied hints
        if "location" in self._hint:
            assert self._realLocation == self._hint["location"]

        if "peer_connector_matcher" in self._hint:
            assert self._realPeerConnectorMatcher == self._hint["peer_connector_matcher"]

        if "is_online" in self._hint:
            if self._hint["is_online"]:
                assert self._realOfflineReason is None
            else:
                assert isinstance(self._realOfflineReason, OfflineReason)
                assert self._realOfflineReason not in [OfflineReason.POWER_OFF, OfflineReason.REMOVED]


    def set_peer_connector_matcher(self, peer_connector_matcher, pre_ready_call=False):
        """Set the matcher used to pair this connector with its peer.

        Only callable before the owner is attached to a machine, and only
        with pre_ready_call=True.
        """
        assert pre_ready_call
        assert self._getMachine() is None

        # no-op when the value is unchanged
        if self._peerConnectorMatcher != peer_connector_matcher:
            self._peerConnectorMatcher = peer_connector_matcher

    def _callEvaluate(self, sys_snapshot, parent):
        """Attach this object to *parent* and run self._evaluate().

        Fills in the inner hints (parent_id / parent_is_online /
        parent_offline_reason) from *parent*, records the evaluation
        results, then cross-checks them against user-supplied hints.
        """
        assert self._parent is None

        # copy only to verify the inner hints are not already present
        hint = self._hint.copy()
        assert "parent_id" not in hint
        assert "parent_is_online" not in hint
        assert "parent_offline_reason" not in hint
        if isinstance(parent, Machine):
            self._hint["parent_id"] = None
            self._hint["parent_is_online"] = True
        elif isinstance(parent, Component):
            self._hint["parent_id"] = parent.get_id()
            # BUGFIX: was self._parent.is_online(), but self._parent is still
            # None at this point (asserted above); use the incoming parent.
            self._hint["parent_is_online"] = parent.is_online()
            if not parent.is_online():
                # BUGFIX: an offline reason only exists for an offline parent;
                # the original recorded it when the parent was online.
                self._hint["parent_offline_reason"] = parent.get_offline_reason()
        else:
            assert False

        self._realId, self._realLocation, self._realOfflineReason = self._evaluate(sys_snapshot, self._hint)
        self._parent = parent

        # evaluation results must agree with any user-supplied hints
        if "location" in self._hint:
            assert self._realLocation == self._hint["location"]

        if "is_online" in self._hint:
            if self._hint["is_online"]:
                assert self._realOfflineReason is None
            else:
                assert isinstance(self._realOfflineReason, OfflineReason)
                assert self._realOfflineReason not in [OfflineReason.POWER_OFF, OfflineReason.REMOVED]


    def set_hint(self, key, value):
        """Record a hint used later by _evaluate().

        Hints may be read via get_hint() / written via set_hint() only
        while this object is NOT attached to a machine.
        User hints:  id:str, location:str, need_offline_state:bool,
                     is_online:bool, offline_reason:enum
        Inner hints: parent_is_online:bool, parent_offline_reason:enum
        """
        assert self._parent is None

        assert value is not None
        self._hint[key] = value
        # BUGFIX: the original body ended with two pasted-in
        # "self._hint = dict()" initializer fragments that discarded the
        # value just stored; that initialization belongs in __init__.



    def _callEvaluate(self, sys_snapshot, parent):
        """Attach to *parent*, evaluate this component, then cascade the
        evaluation to its child devices and connectors.

        Results from self._evaluate() are cross-checked against the
        user-supplied hints afterwards.
        """
        assert self._parent is None

        self._realId, self._realLocation, self._realPersistent, self._realOfflineReason = self._evaluate(sys_snapshot, self._hint)
        self._parent = parent

        # evaluation results must agree with any user-supplied hints
        if "location" in self._hint:
            assert self._realLocation == self._hint["location"]

        if "need_offline_state" in self._hint:
            assert self._realPersistent == self._hint["need_offline_state"]

        if "is_online" in self._hint:
            if self._hint["is_online"]:
                assert self._realOfflineReason is None
            else:
                assert isinstance(self._realOfflineReason, OfflineReason)

        # let the properties file patch each child first, then evaluate it;
        # devices are processed before connectors
        for d in self._devices:
            ComponentDeviceConnectorPropertiesFile.modify_device(d)
            d._callEvaluate(sys_snapshot, self)

        for x in self._connectors:
            ComponentDeviceConnectorPropertiesFile.modify_connector(x)
            x._callEvaluate(sys_snapshot, self)

        self._after_devices_connectors_evaluated(sys_snapshot)





    def supplement_components(self, machine):
        """Probe the system snapshot and add every product component whose
        "prober" expression matches.

        Each matching component is patched via
        ComponentDeviceConnectorPropertiesFile before being added.
        """
        prober = SysSnapshotProber()
        for k, data in self._products["components"].items():
            if "prober" not in data:
                continue
            model, block = k
            # BUGFIX: dropped a stray debug print(k) that was left here.
            # model/block are bound as lambda defaults so each closure
            # captures its own pair (not the loop's last value).
            prober.add_expression(data["prober"], lambda m=model, b=block: machine.add_component(ComponentDeviceConnectorPropertiesFile.modify_component(self.get_component(m, b)), pre_ready_call=True))
        prober.evaluate(machine._sysSnapshot)




        # use udev to recognize vendor and model
        if vendor is None:
            ret = udevContext.list_devices(path="/devices/virtual/dmi/id")
            if len(ret) > 0:
                assert len(ret) == 1
                ret = ret[0]
                vendor = ret.properties("ID_VENDOR")
                model = ret.properties("ID_MODEL")



    # def get_monitor_layout(self):
    #     pass

    # def get_loudspeaker_layout(self):
    #     pass




        if os.path.exists("/usr/bin/bluetoothctl"):
            for line in subprocess.check_output(["bluetoothctl", "devices"], universal_newlines=True).split("\n"):
                m = re.fullmatch(r"Device (.*) (\S+)", line)
                if m is not None:
                    self._bluetoothDevices.append(self.BluetoothDevice(m.group(2), m.group(1)))

    def get_lmsensors_config_file(self):
        """Return the lm-sensors configuration file for this machine.

        Runtime property; per the note below only an "attached Device
        object" may call the runtime-property getters.
        FIXME: not implemented yet — currently always returns None.
        """
        assert self.__ready()
        # FIXME



        # read:  self.get_kernel_config(), self.get_kernel_cmdline_paramter(), self.get_lmsensors_config_file()
        #        Runtime properties, only "attached Device object" can do the above operations
        # write: Runtime properties, not writable
        pass








class DevHwCfgDb:
    """Static database of hardware capability data keyed by PCI vendor/device id."""

    @staticmethod
    def getDevHwCfg(vendorId, deviceId):
        """Return the capability dict for (vendorId, deviceId), or None if unknown."""
        # only DRM (graphics) devices are known so far; simplified from the
        # original "if ret is not None: return ret / return None" dance
        return DevHwCfgDb._doGetDrm(vendorId, deviceId)

    @staticmethod
    def _doGetDrm(vendorId, deviceId):
        # returns: {
        #     "mem": ...,         # unit: bytes (BUGFIX: comment said "GB",
        #                         # but the data below is stored in bytes)
        #     "fp64": ...,        # unit: gflops
        #     "fp32": ...,        # unit: gflops
        #     "fp16": ...,        # unit: gflops
        # }

        if vendorId == 0x1002 and deviceId == 0x66af:
            # AMD Radeon VII, https://www.amd.com/en/products/graphics/amd-radeon-vii
            return {
                "mem": 16 * 1024 * 1024 * 1024,                # 16GiB
                "fp64": int(3.46 * 1024),                      # 3.46 TFLOPs
                "fp32": int(13.8 * 1024),                      # 13.8 TFLOPs
                "fp16": int(27.7 * 1024),                      # 27.7 TFLOPs
            }

        # unknown device
        return None





class _UtilHwDict:

    @staticmethod
    def get(hwSpec):
        """Build a hardware-description dict.

        Starts from a deep copy of *hwSpec* (or an empty dict when it is
        None) and fills in cpu/mem/sensor information probed from the
        running system.
        """
        result = dict() if hwSpec is None else copy.deepcopy(hwSpec)

        _UtilHwDict._getCpuInfo(result)
        _UtilHwDict._getMemInfo(result)
        _UtilHwDict._getSensorInfo(result)
        return result

    @staticmethod
    def _getCpuInfo(ret):
        """Fill ret["cpu"] with vendor/model strings parsed from /proc/cpuinfo.

        Unrecognized vendors/models are reported as "Unknown".
        """
        buf = pathlib.Path("/proc/cpuinfo").read_text()

        ret["cpu"] = dict()

        ret["cpu"]["vendor"] = "Unknown"
        if True:
            m = re.search(r'vendor_id\s*:\s*(\S+)', buf, re.M)
            if m is not None:
                if m.group(1) == "GenuineIntel":
                    ret["cpu"]["vendor"] = "Intel"
                if m.group(1) == "AuthenticAMD":
                    ret["cpu"]["vendor"] = "AMD"

        ret["cpu"]["model"] = "Unknown"
        if True:
            m = re.search(r'model name\s*:\s*(.*)', buf, re.M)
            if m is not None:
                # intel models
                if "i7-4600U" in m.group(1):
                    ret["cpu"]["model"] = "i7-4600U"

                # amd models
                if "Ryzen Threadripper 1920X" in m.group(1):
                    ret["cpu"]["model"] = "Ryzen Threadripper 1920X"
                if "Ryzen Threadripper 2990WX" in m.group(1):
                    # BUGFIX: this assignment was truncated to a stray
                    # "cores" token in the original; reconstructed to match
                    # the pattern of the branches above.
                    ret["cpu"]["model"] = "Ryzen Threadripper 2990WX"
        # FIXME: how to identify which file should be write
        if not pre_ready_call:
            ComponentDeviceFile.write(self._components, self._devices)






    @classmethod
    def modify_device(cls, device):
        """Apply the (at most one) matching config-file entry to *device*.

        Raises ConfigError when more than one entry matches; always
        returns the (possibly modified) device.
        """
        cls._read()

        matches = Handy.findItemsByDevice(cls._DEVICES, device)
        if len(matches) > 1:
            raise ConfigError("invalid device found in file %s" % (cls._FULLFN))
        if len(matches) == 1:
            entry = matches[0]
            if "is_dumb" in entry:
                device.set_dumb(pre_ready_call=True)
            if "default_offline_reason" in entry:
                device.set_default_offline_reason(OfflineReason(entry["default_offline_reason"]), pre_ready_call=True)
        return device

    tlist = []
    for item in cls._DEVICES:
        ret = Handy.findDevicesByItem(machine._devices, item)
        if len(ret) > 1:
            raise ConfigError("invalid device found in file %s" % (cls._FULLFN))
        elif len(ret) == 1:
            pass
        elif len(ret) == 0:
            if "vendor" in item:
                ret = Handy.getVendorPlugin(item["vendor"]).get_device(item["model"], None)
            else:
                from .machine import Device
                ret = Device(None, None, None, item["device_type"])
            ret.set_id(item.get("device_id", None))
            if "is_dumb" in item:
                ret.set_dumb(pre_ready_call=True)
            if "default_offline_reason" in item:
                ret.set_default_offline_reason(OfflineReason(item["default_offline_reason"]), pre_ready_call=True)
            tlist.append(ret)
        else:
            assert False
    if len(tlist) > 0:
        machine._addDevices(tlist)

    buf += "\n"

    buf += "[device]\n"
    for c in machine._devices:
        item = dict()
        if c.get_vendor() is not None:
            item["vendor"] = c.get_vendor()
            item["model"] = c.get_model()
        else:
            item["device_type"] = c.get_type()
        if c.get_id() is not None:
            item["id"] = c.get_id()
        buf += json.dumps(item, indent=4)
        buf += "\n"
    buf += "\n"


    if section == "[device]":
        if line == "{":
            if objStr is not None:
                raise ConfigError("invalid line %d in file %s" % (i, cls._FULLFN))
            objStr = line + "\n"
            continue
        if line == "}":
            if objStr is None:
                raise ConfigError("invalid line %d in file %s" % (i, cls._FULLFN))
            objStr += line + "\n"
            item = json.loads(objStr)
            __checkDevice(item)
            cls._DEVICES.append(item)
            continue
        if objStr is not None:
            objStr += line + "\n"
        continue



    def __checkDevice(item):
        """Validate a [device] item.

        Exactly one of (vendor + model) or device_type must be present;
        is_dumb, when given, must be truthy; default_offline_reason, when
        given, must be an OfflineReason.  Raises ConfigError otherwise.
        """
        vendor = item.get("vendor", None)
        model = item.get("model", None)
        deviceType = item.get("device_type", None)

        vendorModelForm = vendor is not None and model is not None and deviceType is None
        typeOnlyForm = vendor is None and model is None and deviceType is not None
        if not (vendorModelForm or typeOnlyForm):
            raise ConfigError("invalid device found in file %s" % (cls._FULLFN))

        if "is_dumb" in item and not item["is_dumb"]:
            raise ConfigError("invalid device found in file %s" % (cls._FULLFN))

        if "default_offline_reason" in item and not isinstance(item["default_offline_reason"], OfflineReason):
            raise ConfigError("invalid device found in file %s" % (cls._FULLFN))




        devices = []
        for item in cls._DEVICES:
            ret = Handy.findDevicesByItem(machine._devices, item)
            if len(ret) > 1:
                raise ConfigError("invalid device found in file %s" % (cls._FULLFN))
            elif len(ret) == 1:
                pass
            elif len(ret) == 0:
                if "vendor" in item:
                    ret = Handy.getVendorPlugin(item["vendor"]).get_device(item["model"], None)
                else:
                    from .machine import Device
                    ret = Device(None, None, None, item["device_type"])
                ret.set_id(item.get("device_id", None))
                devices.append(ret)
            else:
                assert False



    def add_components(self, components, pre_ready_call=False):
        """Add pre-built components to this machine.

        *components* must be non-empty and each must not yet have a parent.
        After adding, every component must be dumb or offline — online
        non-dumb components are expected to be auto-detected, not added.
        Unless pre_ready_call is True, the component config file is
        rewritten at the end.
        """
        assert self.__readyCheck(pre_ready_call)
        assert len(components) > 0

        # pre-add check
        for component in components:
            assert isinstance(component, Component)
            assert component._parent is None
            # assert component.get_seat() is None or component.get_seat() in self._seats

        # do add
        for component in components:
            self._components.append(component)
            component._parent = self
            # NOTE(review): _callEvaluate() is invoked with no arguments here,
            # while the _callEvaluate definitions elsewhere in this file take
            # (sys_snapshot, parent) — confirm this class's signature.
            component._callEvaluate()
            for d in component._getDevices():
                d._callEvaluate()

        # post-add check
        for component in components:
            assert component.is_dumb() or not component.is_online()                         # components that are not dumb nor offline should be auto detected, not added
        assert not MachineUtil.hasDuplicateIds(self)
        assert not MachineUtil.hasDuplicateNames(self)

        # write config file
        if not pre_ready_call:
            ComponentsFile.write(self._components, self._devices)












    def is_category(self, category):
        """Category membership test; subclasses must override.

        BUGFIX: was ``assert False``, which is silently stripped under
        ``python -O``; NotImplementedError is the explicit equivalent.
        """
        raise NotImplementedError()


class Machine(PluginComponent):

    def get_all_peripherals(self):
        """Return every peripheral component, including disabled/removed ones."""
        assert self._ready

        ret = []
        for c in self._components:
            if not c.is_peripheral():
                continue
            ret.append(c)
        return ret

    def get_peripherals(self):
        """Return the peripherals that are neither disabled nor removed."""
        # BUGFIX: the removed-check used ``c`` (a NameError / leaked loop
        # variable) instead of the comprehension variable ``x``.
        return [x for x in self.get_all_peripherals() if not x.is_disabled() and not x.is_removed()]






class DumbComponentsFile:

    """
    Persistent list of "dumb" (non-auto-detectable) components and devices.

    Example entries (each component is one JSON object):
    [
        {
            "vendor": "xxx",
            "model": "yyy",
            "id": "xxx",
        },
        {
            "vendor": "xxx",
            "model": "yyy",
        },
        {
            "component_type": "printer",
            "id": "xxx",
        },
        {
            "component_type": "printer",
        }
    ]

    NOTE(review): _read() below actually parses a sectioned format with
    "[component]" / "[device]" headers followed by brace-delimited JSON
    objects, not the JSON array shown above — confirm which format is
    authoritative.
    """

    PATH = "/etc/udev/dumb-components.json"

    @classmethod
    def _read(cls):
        """Parse cls.PATH into a (component_items, device_items) tuple.

        Expected file format: "[component]" / "[device]" section headers,
        each followed by brace-delimited JSON objects; a blank line closes
        the current section.  Returns two empty lists when the file does
        not exist.  Raises ConfigError on malformed content.
        """
        components = []
        devices = []

        if not os.path.exists(cls.PATH):
            return (components, devices)

        def __checkComponent(item):
            # a valid item is either vendor+model or component_type (only)
            vendor = item.get("vendor", None)
            model = item.get("model", None)
            componentType = item.get("component_type", None)

            if vendor is not None:
                if model is not None:
                    if componentType is not None:
                        raise ConfigError("invalid component found in file %s" % (cls.PATH))
                else:
                    raise ConfigError("invalid component found in file %s" % (cls.PATH))
            else:
                if model is not None:
                    raise ConfigError("invalid component found in file %s" % (cls.PATH))
                else:
                    if componentType is None:
                        raise ConfigError("invalid component found in file %s" % (cls.PATH))

            if "is_dumb" in item:
                if not item["is_dumb"]:
                    raise ConfigError("invalid component found in file %s" % (cls.PATH))

            if "default_offline_reason" in item:
                # NOTE(review): json.loads() yields plain str/int values, so
                # this isinstance(..., OfflineReason) check can never pass
                # for data read from the file — confirm whether an
                # OfflineReason(...) conversion was intended.
                if not isinstance(item["default_offline_reason"], OfflineReason):
                    raise ConfigError("invalid component found in file %s" % (cls.PATH))

        def __checkDevice(item):
            # same shape rules as __checkComponent, with device_type instead
            vendor = item.get("vendor", None)
            model = item.get("model", None)
            deviceType = item.get("device_type", None)

            if vendor is not None:
                if model is not None:
                    if deviceType is not None:
                        raise ConfigError("invalid device found in file %s" % (cls.PATH))
                else:
                    raise ConfigError("invalid device found in file %s" % (cls.PATH))
            else:
                if model is not None:
                    raise ConfigError("invalid device found in file %s" % (cls.PATH))
                else:
                    if deviceType is None:
                        raise ConfigError("invalid device found in file %s" % (cls.PATH))

            if "is_dumb" in item:
                if not item["is_dumb"]:
                    raise ConfigError("invalid device found in file %s" % (cls.PATH))

            if "default_offline_reason" in item:
                # NOTE(review): see the matching note in __checkComponent
                if not isinstance(item["default_offline_reason"], OfflineReason):
                    raise ConfigError("invalid device found in file %s" % (cls.PATH))

        lines = pathlib.Path(cls.PATH).read_text().split("\n")
        section = None
        objStr = None           # accumulated text of the JSON object being read
        for i in range(0, len(lines)):
            # NOTE: i is 0-based, so line numbers in error messages are off by one
            line = lines[i]
            if line in ["[component]", "[device]"]:
                section = line
                continue
            if line == "":
                section = None
                continue

            if section == "[component]":
                if line == "{":
                    if objStr is not None:
                        raise ConfigError("invalid line %d in file %s" % (i, cls.PATH))
                    objStr = line + "\n"
                    continue
                if line == "}":
                    if objStr is None:
                        raise ConfigError("invalid line %d in file %s" % (i, cls.PATH))
                    objStr += line + "\n"
                    item = json.loads(objStr)
                    __checkComponent(item)
                    components.append(item)
                    continue
                if objStr is not None:
                    objStr += line + "\n"
                continue

            if section == "[device]":
                if line == "{":
                    if objStr is not None:
                        raise ConfigError("invalid line %d in file %s" % (i, cls.PATH))
                    objStr = line + "\n"
                    continue
                if line == "}":
                    if objStr is None:
                        raise ConfigError("invalid line %d in file %s" % (i, cls.PATH))
                    objStr += line + "\n"
                    item = json.loads(objStr)
                    __checkDevice(item)
                    devices.append(item)
                    continue
                if objStr is not None:
                    objStr += line + "\n"
                continue

        return (components, devices)

    @classmethod
    def generate(cls, machine):
        buf = ""

        for c in machine._components:
            item = dict()
            if c.get_vendor() is not None:
                item["vendor"] = c.get_vendor()
                item["model"] = c.get_model()
            item["component_type"] = c.get_type()
            if c.get_id() is not None:
                item["id"] = c.get_id()
            buf += json.dumps(item, indent=4)
            buf += "\n"
        buf += "\n"

        return (cls.PATH, buf)

    @classmethod
    def write(cls, machine):
        """Generate the file content and write it to its configured path,
        creating the parent directory when needed."""
        path, content = cls.generate(machine)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, "w") as fp:
            fp.write(content)





    @classmethod
    def _read(cls):
        if cls._DATA_MAP is not None:
            return

        cls._DATA_MAP = {
            "component": [],
            "device": [],
            "connector": []
        }

        if not os.path.exists(cls.PATH):
            return

        lines = pathlib.Path(cls.PATH).read_text().split("\n")
        objStr = None
        for i in range(0, len(lines)):
            line = lines[i].strip()

            if line == "{":
                if objStr is not None:
                    raise ConfigError("invalid line %d in file %s" % (i, cls.PATH))
                objStr = line + "\n"
                continue

            if line == "}":
                if objStr is None:
                    raise ConfigError("invalid line %d in file %s" % (i, cls.PATH))
                objStr += line + "\n"
                obj = json.loads(objStr)
                if "match_component" in obj:
                    obj["match"] = obj.pop("match_component")
                    cls._DATA_MAP["component"].append(obj)
                elif "match_device" in obj:
                    obj["match"] = obj.pop("match_device")
                    cls._DATA_MAP["device"].append(obj)
                elif "match_connector" in obj:
                    obj["match"] = obj.pop("match_connector")
                    cls._DATA_MAP["connector"].append(obj)
                else:
                    raise ConfigError("invalid line %d in file %s" % (i, cls.PATH))
                objStr = None
                continue

            if objStr is None:
                if line == "" or line.startswith("#"):
                    continue
                else:
                    raise ConfigError("invalid line %d in file %s" % (i, cls.PATH))

            objStr += line + "\n"

        if objStr is not None:
            raise ConfigError("invalid content in file %s" % (cls.PATH))



    @classmethod
    def modify_component(cls, component):
        """Apply every matching [component] rule from the data file to
        *component*; returns the component.

        BUGFIX: added the missing @classmethod (callers invoke this through
        the class); the loop referenced an undefined name ``data`` and a
        non-existent "mactcher" key — _read() stores the rules in
        cls._DATA_MAP with the expression under the key "match".
        """
        cls._read()

        m = MatchComponent()
        for item in cls._DATA_MAP["component"]:
            m.add_expression(item["match"], functools.partial(cls._modifyComponent, item))
        m.evaluate(component)

        return component

    @classmethod
    def modify_device(cls, device):
        """Apply every matching [device] rule from the data file to
        *device*; returns the device.

        BUGFIX: added the missing @classmethod; replaced the undefined
        name ``data`` with cls._DATA_MAP and the non-existent "mactcher"
        key with "match" (the key _read() stores the expression under).
        """
        cls._read()

        m = MatchDevice()
        for item in cls._DATA_MAP["device"]:
            m.add_expression(item["match"], functools.partial(cls._modifyDevice, item))
        m.evaluate(device)

        return device

    @classmethod
    def modify_connector(cls, connector):
        """Apply every matching [connector] rule from the data file to
        *connector*; returns the connector.

        BUGFIX: added the missing @classmethod; replaced the undefined
        name ``data`` with cls._DATA_MAP and the non-existent "mactcher"
        key with "match" (the key _read() stores the expression under).
        """
        cls._read()

        m = MatchConnector()
        for item in cls._DATA_MAP["connector"]:
            m.add_expression(item["match"], functools.partial(cls._modifyConnector, item))
        m.evaluate(connector)

        return connector

    @classmethod
    def _modifyComponent(cls, item, component):
        """Apply one matched rule *item* to *component*.

        BUGFIX: added the missing @classmethod — modify_component() builds
        ``functools.partial(cls._modifyComponent, item)``, which only binds
        correctly when this is a classmethod.
        """
        if "is_dumb" in item:
            if not item["is_dumb"]:
                raise ConfigError("invalid component found in file %s" % (cls.PATH))
            component.set_dumb(pre_ready_call=True)

        if "default_offline_reason" in item:
            # NOTE(review): values decoded from JSON are plain strings, so an
            # isinstance(..., OfflineReason) check can never pass for file
            # data — confirm whether an OfflineReason(...) conversion (as in
            # modify_device elsewhere in this file) is intended here.
            if not isinstance(item["default_offline_reason"], OfflineReason):
                raise ConfigError("invalid component found in file %s" % (cls.PATH))
            component.set_default_offline_reason(item["default_offline_reason"], pre_ready_call=True)

    @classmethod
    def _modifyConnector(cls, item, connector):
        """Apply one matched rule *item* to *connector* (peer matcher only).

        BUGFIX: added the missing @classmethod so that
        ``functools.partial(cls._modifyConnector, item)`` binds correctly.
        """
        if "peer_connector_matcher" in item:
            connector.set_peer_connector_matcher(item["peer_connector_matcher"], pre_ready_call=True)


    @staticmethod
    def ddcGetI2cDevice(drmConnector):
        """Return the I2C bus device (e.g. "/dev/i2c-3") that ddcutil maps
        to *drmConnector*, or None when it cannot be determined.

        Best-effort probe: a missing or failing ddcutil yields None.
        """
        try:
            out = subprocess.check_output(["ddcutil", "detect"], text=True)
        except FileNotFoundError:
            # ddcutil not installed
            return None
        except subprocess.CalledProcessError:
            # BUGFIX: a failing ddcutil previously propagated; treat it the
            # same as absent for this best-effort probe
            return None

        tmpDev, tmpConnector = None, None

        for line in out.split("\n"):
            if line.startswith("Display "):
                # a new display block resets the accumulated pair
                tmpDev, tmpConnector = None, None
                continue
            m = re.search(r"\s+I2C bus:\s+(\S+)", line)
            if m is not None:
                tmpDev = m.group(1)
                continue
            m = re.search(r"\s+DRM connector:\s+(\S+)", line)
            if m is not None:
                # BUGFIX: the original called m.group(1) without checking for
                # a match, crashing on any line (e.g. a blank one) that
                # matched neither pattern
                tmpConnector = m.group(1)
                if tmpConnector == drmConnector:
                    break

        if tmpDev is not None and tmpConnector == drmConnector:
            return tmpDev

        return None

