#!/usr/bin/python3
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-

import os
import re
import sys
import pwd
import grp
import glob
import stat
import time
import json
import http.client
import errno
import fcntl
import shutil
import parted
import psutil
import urllib.error
import urllib.request
import gstage4
import pathlib
import asyncio
import asyncio_pool
import socket
import struct
import pyudev
import random
import fnmatch
import termios
import hashlib
import blessed
import zipfile
import portage
import passlib.hosts
import tenacity
import tempfile
import platform
import functools
import threading
import subprocess
import strict_hwcfg
import multiprocessing
from datetime import datetime
from OpenSSL import crypto
from gi.repository import Gio
from gi.repository import GLib


class Util:

    @staticmethod
    def boolToStr01(b):
        return "1" if b else "0"

    @staticmethod
    def str01ToBool(s):
        if s == "1":
            return True
        elif s == "0":
            return False
        else:
            assert False

    @staticmethod
    def boolToStrYn(b):
        return "Y" if b else "N"

    @staticmethod
    def strYnToBool(s):
        if s == "Y":
            return True
        elif s == "N":
            return False
        else:
            assert False

    @staticmethod
    def zramDevGetSize(path):
        """Return the disk size in bytes of the zram device *path* (queries zramctl)."""
        return int(subprocess.check_output(["zramctl", "-n", "-b", "-o", "DISKSIZE", path], text=True).strip())

    @staticmethod
    def readFlagFile(flagFile, defaultValue=None):
        try:
            return pathlib.Path(flagFile).read_text().rstrip("\n")
        except FileNotFoundError:
            if defaultValue is not None:
                return defaultValue
            raise

    @staticmethod
    def writeFlagFile(flagFile, value):
        pathlib.Path(flagFile).write_text(str(value))

    @staticmethod
    def die(message):
        print(message, file=sys.stderr)
        sys.exit(1)

    @classmethod
    def robustUrlOpen(cls, *kargs, **kwargs):
        """Open a URL with automatic retry on transient network failures.

        This is a generator that yields exactly one open response object
        (iterate it or call next()); the connection stays open while the
        caller consumes the yielded response.  Retry count and delay are
        taken from /etc/wgetrc ("tries" / "waitretry"), a tries value of 0
        meaning "retry forever".  A fixed 60-second timeout is always
        applied, so the caller must not pass one.
        """
        assert "timeout" not in kwargs

        timeoutSeconds = 60
        tryCount, retryWaitSeconds = cls._parseWgetrc()

        def _retryIfTrue(e):
            # retry only failures that look transient: the server dropped the
            # connection, a DNS hiccup, or a timeout at either layer
            if isinstance(e, http.client.RemoteDisconnected):
                return True
            if isinstance(e, urllib.error.URLError):
                if isinstance(e.reason, socket.gaierror):
                    return True
                if isinstance(e.reason, TimeoutError):
                    return True
            if isinstance(e, TimeoutError):
                return True
            return False

        if tryCount == 0:
            # no stop condition: retry indefinitely (wget's "tries = 0" semantics)
            retryer = tenacity.Retrying(wait=tenacity.wait_fixed(retryWaitSeconds),
                                        retry=tenacity.retry_if_exception(_retryIfTrue),
                                        reraise=True)
        else:
            retryer = tenacity.Retrying(wait=tenacity.wait_fixed(retryWaitSeconds),
                                        stop=tenacity.stop_after_attempt(tryCount),
                                        retry=tenacity.retry_if_exception(_retryIfTrue),
                                        reraise=True)
        for attempt in retryer:
            with attempt:
                with urllib.request.urlopen(*kargs, **kwargs, timeout=timeoutSeconds) as resp:
                    yield resp
                    break

    @functools.cache
    @staticmethod
    def _parseWgetrc():
        tryCount = 20
        waitRetry = 10

        try:
            buf = pathlib.Path("/etc/wgetrc").read_text()
            # tryCount
            m = re.search(r"^\s*tries\s*=\s*([0-9]+)\s*$", buf, re.M)
            if m is not None:
                tryCount = int(m.group(1))
            # waitRetry
            m = re.search(r"^\s*waitretry\s*=\s*([0-9]+)\s*$", buf, re.M)
            if m is not None:
                waitRetry = int(m.group(2))
        except FileNotFoundError:
            pass

        return (tryCount, waitRetry)

    @staticmethod
    def getOsName(osType):
        _osNameDict = {
            "gentoo": "Gentoo Linux",
            "mswin_xp": "Microsoft Windows XP",
            "mswin_7": "Microsoft Windows 7",
            "lineage": "Lineage OS",
        }
        return _osNameDict[osType]

    @staticmethod
    def getArchName(arch):
        _archNameDict = {
            "i386": "X86",
            "amd64": "X86_64",
            "arm64": "ARM64",
        }
        return _archNameDict[arch]

    @staticmethod
    def compressMultiVolumeTarXz(srcDir, dstFile, volumeSize):
        """Pack srcDir into an xz-compressed tar split into volumes of *volumeSize*.

        Volumes are named dstFile.aa, dstFile.ab, ... (split(1) suffixes);
        when everything fits into a single volume the suffix is dropped.
        """
        # compress
        with TempChdir(srcDir):
            subprocess.check_call("tar -cf - * | xz -c | split -b %s - %s." % (volumeSize, dstFile), shell=True)

        # if there's only one volume, remove postfix
        fullfnList = glob.glob(dstFile + ".*")
        assert len(fullfnList) > 0
        if len(fullfnList) == 1:
            assert fullfnList[0].endswith(".aa")
            os.rename(fullfnList[0], dstFile)

    @staticmethod
    def compressMultiVolume7z(srcDir, dstFile, volumeSize):
        """Pack srcDir into a 7z archive split into volumes of *volumeSize*.

        NOTE(review): only the first volume (.001) is renamed to dstFile;
        archives spanning several volumes keep the numeric suffix on the
        rest — confirm callers expect this.
        """
        with TempChdir(srcDir):
            subprocess.check_call("7z a %s * -v%s" % (dstFile, volumeSize), shell=True, stdout=subprocess.DEVNULL)
            os.rename(dstFile + ".001", dstFile)

    @staticmethod
    def generateAllZeroMachineId():
        return '0' * 32

    @staticmethod
    def generateRandomMachineId():
        return ''.join(random.choice('0123456789abcdef') for _ in range(32))

    @staticmethod
    def checkMachineId(machine_id):
        if len(machine_id) != 32:
            return False
        if any([c not in '0123456789abcdef' for c in machine_id]):
            return False
        return True

    @staticmethod
    def json2file(data, filepath):
        with open(filepath, "w") as f:
            json.dump(data, f)

    @staticmethod
    def file2json(filepath):
        with open(filepath) as f:
            return json.load(f)

    @staticmethod
    def forceDelete(path):
        """Delete *path* whatever it is; do nothing if it does not exist.

        The islink check must come first: os.path.isfile/isdir follow
        symlinks, and we want to remove the link itself, not its target.
        """
        if os.path.islink(path):
            os.remove(path)
        elif os.path.isfile(path):
            os.remove(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)
        elif os.path.lexists(path):
            os.remove(path)             # other type of file, such as device node
        else:
            pass                        # path does not exist, do nothing

    @staticmethod
    def forceSymlink(target, link_path):
        """Make link_path a symlink pointing at target, replacing whatever was there."""
        if os.path.islink(link_path) and os.readlink(link_path) == target:      # already exist
            return
        Util.forceDelete(link_path)                   # os.symlink won't overwrite anything, so we delete it first
        os.symlink(target, link_path)

    @staticmethod
    def isKernelThread(pid):
        pass

    @staticmethod
    def bcacheGetHitRatio(setUuid):
        """Return today's cache hit ratio of bcache set *setUuid*, read from sysfs."""
        return int(pathlib.Path(os.path.join("/sys", "fs", "bcache", setUuid, "stats_day", "cache_hit_ratio")).read_text().rstrip("\n"))

    @staticmethod
    def mbrReadBootCode(devPath):
        with open(devPath, "rb") as f:
            return f.read(440)

    @staticmethod
    def fatGenerateRandomFsUuid():
        ret = ''.join(random.choice('0123456789ABCDEF') for _ in range(8))
        ret = ret[:4] + "-" + ret[4:]
        return ret

    @staticmethod
    def fatSetFsUuid(devPath, fsUuid):
        assert fsUuid[4] == "-"
        fsUuid = fsUuid[:4] + fsUuid[5:]
        fsUuid = int(fsUuid, 16).to_bytes(4, byteorder='little')
        with open(devPath, "rb+") as f:
            f.seek(0x43)
            f.write(fsUuid)

    @staticmethod
    def ntfsReadBootCode(devPath):
        with open(devPath, "rb") as f:
            f.seek(0x54)
            return f.read(426)

    @staticmethod
    def makeSquashedRootfsFiles(rootfsDir, dstDir, exclude=None):
        """Squash rootfsDir into dstDir/rootfs.sqfs and create its sha512 checksum file.

        *exclude* is an optional list of paths passed to "mksquashfs -e".
        Returns (sqfsFile, sqfsSumFile).

        Fix: the default for *exclude* was a shared mutable list ([]);
        None is used as the sentinel now (backward compatible).
        """
        exclude = [] if exclude is None else exclude

        sqfsFile = os.path.join(dstDir, "rootfs.sqfs")
        sqfsSumFile = os.path.join(dstDir, "rootfs.sqfs.sha512")

        if len(exclude) > 0:
            subprocess.check_call("mksquashfs %s %s -no-progress -noappend -quiet -e %s" % (rootfsDir, sqfsFile, " ".join(exclude)), shell=True)
        else:
            subprocess.check_call("mksquashfs %s %s -no-progress -noappend -quiet" % (rootfsDir, sqfsFile), shell=True)
        subprocess.check_call("sha512sum %s > %s" % (sqfsFile, sqfsSumFile), shell=True)

        # remove directory prefix in rootfs.sqfs.sha512, sha512sum sucks
        subprocess.check_call(["sed", "-i", "s#%s/\\?##" % (dstDir), sqfsSumFile])

        return (sqfsFile, sqfsSumFile)

    @staticmethod
    def formatDisk(devPath, partitionTableType, fsType=None, fsUuid=None, label=None):
        """Create a fresh partition table on devPath with one maximal partition.

        partitionTableType is "mbr" or "gpt".  fsType may be "fat" (the new
        partition is then formatted FAT32, optionally labelled and given
        fsUuid) or None (partition left unformatted; fsUuid/label must be
        None too).  Returns the partition's device path.

        NOTE(review): the returned path is devPath + "1", which is wrong
        for nvme-style disks (they need a "p1" suffix, cf.
        devPathDiskToPartition) — confirm callers only pass sd/vd-style disks.
        """
        if partitionTableType == "mbr":
            partitionTableType = "msdos"        # pyparted's name for the classic MBR table
        elif partitionTableType == "gpt":
            pass
        else:
            assert False

        if fsType == "fat":
            pass
        elif fsType is None:
            assert fsUuid is None and label is None
        else:
            assert False

        if label is None:
            label = ""

        disk = parted.freshDisk(parted.getDevice(devPath), partitionTableType)

        assert len(disk.getFreeSpaceRegions()) == 1
        freeRegion = disk.getFreeSpaceRegions()[0]

        # align the partition boundaries to the device's optimal alignment by hand
        pStart = disk.device.optimalAlignedConstraint.startAlign.alignUp(freeRegion, freeRegion.start)
        pEnd = disk.device.optimalAlignedConstraint.endAlign.alignDown(freeRegion, freeRegion.end)
        region = parted.Geometry(device=disk.device, start=pStart, end=pEnd)

        if fsType == "fat":
            partition = parted.Partition(disk=disk, type=parted.PARTITION_NORMAL,
                                         fs=parted.FileSystem(type="fat32", geometry=region),
                                         geometry=region)
        else:
            partition = parted.Partition(disk=disk, type=parted.PARTITION_NORMAL,
                                         geometry=region)

        if not disk.addPartition(partition=partition, constraint=disk.device.optimalAlignedConstraint):
            # it sucks that disk.addPartition() won't do the job of restricting region INSIDE constraint
            # so we must calculate pStart and pEnd manually beforehand
            raise Exception("failed to format %s" % (devPath))
        if not disk.commit():
            # experiments show that disk.commit() blocks until /dev is updated
            raise Exception("failed to format %s" % (devPath))

        partDevPath = devPath + "1"
        if fsType == "fat":
            Util.cmdCall("mkfs.vfat", "-F", "32", "-n", label, partDevPath)
            if fsUuid is not None:
                Util.fatSetFsUuid(partDevPath, fsUuid)
        else:
            pass

        return partDevPath

    @staticmethod
    def createAndMountFatDiskImage(filepath, imageSize, partitionTableType, loopFile, mntDir, fsUuid=None, label=None):
        """Create a raw disk image with one FAT32 partition and mount it.

        The image file is created sparse at *imageSize* bytes, partitioned
        ("mbr" or "gpt"), and the single partition is exposed through loop
        device *loopFile* (offset-mounted) and mounted at mntDir via fusefat.
        On mkfs/mount failure the loop device is detached before re-raising.
        NOTE(review): relies on Util.loopMount/loopUnMount defined elsewhere
        in this file.
        """
        if partitionTableType == "mbr":
            partitionTableType = "msdos"        # pyparted's name for the classic MBR table
        elif partitionTableType == "gpt":
            pass
        else:
            assert False

        if label is None:
            label = ""

        # sparse file of the requested size
        with open(filepath, 'wb') as f:
            f.truncate(imageSize)

        disk = parted.freshDisk(parted.getDevice(filepath), partitionTableType)

        assert len(disk.getFreeSpaceRegions()) == 1
        freeRegion = disk.getFreeSpaceRegions()[0]

        # align the partition boundaries to the device's optimal alignment by hand
        pStart = disk.device.optimalAlignedConstraint.startAlign.alignUp(freeRegion, freeRegion.start)
        pEnd = disk.device.optimalAlignedConstraint.endAlign.alignDown(freeRegion, freeRegion.end)
        region = parted.Geometry(device=disk.device, start=pStart, end=pEnd)

        partition = parted.Partition(disk=disk, type=parted.PARTITION_NORMAL,
                                     fs=parted.FileSystem(type="fat32", geometry=region),
                                     geometry=region)

        if not disk.addPartition(partition=partition, constraint=disk.device.optimalAlignedConstraint):
            # it sucks that disk.addPartition() won't do the job of restricting region INSIDE constraint
            # so we must calculate pStart and pEnd manually beforehand
            raise Exception("failed to create %s" % (filepath))
        if not disk.commit():
            # experiments show that disk.commit() blocks until /dev is updated
            raise Exception("failed to create %s" % (filepath))

        Util.loopMount(filepath, loopFile, region.start * disk.device.sectorSize, region.length * disk.device.sectorSize)
        try:
            Util.cmdCall("mkfs.vfat", "-F", "32", "-n", label, loopFile)
            if fsUuid is not None:
                Util.fatSetFsUuid(loopFile, fsUuid)
            subprocess.check_call(["fusefat", loopFile, mntDir, "-o", "rw+"],
                                  stdout=subprocess.DEVNULL,                       # fusefat has no quiet option
                                  stderr=subprocess.DEVNULL)                       # fusefat has no quiet option
        except BaseException:
            Util.loopUnMount(loopFile)
            raise

    @staticmethod
    def gptSetPartitionLabel(devPath, label):
        """Set the GPT partition name ("label") of partition device *devPath*."""
        diskPath, partiId = Util.devPathPartitionToDiskAndPartitionId(devPath)
        dev = parted.getDevice(diskPath)
        disk = parted.newDisk(dev)
        partition = disk.partitions[partiId - 1]        # assumes partition numbers are contiguous from 1 — TODO confirm for disks with deleted partitions
        partition.name = label
        disk.commit()

    @staticmethod
    def getBlkDevPartitionTableType(devPath):
        """Return "mbr", "gpt", another raw PTTYPE value, or "" when blkid reports none."""
        ret = Util.cmdCall("blkid", "-o", "export", devPath)
        m = re.search("^PTTYPE=(\\S+)$", ret, re.M)
        if m is not None:
            if m.group(1) == "gpt":
                return "gpt"
            elif m.group(1) == "dos":
                return "mbr"        # blkid calls the classic MBR table "dos"
            else:
                return m.group(1)
        else:
            return ""

    @staticmethod
    def syncDirs(srcList, dstDir):
        for fn in os.listdir(dstDir):
            if fn not in srcList:
                Util.forceDelete(os.path.join(dstDir, fn))
        for fn in srcList:
            fullfn = os.path.join(dstDir, fn)
            if not os.path.exists(fullfn):
                os.mkdir(fullfn)

    @staticmethod
    def strListMaxLen(strList):
        maxLen = 0
        for lname in strList:
            if len(lname) > maxLen:
                maxLen = len(lname)
        return maxLen

    @staticmethod
    def listRemoveDuplication(theList):
        ret = []
        theSet = set()
        for k in theList:
            if k not in theSet:
                ret.append(k)
                theSet.add(k)
        return ret

    @staticmethod
    def listCheckTrueFalse(theList, key=None):
        if key is None:
            def key(x):
                return x

        bAlwaysTrue = True
        for item in theList:
            ret = key(item)
            if ret:
                assert bAlwaysTrue
            else:
                bAlwaysTrue = False

    @staticmethod
    def pad(string, length):
        '''Pad a string with spaces.'''
        if len(string) <= length:
            return string + ' ' * (length - len(string))
        else:
            return string[:length - 3] + '...'

    @staticmethod
    def getDirLastUpdateTime(dirpath):
        """Return the newest mtime found anywhere under *dirpath* as a datetime.

        Uses find(1) to print every entry's mtime as YYYYmmddHHMMSS.frac,
        sorts lexically and takes the last (largest); the fractional part
        is stripped before parsing.
        """
        out = Util.shellCall("find \"%s\" -printf \"%%TY%%Tm%%Td%%TH%%TM%%TS\\n\" | /bin/sort | /bin/tail -1" % (dirpath))
        out = re.search(r'^(.*)\.', out).group(1)
        return datetime.strptime(out, "%Y%m%d%H%M%S")

    @staticmethod
    def listDirWithoutKeepFiles(dirpath):
        ret = []
        for fn in os.listdir(dirpath):
            if fn.startswith(".keep"):
                continue
            ret.append((fn, os.path.join(dirpath, fn)))
        return ret

    @staticmethod
    def getLoadAvgStr():
        try:
            avg = os.getloadavg()
        except OSError:
            return 'unknown'

        max_avg = max(avg)
        if max_avg < 10:
            digits = 2
        elif max_avg < 100:
            digits = 1
        else:
            digits = 0

        return ", ".join(("%%.%df" % (digits)) % x for x in avg)

    @staticmethod
    def getOnDiskSwapSizeInGb():
        # see: https://opensource.com/article/19/2/swap-space-poll
        # we believe that as long as an on-disk swap exists, it should be ready (have enough capacity) to be used for hibernation
        # zram can always be used if our on-disk swap is not favored due to its (potentially) excessive consumption of disk space, and we think it should be understandable that hibernation is not possible in such cases
        # see also:
        #   https://wiki.manjaro.org/index.php?title=Swap
        while True:
            sz = Util.getPhysicalMemorySizeInGb()
            if sz <= 2:
                ret = sz * 3
                break
            if sz <= 8:
                ret = max(2 * 3, sz * 2)
                break
            if sz <= 64:
                ret = max(8 * 2, sz * 3 // 2)
                break
            ret = max(64 * 3 // 2, sz)
            break

        # with kswap, generally we can use 1/3 of the original size
        return ret // 3

    @staticmethod
    def getInMemorySwapSizeInMb():
        ret = Util.getPhysicalMemorySizeInGb() * 1024
        ret = ret // 10
        if ret < 2048:
            ret = min(ret, 2048)
        return ret

    @staticmethod
    def formatSize(value):
        # value is in bytes
        if value > 1024 * 1024 * 1024 * 1024:
            return "%.1fTiB" % (value / 1024 / 1024 / 1024 / 1024)
        elif value > 1024 * 1024 * 1024:
            return "%.1fGiB" % (value / 1024 / 1024 / 1024)
        elif value > 1024 * 1024:
            return "%.1fMiB" % (value / 1024 / 1024)
        elif value > 1024:
            return "%.1fKiB" % (value / 1024)
        else:
            assert False

    @staticmethod
    def formatFlops(value):
        # value is in gflops
        if value > 1024:
            return "%.1fTFLOPs" % (value / 1024)
        else:
            return "%.1fGFLOPs" % (value)

    @staticmethod
    def getPhysicalMemorySizeInGb():
        """Return the physical RAM size in GiB, rounded up from /proc/meminfo."""
        with open("/proc/meminfo", "r") as f:
            # We return memory size in GB.
            # Since the memory size shown in /proc/meminfo is always a
            # little less than the real size because various sort of
            # reservation, so we do a "+1"
            m = re.search("^MemTotal:\\s+(\\d+)", f.read())
            return int(m.group(1)) // 1024 // 1024 + 1

    @staticmethod
    def findBackendGraphicsDevices():
        """Return /dev node paths of DRM "cardN" devices not tagged for user access.

        Devices carrying the "uaccess" udev tag are skipped; only udev
        sys_names of the form card<number> are considered.
        """
        ret = []
        context = pyudev.Context()
        for device in context.list_devices(subsystem='drm'):
            if "uaccess" in device.tags:
                continue
            if re.fullmatch("card[0-9]+", device.sys_name) is None:
                continue
            assert device.device_node is not None
            ret.append(device.device_node)
        return ret

    @staticmethod
    def getVendorIdAndDeviceIdByDevNode(path):
        """Return (vendor_id, device_id) ints for the device node *path*, or None.

        Scans all udev devices for a matching device_node, then parses the
        sysfs "device/vendor" and "device/device" attributes as hex.
        """
        # FIXME:
        # 1. should not udev, we can get sysfs directory major and minor id
        # 2. some device don't have "device" directory in sysfs (why)
        # 3. maybe we should raise Exceptionn when failure
        context = pyudev.Context()
        for device in context.list_devices():
            if device.device_node == path:
                fn1 = os.path.join(device.sys_path, "device", "vendor")
                fn2 = os.path.join(device.sys_path, "device", "device")
                return (int(pathlib.Path(fn1).read_text(), 16), int(pathlib.Path(fn2).read_text(), 16))
        return None

    @staticmethod
    def testZipFile(filename):
        with zipfile.ZipFile(filename, 'r', zipfile.ZIP_DEFLATED) as z:
            return (z.testzip() is None)

    @staticmethod
    def expandRsyncPatternToParentDirectories(pattern):
        ret = [pattern]
        m = re.fullmatch("(.*)/(\\*+)?", pattern)
        if m is not None:
            pattern = m.group(1)
        pattern = os.path.dirname(pattern)
        while pattern not in ["", "/"]:
            ret.append(pattern)
            pattern = os.path.dirname(pattern)
        return reversed(ret)

    @staticmethod
    def getPhysicalMemorySize():
        with open("/proc/meminfo", "r") as f:
            # We return memory size in GB.
            # Since the memory size shown in /proc/meminfo is always a
            # little less than the real size because various sort of
            # reservation, so we do a "+1"
            m = re.search("^MemTotal:\\s+(\\d+)", f.read())
            return int(m.group(1)) // 1024 // 1024 + 1

    @staticmethod
    def md5hash(s):
        return hashlib.md5(s.encode('utf-8')).hexdigest()

    @staticmethod
    def terminal_width():
        '''Determine width of terminal window.'''
        try:
            width = int(os.environ['COLUMNS'])
            if width > 0:
                return width
        except:
            pass
        try:
            query = struct.pack('HHHH', 0, 0, 0, 0)
            response = fcntl.ioctl(1, termios.TIOCGWINSZ, query)
            width = struct.unpack('HHHH', response)[1]
            if width > 0:
                return width
        except:
            pass
        return 80

    @staticmethod
    def realPathSplit(path):
        """os.path.split() only split a path into 2 component, I believe there are reasons, but it is really inconvenient.
           So I write this function to split a unix path into basic components.
           For example: /home/abc/bcd/def -> ["/", "home", "abc", "bcd", "def"]"""

        folders = []
        while True:
            path, folder = os.path.split(path)
            if folder != "":
                folders.append(folder)
            else:
                if path != "":
                    folders.append(path)
                break
        folders.reverse()
        return folders

    @staticmethod
    def devPathIsDiskOrPartition(devPath):
        if re.fullmatch("/dev/sd[a-z]", devPath) is not None:
            return True
        if re.fullmatch("(/dev/sd[a-z])([0-9]+)", devPath) is not None:
            return False
        if re.fullmatch("/dev/xvd[a-z]", devPath) is not None:
            return True
        if re.fullmatch("(/dev/xvd[a-z])([0-9]+)", devPath) is not None:
            return False
        if re.fullmatch("/dev/vd[a-z]", devPath) is not None:
            return True
        if re.fullmatch("(/dev/vd[a-z])([0-9]+)", devPath) is not None:
            return False
        if re.fullmatch("/dev/nvme[0-9]+n[0-9]+", devPath) is not None:
            return True
        if re.fullmatch("(/dev/nvme[0-9]+n[0-9]+)p([0-9]+)", devPath) is not None:
            return False
        assert False

    @staticmethod
    def devPathPartitionToDiskAndPartitionId(partitionDevPath):
        m = re.fullmatch("(/dev/sd[a-z])([0-9]+)", partitionDevPath)
        if m is not None:
            return (m.group(1), int(m.group(2)))
        m = re.fullmatch("(/dev/xvd[a-z])([0-9]+)", partitionDevPath)
        if m is not None:
            return (m.group(1), int(m.group(2)))
        m = re.fullmatch("(/dev/vd[a-z])([0-9]+)", partitionDevPath)
        if m is not None:
            return (m.group(1), int(m.group(2)))
        m = re.fullmatch("(/dev/nvme[0-9]+n[0-9]+)p([0-9]+)", partitionDevPath)
        if m is not None:
            return (m.group(1), int(m.group(2)))
        assert False

    @staticmethod
    def devPathPartitionToDisk(partitionDevPath):
        return Util.devPathPartitionToDiskAndPartitionId(partitionDevPath)[0]

    @staticmethod
    def devPathDiskOrPartitionToDisk(diskOrPartitionDevPath):
        if Util.devPathIsDiskOrPartition(diskOrPartitionDevPath):
            return diskOrPartitionDevPath
        else:
            return Util.devPathPartitionToDiskAndPartitionId(diskOrPartitionDevPath)[0]

    @staticmethod
    def devPathDiskToPartition(diskDevPath, partitionId):
        m = re.fullmatch("/dev/sd[a-z]", diskDevPath)
        if m is not None:
            return diskDevPath + str(partitionId)
        m = re.fullmatch("/dev/xvd[a-z]", diskDevPath)
        if m is not None:
            return diskDevPath + str(partitionId)
        m = re.fullmatch("/dev/vd[a-z]", diskDevPath)
        if m is not None:
            return diskDevPath + str(partitionId)
        m = re.fullmatch("/dev/nvme[0-9]+n[0-9]+", diskDevPath)
        if m is not None:
            return diskDevPath + "p" + str(partitionId)
        assert False

    @staticmethod
    def isBlkDevUsbStick(devPath):
        """Heuristic: a block device is a USB stick when sysfs marks it
        removable and it is driven by the generic "sd" (SCSI disk) driver."""
        devName = os.path.basename(devPath)

        remfile = "/sys/block/%s/removable" % (devName)
        if not os.path.exists(remfile):
            return False
        if not Util.str01ToBool(pathlib.Path(remfile).read_text().rstrip("\n")):
            return False

        ueventFile = "/sys/block/%s/device/uevent" % (devName)
        if "DRIVER=sd" not in pathlib.Path(ueventFile).read_text().split("\n"):
            return False

        return True

    @staticmethod
    def getBlkDevModel(devPath):
        """Return the hardware model string of a block device (via lsblk), or "unknown"."""
        ret = Util.cmdCall("lsblk", "-o", "MODEL", "-n", devPath)
        ret = ret.strip("\r\n")
        if ret == "":
            return "unknown"
        else:
            return ret

    @staticmethod
    def getBlkDevSize(devPath):
        """Return the size of a block device in bytes.

        "blockdev --getsz" reports 512-byte sectors regardless of the
        device's real sector size, hence the fixed multiplier.
        """
        out = Util.cmdCall("blockdev", "--getsz", devPath)
        return int(out) * 512        # unit is byte

    @staticmethod
    def getBlkDevUuid(devPath):
        """Return the filesystem UUID of *devPath* (via blkid), or "" when none.

        UUID is also called FS-UUID, PARTUUID is another thing.
        """
        ret = Util.cmdCall("blkid", devPath)
        m = re.search("UUID=\"(\\S*)\"", ret, re.M)
        if m is not None:
            return m.group(1)
        else:
            return ""

    @staticmethod
    def getBlkDevFsType(devPath):
        """Return the filesystem type of *devPath* in lowercase, or "" if blkid finds none."""
        ret = Util.cmdCall("blkid", "-o", "export", devPath)
        m = re.search("^TYPE=(\\S+)$", ret, re.M)
        if m is not None:
            return m.group(1).lower()
        else:
            return ""

    @staticmethod
    def getBlkDevHardwareWriteCacheStatus(diskDevPath):
        """Return the hardware write-cache state (True=on) of a SATA or NVMe disk.

        NOTE(review): implicitly returns None for paths matching neither
        pattern, and the NVMe branch assumes the "Current value" line is
        always present in nvme-cli output — confirm before relying on it.
        """
        m = re.fullmatch("/dev/sd[a-z]", diskDevPath)
        if m is not None:
            ret = Util.cmdCall("hdparm", "-W", diskDevPath)
            if "(on)" in ret:
                return True
            elif "(off)" in ret:
                return False
            else:
                assert False

        m = re.fullmatch("/dev/nvme[0-9]+n[0-9]+", diskDevPath)
        if m is not None:
            # feature id 6 is the NVMe "Volatile Write Cache" feature
            ret = Util.cmdCall("nvme", "get-feature", "-f", "6", diskDevPath)
            m = re.search("Current value:0x0000000(0|1)", ret)
            return (m.group(1) == "1")

    @staticmethod
    def setBlkDevHardwareWriteCacheStatus(diskDevPath, value):
        """Enable/disable the hardware write cache of a SATA disk (NVMe not implemented)."""
        m = re.fullmatch("/dev/sd[a-z]", diskDevPath)
        if m is not None:
            Util.cmdCall("hdparm", "-W", "1" if value else "0", diskDevPath)
            return

        m = re.fullmatch("/dev/nvme[0-9]+n[0-9]+", diskDevPath)
        if m is not None:
            # FIXME
            assert False

    @staticmethod
    def getHostArch():
        # Code copied from linux kernel Makefile:
        #   /usr/bin/uname -m | /bin/sed -e s/i.86/i386/ -e s/sun4u/sparc64/
        #                                -e s/arm.*/arm/ -e s/sa110/arm/
        #                                -e s/s390x/s390/ -e s/parisc64/parisc/
        #                                -e s/ppc.*/powerpc/ -e s/mips.*/mips/
        #                                -e s/sh.*/sh/
        ret = platform.machine()
        ret = re.sub("i.86", "i386", ret)
        ret = re.sub("sun4u", "sparc64", ret)
        ret = re.sub("arm.*", "arm", ret)
        ret = re.sub("sall0", "arm", ret)
        ret = re.sub("s390x", "s390", ret)
        ret = re.sub("paris64", "parisc", ret)
        ret = re.sub("ppc.*", "powerpc", ret)
        ret = re.sub("mips.*", "mips", ret)
        ret = re.sub("sh.*", "sh", ret)
        return ret

    @staticmethod
    def isTwoDirSame(dir1, dir2):
        """Return True when the two directory trees have identical content (diff -r exits 0)."""
        # FIXME: we could use python to do this
        return Util.cmdCallWithRetCode("diff", "-r", dir1, dir2)[0] == 0

    @staticmethod
    def touchFile(filename):
        assert not os.path.exists(filename)
        f = open(filename, 'w')
        f.close()

    @staticmethod
    def compareVersion(verstr1, verstr2):
        """eg: 3.9.11-gentoo-r1 or 3.10.7-gentoo"""

        partList1 = verstr1.split("-")
        partList2 = verstr2.split("-")

        verList1 = partList1[0].split(".")
        verList2 = partList2[0].split(".")

        if len(verList1) == 3 and len(verList2) == 3:
            ver1 = int(verList1[0]) * 10000 + int(verList1[1]) * 100 + int(verList1[2])
            ver2 = int(verList2[0]) * 10000 + int(verList2[1]) * 100 + int(verList2[2])
        elif len(verList1) == 2 and len(verList2) == 2:
            ver1 = int(verList1[0]) * 100 + int(verList1[1])
            ver2 = int(verList2[0]) * 100 + int(verList2[1])
        elif len(verList1) == 1 and len(verList2) == 1:
            ver1 = int(verList1[0])
            ver2 = int(verList2[0])
        else:
            assert False

        if ver1 > ver2:
            return 1
        elif ver1 < ver2:
            return -1

        if len(partList1) >= 2 and len(partList2) == 1:
            return 1
        elif len(partList1) == 1 and len(partList2) >= 2:
            return -1

        p1 = "-".join(partList1[1:])
        p2 = "-".join(partList2[1:])
        if p1 > p2:
            return 1
        elif p1 < p2:
            return -1

        return 0

    @staticmethod
    def removeDirContentExclude(dirPath, excludeList):
        for fn in os.listdir(dirPath):
            if fn not in excludeList:
                Util.forceDelete(os.path.join(dirPath, fn))

    @staticmethod
    def removeEmptyDir(dirname):
        if len(os.listdir(dirname)) == 0:
            os.rmdir(dirname)

    @staticmethod
    def isCfgFileReallyNotEmpty(filename):
        with open(filename, "r") as f:
            for line in f.read().split("\n"):
                if line.strip() == "":
                    continue
                if line.startswith("#"):
                    continue
                return True
        return False

    @staticmethod
    def ensureAncesterDir(filename):
        assert os.path.isabs(filename)

        splist = []
        while True:
            filename, bf = os.path.split(filename)
            if bf == "":
                break
            splist.insert(0, bf)

        curd = "/"
        for d in splist[:-1]:
            curd = os.path.join(curd, d)
            if not os.path.isdir(curd):
                os.mkdir(curd)

    @staticmethod
    def getDirFreeSpace(dirname):
        """Returns free space in MB"""

        ret = Util.cmdCall("df", "-m", dirname)
        # parse the "Available" column of df output
        # NOTE(review): the "+" after a space in this pattern quantifies a
        # literal space, so at least two spaces are required between some
        # columns — works with typical df padding, but confirm for very
        # wide column values
        m = re.search("^.* + [0-9]+ +[0-9]+ +([0-9]+) + [0-9]+% .*$", ret, re.M)
        return int(m.group(1))

    @staticmethod
    def getMountDeviceForPath(pathname):
        """Return the device mounted exactly at *pathname*, or None.

        Parses the human-readable output of mount(8); pathname must match
        the mount point string verbatim.
        """
        buf = Util.cmdCall("mount")
        for line in buf.split("\n"):
            m = re.search("^(.*) on (.*) type ", line)
            if m is not None and m.group(2) == pathname:
                return m.group(1)
        return None

    @staticmethod
    def isMountPoint(pathname):
        """Return True when *pathname* is currently a mount point (per mount(8) output)."""
        device = Util.getMountDeviceForPath(pathname)
        return device is not None

    @staticmethod
    def isDirAncestor(path1, path2):
        """check if path2 is the ancestor of path1"""
        return path1.startswith(path2 + "/")

    @staticmethod
    def getHomeDir(userName):
        if userName == "root":
            return "/root"
        else:
            return os.path.join("/home", userName)

    @staticmethod
    def cmdCall(cmd, *kargs):
        """Run *cmd* with arguments; return its rstripped output (stdout+stderr merged).

        Raises subprocess.CalledProcessError on non-zero exit, printing the
        captured output first.
        """
        # call command to execute backstage job
        #
        # scenario 1, process group receives SIGTERM, SIGINT and SIGHUP:
        #   * callee must auto-terminate, and cause no side-effect
        #   * caller must be terminated by signal, not by detecting child-process failure
        # scenario 2, caller receives SIGTERM, SIGINT, SIGHUP:
        #   * caller is terminated by signal, and NOT notify callee
        #   * callee must auto-terminate, and cause no side-effect, after caller is terminated
        # scenario 3, callee receives SIGTERM, SIGINT, SIGHUP:
        #   * caller detects child-process failure and do appopriate treatment

        ret = subprocess.run([cmd] + list(kargs),
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             text=True)
        if ret.returncode > 128:
            # for scenario 1, caller's signal handler has the oppotunity to get executed during sleep
            time.sleep(1.0)
        if ret.returncode != 0:
            print(ret.stdout)
            ret.check_returncode()
        return ret.stdout.rstrip()

    @staticmethod
    def cmdCallWithRetCode(cmd, *kargs):
        ret = subprocess.run([cmd] + list(kargs),
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             text=True)
        if ret.returncode > 128:
            time.sleep(1.0)
        return (ret.returncode, ret.stdout.rstrip())

    @staticmethod
    def cmdCallWithInput(cmd, inStr, *kargs):
        ret = subprocess.run([cmd] + list(kargs),
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             input=inStr, text=True)
        if ret.returncode > 128:
            time.sleep(1.0)
        if ret.returncode != 0:
            print(ret.stdout)
            ret.check_returncode()
        return ret.stdout.rstrip()

    @staticmethod
    def cmdCallTestSuccess(cmd, *kargs):
        ret = subprocess.run([cmd] + list(kargs),
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             text=True)
        if ret.returncode > 128:
            time.sleep(1.0)
        return (ret.returncode == 0)

    @staticmethod
    def shellCall(cmd):
        # call command with shell to execute backstage job
        # scenarios are the same as Util.cmdCall

        ret = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             shell=True, text=True)
        if ret.returncode > 128:
            # for scenario 1, caller's signal handler has the oppotunity to get executed during sleep
            time.sleep(1.0)
        if ret.returncode != 0:
            print(ret.stdout)
            ret.check_returncode()
        return ret.stdout.rstrip()

    @staticmethod
    def shellCallWithRetCode(cmd):
        ret = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             shell=True, text=True)
        if ret.returncode > 128:
            time.sleep(1.0)
        return (ret.returncode, ret.stdout.rstrip())

    @staticmethod
    def cmdExec(cmd, *kargs):
        """Run cmd with inherited stdio (no output capture).

        Raises subprocess.CalledProcessError on non-zero exit.
        """

        # call command to execute frontend job
        #
        # scenario 1, process group receives SIGTERM, SIGINT and SIGHUP:
        #   * callee must auto-terminate, and cause no side-effect
        #   * caller must be terminate AFTER child-process, and do neccessary finalization
        #   * termination information should be printed by callee, not caller
        # scenario 2, caller receives SIGTERM, SIGINT, SIGHUP:
        #   * caller should terminate callee, wait callee to stop, do neccessary finalization, print termination information, and be terminated by signal
        #   * callee does not need to treat this scenario specially
        # scenario 3, callee receives SIGTERM, SIGINT, SIGHUP:
        #   * caller detects child-process failure and do appopriate treatment
        #   * callee should print termination information

        # FIXME, the above condition is not met, Util.shellExec has the same problem

        ret = subprocess.run([cmd] + list(kargs), text=True)
        # exit status > 128 means killed-by-signal; sleep so our own signal
        # handler (if any) gets a chance to run first
        if ret.returncode > 128:
            time.sleep(1.0)
        ret.check_returncode()

    @staticmethod
    def shellExec(cmd):
        ret = subprocess.run(cmd, shell=True, text=True)
        if ret.returncode > 128:
            time.sleep(1.0)
        ret.check_returncode()

    @staticmethod
    def getFreeTcpPort(start_port=10000, end_port=65536):
        for port in range(start_port, end_port):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.bind((('', port)))
                return port
            except socket.error:
                continue
            finally:
                s.close()
        raise Exception("No valid tcp port in [%d,%d]." % (start_port, end_port))

    @staticmethod
    def waitTcpService(ip, port):
        # Block until a TCP socket is listening on ip:port, polling the output
        # of "netstat -lant" once per second.
        ip = ip.replace(".", "\\.")         # escape the dots so ip is a regex literal
        while True:
            out = Util.cmdCall("netstat", "-lant")
            m = re.search("tcp +[0-9]+ +[0-9]+ +(%s:%d) +.*" % (ip, port), out)
            if m is not None:
                return
            time.sleep(1.0)

    @staticmethod
    def newBuffer(ch, li):
        ret = bytearray()
        i = 0
        while i < li:
            ret.append(ch)
            i += 1
        return bytes(ret)

    @staticmethod
    def getMakeConfVar(makeConfFile, varName):
        """Returns variable value, returns "" when not found
           Multiline variable definition is not supported yet"""

        buf = ""
        with open(makeConfFile, 'r') as f:
            buf = f.read()

        m = re.search("^%s=\"(.*)\"$" % (varName), buf, re.MULTILINE)
        if m is None:
            return ""
        varVal = m.group(1)

        while True:
            m = re.search("\\${(\\S+)?}", varVal)
            if m is None:
                break
            varName2 = m.group(1)
            varVal2 = Util.getMakeConfVar(makeConfFile, varName2)
            if varVal2 is None:
                varVal2 = ""

            varVal = varVal.replace(m.group(0), varVal2)

        return varVal

    @staticmethod
    def genSelfSignedCertAndKey(cn, keysize):
        """Generate an RSA key of keysize bits plus a matching self-signed
           X.509 certificate with common-name cn; returns (cert, key).

        The certificate is made valid from 100 years in the past to 100 years
        in the future. NOTE(review): it is signed with SHA-1, which is
        obsolete for new certificates -- consider sha256.
        """
        k = crypto.PKey()
        k.generate_key(crypto.TYPE_RSA, keysize)

        cert = crypto.X509()
        cert.get_subject().CN = cn
        cert.set_serial_number(random.randint(0, 65535))
        cert.gmtime_adj_notBefore(100 * 365 * 24 * 60 * 60 * -1)
        cert.gmtime_adj_notAfter(100 * 365 * 24 * 60 * 60)
        cert.set_issuer(cert.get_subject())         # self-signed: issuer == subject
        cert.set_pubkey(k)
        cert.sign(k, 'sha1')

        return (cert, k)

    @staticmethod
    def dumpCertAndKey(cert, key, certFile, keyFile):
        """Write cert and key as PEM files: certificate world-readable (0644),
           private key owner-only (0600)."""
        with open(certFile, "wb") as f:
            buf = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
            f.write(buf)
            os.fchmod(f.fileno(), 0o644)

        with open(keyFile, "wb") as f:
            buf = crypto.dump_privatekey(crypto.FILETYPE_PEM, key)
            f.write(buf)
            os.fchmod(f.fileno(), 0o600)

    @staticmethod
    def getCpuArch():
        ret = platform.machine()
        if ret == "x86_64":
            return "amd64"
        else:
            return ret

    @staticmethod
    def getCpuModel():
        # "uname -p" prints the processor type (may be "unknown" on some systems)
        return Util.cmdCall("uname", "-p")

    @staticmethod
    def repoIsSysFile(fbasename):
        """fbasename value is like "sys-devel", "sys-devel/gcc", "profiles", etc"""

        if fbasename.startswith("."):
            return True
        if fbasename == "licenses" or fbasename.startswith("licenses/"):
            return True
        if fbasename == "metadata" or fbasename.startswith("metadata/"):
            return True
        if fbasename == "profiles" or fbasename.startswith("profiles/"):
            return True
        if fbasename == "eclass" or fbasename.startswith("eclass/"):
            return True
        return False

    @staticmethod
    def repoGetCategoryDirList(repoDir):
        ret = Util.getFileList(repoDir, 1, "d")
        ret = [x for x in ret if not Util.repoIsSysFile(x)]
        return ret

    @staticmethod
    def repoGetEbuildDirList(repoDir):
        ret = Util.getFileList(repoDir, 2, "d")
        ret = [x for x in ret if not Util.repoIsSysFile(x)]
        return ret

    @staticmethod
    def repoRemovePackageAndCategory(repoDir, pkgName):
        ebuildDir = os.path.join(repoDir, pkgName)
        shutil.rmtree(ebuildDir)
        categoryDir = os.path.dirname(ebuildDir)
        if os.listdir(categoryDir) == []:
            os.rmdir(categoryDir)

    @staticmethod
    def repoGetMasters(repoDir):
        buf = pathlib.Path(os.path.join(repoDir, "metadata", "layout.conf")).read_text()
        m = re.search(r"^\s*masters\s*=\s*(.*)\s*$", buf, re.M)
        if m is None:
            return []
        else:
            ret = m.group(1)
            ret = ret.split(" ")
            ret = [x for x in ret if x != ""]
            return ret

    @staticmethod
    def repoGetRepoName(repoDir):
        layoutFn = os.path.join(repoDir, "metadata", "layout.conf")
        if os.path.exists(layoutFn):
            m = re.search("repo-name = (\\S+)", pathlib.Path(layoutFn).read_text(), re.M)
            if m is not None:
                return m.group(1)

        repoNameFn = os.path.join(repoDir, "profiles", "repo_name")
        if os.path.exists(repoNameFn):
            ret = pathlib.Path(repoNameFn).read_text()
            ret = ret.rstrip("\n").rstrip()
            ret = ret.replace(" ", "-")                         # it seems this translation is neccessary
            return ret

        # fatal error: can not get repoName
        return None

    @staticmethod
    def repoGetThinManifests(repoDir):
        buf = pathlib.Path(os.path.join(repoDir, "metadata", "layout.conf")).read_text()
        m = re.search(r"^\s*thin-manifests\s*=\s*true\s*$", buf, re.M)
        return m is not None

    @staticmethod
    def repoSetThinManifests(repoDir, value):
        # we only support set to True
        assert value

        buf = pathlib.Path(os.path.join(repoDir, "metadata", "layout.conf")).read_text()
        m = re.search(r"^\s*thin-manifests\s*=\s*(.*)\s*$", buf, re.M)
        if m is not None:
            if m.group(1) == "true":
                return
            buf = buf.replace(m.group(0), "thin-manifests = true")
        else:
            buf += "\nthin-manifests = true\n"
        pathlib.Path(os.path.join(repoDir, "metadata", "layout.conf")).write_text(buf)

    @staticmethod
    def wgetSpider(url):
        # Probe url without downloading it ("wget --spider");
        # True when the server responds positively.
        return Util.cmdCallTestSuccess("wget", "--spider", url)

    @staticmethod
    def wgetDownload(url, localFile=None, bUseTmpFile=False):
        # Download url with wget (quiet, progress bar only).
        #   localFile is None: save under the remote name in the current directory
        #   bUseTmpFile=True:  download to "<localFile>.__download__" first and
        #                      rename afterwards, so an interrupted download
        #                      never leaves a partial localFile behind
        if localFile is None:
            assert not bUseTmpFile
            Util.cmdExec("wget", "-q", "--show-progress", url)
        else:
            tmpFile = localFile + ".__download__" if bUseTmpFile else localFile
            Util.cmdExec("wget", "-q", "--show-progress", "-O", tmpFile, url)
            if bUseTmpFile:
                os.rename(tmpFile, localFile)

    @staticmethod
    def downloadIfNewer(url, fullfn):
        """Download url to fullfn only when the remote copy is newer.

        Returns the modification time (datetime) of whichever copy ends up
        current. Relies on the server sending a Last-Modified header; the
        local file's mtime is set to the remote timestamp after download.
        """

        if os.path.exists(fullfn):
            # issue a HEAD request first and compare timestamps
            for resp in Util.robustUrlOpen(urllib.request.Request(url, method="HEAD")):
                remoteTm = datetime.strptime(resp.headers["Last-Modified"], "%a, %d %b %Y %H:%M:%S %Z")
                localTm = datetime.utcfromtimestamp(os.path.getmtime(fullfn))
                if remoteTm <= localTm:
                    return localTm
        for resp in Util.robustUrlOpen(url):
            # resp.read() returns bytes; the original called write_text(),
            # which raises TypeError for bytes -- use write_bytes()
            pathlib.Path(fullfn).write_bytes(resp.read())
            remoteTm = datetime.strptime(resp.headers["Last-Modified"], "%a, %d %b %Y %H:%M:%S %Z")
            os.utime(fullfn, (remoteTm.timestamp(), remoteTm.timestamp()))
            return remoteTm

    @staticmethod
    def udevIsPureUaccessRuleFile(filepath):
        if not os.path.basename(filepath).startswith("72-"):
            return False

        lineList = [x.strip() for x in pathlib.Path(filepath).read_text().split("\n")]

        # find and check first line
        firstLineNo = -1
        firstLineTagName = None
        for i in range(0, len(lineList)):
            line = lineList[i]
            if line != "" and not line.startswith("#"):
                firstLineNo = i
                m = re.fullmatch('ACTION=="remove", GOTO="(.*)_end"', line)
                if m is not None:
                    firstLineTagName = m.group(1)
                break
        if firstLineNo == -1:
            return False
        if firstLineTagName is None:
            return False

        # find and check last line
        lastLineNo = -1
        for i in reversed(range(firstLineNo + 1, len(lineList))):
            line = lineList[i]
            if line != "" and not line.startswith("#"):
                if re.fullmatch('LABEL="%s_end"' % (firstLineTagName), line) is not None:
                    lastLineNo = i
                break
        if lastLineNo == -1:
            return False

        # check middle lines
        pat = '.*, TAG-="uaccess", TAG-="seat", TAG-="master-of-seat", ENV{ID_SEAT}="", ENV{ID_AUTOSEAT}="", ENV{ID_FOR_SEAT}=""'
        for i in range(firstLineNo + 1, lastLineNo):
            line = lineList[i]
            if line != "" and not line.startswith("#"):
                if re.fullmatch(pat, line) is None:
                    return False

        return True

    @staticmethod
    def portageIsPkgInstalled(pkgName):
        """pkgName can be package-name or package-atom"""

        # vartree is portage's database of installed packages (/var/db/pkg);
        # a non-empty match means at least one installed version satisfies pkgName
        vartree = portage.db[portage.root]["vartree"]
        varCpvList = vartree.dbapi.match(pkgName)
        return len(varCpvList) != 0

    @staticmethod
    def portageIsPkgInstallable(pkgName):
        """pkgName can be package-name or package-atom"""

        # porttree is the ebuild repository tree; a non-empty match means at
        # least one visible ebuild satisfies pkgName
        porttree = portage.db[portage.root]["porttree"]
        cpvList = porttree.dbapi.match(pkgName)
        return len(cpvList) > 0

    @staticmethod
    def portageGetInstalledPkgAtomList(portageDbDir):
        pkgAtomList = []
        for fbasename in sorted(Util.getFileList(portageDbDir, 2, "d")):
            if Util.repoIsSysFile(fbasename):
                continue
            if fbasename.split("/")[1].startswith("-MERGING"):
                continue
            pkgAtomList.append(fbasename)
        return pkgAtomList

    @staticmethod
    def portageGetInstalledFileSet(expanded=False):
        """Return the set of file paths recorded as installed by portage
           (parsed out of the /var/db/pkg CONTENTS files).

        With expanded=True, also include files present on disk but not
        recorded verbatim: ".keep" placeholders, compiled *.pyc/*.pyo files,
        __pycache__ directories, and paths reached through directory symlinks.
        """
        fileSet = set()

        if True:
            # strip the CONTENTS record format ("obj ... <md5> <mtime>",
            # "sym ... -> target ...", "dir ...") down to bare paths
            cmdStr = r"cat /var/db/pkg/*/*/CONTENTS "
            cmdStr += r'| sed -e "s:^obj \(.*\) [[:xdigit:]]\+ [[:digit:]]\+$:\1:" '
            cmdStr += r'| sed -e "s:^sym \(.*\) -> .* .*$:\1:" '
            cmdStr += r'| sed -e "s:^dir \(.*\)$:\1:" '
            ret = Util.shellCall(cmdStr)
            fileSet = set(ret.split("\n"))

        if expanded:
            # deal with .keep
            nret = set()
            for f in fileSet:
                if os.path.isdir(f) and os.path.exists(os.path.join(f, ".keep")):
                    nret.add(os.path.join(f, ".keep"))
            fileSet |= nret

            # deal with *.py
            nret = set()
            for f in fileSet:
                if f.endswith(".py"):
                    if os.path.exists(f + "c"):
                        nret.add(f + "c")
                    if os.path.exists(f + "o"):
                        nret.add(f + "o")
            fileSet |= nret

            # deal with __pycache__
            for dn in ["/usr/lib", "/usr/lib64", "/usr/libexec"]:
                ret = Util.cmdCall("find", dn, "-regex", r'.*/__pycache__\(/.*\)?')
                fileSet |= set(ret.split("\n"))

            # deal with directory symlink
            nret = set()
            for f in fileSet:
                f2 = os.path.join(os.path.realpath(os.path.dirname(f)), os.path.basename(f))
                if f2 != f:
                    nret.add(f2)
            fileSet |= nret

        return fileSet

    @staticmethod
    def portageGetPkgEnabledUseFlags(pkgAtom):
        # Return the USE flags the installed pkgAtom was built with.
        # Raises IndexError when no installed version matches.
        # NOTE(review): dbapi.match() returns versions sorted ascending, so
        # cpv_list[0] is the OLDEST match despite the variable name
        # "latest_cpv" -- confirm whether cpv_list[-1] was intended.
        vartree = portage.db[portage.root]["vartree"]
        cpv_list = vartree.dbapi.match(pkgAtom)
        latest_cpv = cpv_list[0]
        return vartree.dbapi.aux_get(latest_cpv, ["USE"])[0].split()

    @staticmethod
    def portageReadRepoThirdPartyMirrors(repoDir):
        mirrorDict = dict()
        for line in pathlib.Path(os.path.join(repoDir, "profiles", "thirdpartymirrors")).read_text().split("\n"):
            if line != "" and not line.startswith("#"):
                items = line.strip().split(" ")
                mirrorDict[items[0]] = []
                for i in items[1:]:
                    if i != "":
                        mirrorDict[items[0]].append(i)
        return mirrorDict

    @staticmethod
    def portageParseVarDbPkgContentFile(filename):
        # portage must be patched
        #
        # returns [(type, path, XXX)]
        #   when type == "dir", XXX is permission, owner, group
        #   when type == "obj", XXX is md5sum, permission, owner, group
        #   when type == "sym", XXX is target, owner, group

        ret = []
        with open(filename, "r", encoding="UTF-8") as f:
            for line in f.readlines():
                elem_list = line.strip().split()
                if elem_list[0] == "dir":
                    item = ("dir", " ".join(elem_list[1:-3]), int(elem_list[-3], 8), int(elem_list[-2]), int(elem_list[-1]))
                    ret.append(item)
                elif elem_list[0] == "obj":
                    item = ("obj", " ".join(elem_list[1:-5]), elem_list[-5], int(elem_list[-3], 8), int(elem_list[-2]), int(elem_list[-1]))
                    ret.append(item)
                elif elem_list[0] == "sym":
                    middle_list = " ".join(elem_list[1:-3]).split(" -> ")
                    assert len(middle_list) == 2
                    item = ("sym", middle_list[0], middle_list[1], int(elem_list[-2]), int(elem_list[-1]))
                    ret.append(item)
                else:
                    assert False
        return ret

    @staticmethod
    def portageDetectJobCountAndLoadAverage(machine):
        """Return (jobcount, loadavg) emerge parallelism parameters for machine."""

        # determine parallelism parameters
        # note: when doing emerge by fpemud-os-sysman, there's other additional filter mechanisms
        if not Util.hwcfgHasFan(machine):
            # fanless machine: build serially, presumably to limit heat -- confirm
            jobcount = 1
            loadavg = 1
        else:
            jobcount = multiprocessing.cpu_count() + 2
            loadavg = max(1, multiprocessing.cpu_count())

        return (jobcount, loadavg)

    @staticmethod
    def isTrivalFileOrDir(filename):
        if os.path.islink(filename):
            return False
        if stat.S_ISCHR(os.stat(filename).st_mode):
            return False
        if stat.S_ISBLK(os.stat(filename).st_mode):
            return False
        if stat.S_ISFIFO(os.stat(filename).st_mode):
            return False
        if stat.S_ISSOCK(os.stat(filename).st_mode):
            return False
        return True

    @staticmethod
    def getFileList(dirName, level, typeList):
        """typeList is a string, value range is "d,f,l,a"
           returns basename"""

        ret = []
        for fbasename in os.listdir(dirName):
            fname = os.path.join(dirName, fbasename)

            if os.path.isdir(fname) and level - 1 > 0:
                for i in Util.getFileList(fname, level - 1, typeList):
                    ret.append(os.path.join(fbasename, i))
                continue

            appended = False
            if not appended and ("a" in typeList or "d" in typeList) and os.path.isdir(fname):         # directory
                ret.append(fbasename)
            if not appended and ("a" in typeList or "f" in typeList) and os.path.isfile(fname):        # file
                ret.append(fbasename)
            if not appended and ("a" in typeList or "l" in typeList) and os.path.islink(fname):        # soft-link
                ret.append(fbasename)

        return ret

    @staticmethod
    def listLeafDirs(dirName):
        ret = []

        dirName = os.path.abspath(dirName)
        if dirName == "/":
            prefixLen = 1
        else:
            prefixLen = len(dirName) + 1

        for root, dirs, files in os.walk(dirName):
            if root == dirName:
                continue
            if len(dirs) == 0:
                ret.append(root[prefixLen:])

        return ret

    @staticmethod
    def hashDir(dirname):
        h = hashlib.sha1()
        for root, dirs, files in os.walk(dirname):
            for filepath in files:
                with open(os.path.join(root, filepath), "rb") as f1:
                    buf = f1.read(4096)
                    while buf != b'':
                        h.update(hashlib.sha1(buf).digest())
                        buf = f1.read(4096)
        return h.hexdigest()

    @staticmethod
    def readListFile(filename):
        ret = []
        with open(filename, "r") as f:
            for line in f.read().split("\n"):
                line = line.strip()
                if line != "" and not line.startswith("#"):
                    ret.append(line)
        return ret

    @staticmethod
    def isDomainNamePrivate(domainName):
        tldList = [".intranet", ".internal", ".private", ".corp", ".home", ".lan"]    # from RFC6762
        tldList.append(".local")
        return any(domainName.endswith(x) for x in tldList)

    @staticmethod
    def tryPrivateDomainName(domainName):
        # return True: the private domain name is accessabile
        # return False: the private domain name is not accessabile after some test
        #
        # Retries once per second while resolution fails transiently (-3);
        # permanent resolution failures (-2, -5) mean "not accessible".

        assert Util.isDomainNamePrivate(domainName)

        while True:
            try:
                socket.gethostbyname(domainName)
                return True
            except socket.gaierror as e:
                if e.errno == -2:           # Name or service not known
                    return False
                elif e.errno == -3:         # Temporary failure in name resolution
                    pass                    # transient: log below and retry
                elif e.errno == -5:         # No address associated with hostname
                    return False
                else:
                    raise
                sys.stderr.write(e.strerror)        # only reached in the transient (-3) case
                time.sleep(1.0)

    @staticmethod
    def isUrlPrivate(url):
        ret = urllib.parse.urlparse(url)
        if ret.scheme == "mirror":
            return False
        return Util.isDomainNamePrivate(ret.hostname)

    @staticmethod
    def tryPrivateUrl(url):
        # return True: the private URL is accessabile
        # return False: the private URL is not accessabile after some test

        assert Util.isUrlPrivate(url)

        # accessibility is judged purely by resolving the URL's host name
        domainName = urllib.parse.urlparse(url).hostname
        return Util.tryPrivateDomainName(domainName)

    @staticmethod
    def gitConfigGet(dirName, name):
        """Read git config entry "name" from the repository at dirName.

        The multi-valued entry remote.origin.fetch is returned as a list of
        lines; everything else as a single string. Raises
        subprocess.CalledProcessError when the entry does not exist.
        """
        # FIXME: should change to use libgit2
        gitDir = os.path.join(dirName, ".git")
        cmdStr = "GIT_PAGER=cat git --git-dir=\"%s\" --work-tree=\"%s\" config --get-all %s" % (gitDir, dirName, name)
        ret = subprocess.check_output(cmdStr, shell=True, text=True).rstrip("\n")

        if name in ["remote.origin.fetch"]:
            return ret.split("\n")

        # any other entry is expected to be single-valued
        assert "\n" not in ret
        return ret

    @classmethod
    def gitConfigCompare(cls, dirName, name, value):
        """Compare git config entry "name" of the repo at dirName against value.

        value may be a single string or a list. remote.origin.fetch is
        multi-valued: every requested refspec must be present, and the only
        extra entry tolerated is git's default fetch refspec.
        """

        if name == "remote.origin.fetch":
            if not isinstance(value, list):
                value = [value]

            tlist = cls.gitConfigGet(dirName, name)
            for item in value:
                # the original used tlist.index(item) and tested "idx < 0",
                # but list.index() raises ValueError when absent, so that
                # branch could never run -- test membership explicitly
                if item not in tlist:
                    return False
                tlist.remove(item)

            if len(tlist) == 0:
                return True
            elif len(tlist) == 1:
                # it is the default value git auto adds
                # yes it is for repositories that has only one remote, and we only use this kind of repositories
                return tlist[0] == "+refs/heads/*:refs/remotes/origin/*"
            else:
                return False

        if name == "remote.origin.url":
            if not isinstance(value, list):
                value = [value]
            return cls.gitConfigGet(dirName, name) in value

        return cls.gitConfigGet(dirName, name) == value

    @classmethod
    def gitClean(cls, dest_directory):
        """Bring the checkout at dest_directory back to a pristine state:
           delete stale git lock files, revert local modifications and remove
           untracked files."""
        for fullfn in glob.glob(os.path.join(dest_directory, ".git", "**", "*.lock"), recursive=True):
            # FIXME:
            # should detect if this lock file is currently occupied by a git process?
            # git simply creates a plain file, or creates a real lock file?
            cls.forceDelete(fullfn)
        subprocess.check_call(["/usr/bin/git", "-C", dest_directory, "reset", "--hard"])  # revert any modifications
        subprocess.check_call(["/usr/bin/git", "-C", dest_directory, "clean", "-xfd"])    # delete untracked files

    @classmethod
    def gitPull(cls, dest_directory, reclone_on_failure=False, clone_url=None, clone_config=None, depth=None, quiet=False):
        """Update the git checkout at dest_directory with "git pull --rebase".

        With reclone_on_failure=True (clone_url must then be given), a
        missing, corrupt or URL/config-mismatched checkout is deleted and
        cloned afresh; after a clone one more pull is done to prove the new
        repository is pullable.
        """

        # error strings indicating a broken-but-recloneable repository
        recoverableError = {
            "fatal: unknown index entry format": "match",
            "fatal: refusing to merge unrelated histories": "match",
            "fatal: Updating an unborn branch with changes added to the index.": "match",
            "error: .* did not send all necessary objects": "regex-match",
            "fatal: .* is corrupt": "regex-match",
        }

        if reclone_on_failure:
            assert clone_url is not None
        else:
            assert clone_url is None

        if quiet:
            quietArg = "-q"
        elif sys.stderr.isatty():
            quietArg = "--progress"    # Util.shellExec() use pipe to do advanced process, we add "--progress" so that progress can still be displayed
        else:
            quietArg = ""

        # decide whether the existing checkout is usable ("pull") or must be
        # recreated ("clone"); the while is only a breakable scope
        mode = "pull"
        while reclone_on_failure:
            if not os.path.exists(dest_directory):
                mode = "clone"
                break
            if not os.path.isdir(os.path.join(dest_directory, ".git")):
                mode = "clone"
                break
            if not cls.gitConfigCompare(dest_directory, "remote.origin.url", clone_url):
                mode = "clone"
                break
            if clone_config is not None:
                for k, v in clone_config.items():
                    if not cls.gitConfigCompare(dest_directory, k, v):
                        mode = "clone"
                        break
            break

        if depth is not None:
            depthArg = "--depth %d" % (depth)
        else:
            depthArg = ""

        while True:
            if mode == "pull":
                try:
                    cls.gitClean(dest_directory)
                except BaseException:
                    if not reclone_on_failure:
                        raise
                    mode = "clone"
                    continue

                try:
                    cmd = "git -C \"%s\" pull --rebase --no-stat %s %s" % (dest_directory, quietArg, depthArg)
                    subprocess.check_call(cmd, shell=True)
                    break
                except subprocess.CalledProcessError as e:
                    # NOTE(review): subprocess.check_call() does not capture
                    # output, so e.stdout is always None here and none of the
                    # recoverable-error patterns can ever match -- confirm
                    # whether run(capture_output=True) was intended.
                    bFound = False
                    for recoverableErrorStr, ret in recoverableError.items():
                        if ret == "match":
                            if recoverableErrorStr in str(e.stdout):
                                bFound = True
                                break
                        elif ret == "regex-match":
                            if re.search(recoverableErrorStr, str(e.stdout), re.M):
                                # NOTE(review): this only sets bFound when
                                # reclone_on_failure is False, the opposite of
                                # the "match" branch above -- looks inverted
                                if not reclone_on_failure:
                                    bFound = True
                                    break
                        else:
                            assert False
                    if bFound:
                        if not reclone_on_failure:
                            raise
                        mode = "clone"
                    # NOTE(review): when bFound stays False this loops back and
                    # retries the pull indefinitely -- confirm this is intended
                    continue

            if mode == "clone":
                cls.forceDelete(dest_directory)
                if clone_config is not None:
                    configArg = "--config %s" % (" ".join(['%s="%s"' % (k, v) for k, v in clone_config.items()]))       # FIXME: is it space separated?
                else:
                    configArg = ""
                cmd = "git clone %s %s %s \"%s\" \"%s\"" % (quietArg, depthArg, configArg, clone_url, dest_directory)
                subprocess.check_call(cmd, shell=True)
                mode = "pull"                            # sometimes we may clone a bad repository which is not pullable, we pull after clone to make sure it is pullable
                continue

            assert False

    @staticmethod
    def gitIsDirty(dirName):
        # True if the working tree at dirName has merge conflicts or staged or
        # unstaged changes.
        # NOTE(review): this parses the human-readable "git status" text, which
        # is locale-dependent and may change across git versions -- consider
        # "git status --porcelain".
        ret = Util._gitCall(dirName, "status")
        if re.search("^You have unmerged paths.$", ret, re.M) is not None:
            return True
        if re.search("^Changes to be committed:$", ret, re.M) is not None:
            return True
        if re.search("^Changes not staged for commit:$", ret, re.M) is not None:
            return True
        if re.search("^All conflicts fixed but you are still merging.$", ret, re.M) is not None:
            return True
        return False

    @staticmethod
    def gitHasUntrackedFiles(dirName):
        # True if "git status" reports untracked files in the working tree.
        # NOTE(review): parses localized human-readable status text, same
        # fragility as gitIsDirty().
        ret = Util._gitCall(dirName, "status")
        if re.search("^Untracked files:$", ret, re.M) is not None:
            return True
        return False

    @staticmethod
    def _gitCall(dirName, command):
        # Run "git <command>" against the repository at dirName and return its
        # combined output (raises on failure, via Util.shellCall).
        gitDir = os.path.join(dirName, ".git")
        cmdStr = "git --git-dir=\"%s\" --work-tree=\"%s\" %s" % (gitDir, dirName, command)
        return Util.shellCall(cmdStr)

    @staticmethod
    def svnGetUrl(dirName):
        # Return the remote URL of the svn working copy at dirName, parsed
        # from the "URL:" line of "svn info" output.
        ret = Util.cmdCall("svn", "info", dirName)
        m = re.search("^URL: (.*)$", ret, re.M)
        return m.group(1)

    @classmethod
    def svnUpdate(cls, dest_directory, recheckout_on_failure=False, checkout_url=None):
        # not implemented yet; placeholder mirroring gitPull()'s interface
        assert False

    @staticmethod
    def encodePath(src_path):
        # Use the convert algorithm of systemd:
        # * Some unit names reflect paths existing in the file system namespace.
        # * Example: a device unit dev-sda.device refers to a device with the device node /dev/sda in the file system namespace.
        # * If this applies, a special way to escape the path name is used, so that the result is usable as part of a filename.
        # * Basically, given a path, "/" is replaced by "-", and all unprintable characters and the "-" are replaced by C-style
        #   "\x20" escapes. The root directory "/" is encoded as single dash, while otherwise the initial and ending "/" is
        #   removed from all paths during transformation. This escaping is reversible.
        # Note:
        # * src_path must be a normalized path, we don't accept path like "///foo///bar/"
        # * the encoding of src_path is a bit messy
        # * what about path like "/foo\/bar/foobar2"?

        assert os.path.isabs(src_path)

        if src_path == "/":
            return "-"

        newPath = ""
        for c in src_path.strip("/"):
            if c == "/":
                newPath += "-"
            elif re.fullmatch("[a-zA-Z0-9:_\\.]", c) is not None:
                newPath += c
            else:
                newPath += "\\x%02x" % (ord(c))
        return newPath

    @staticmethod
    def decodePath(dst_path):
        if dst_path == "-":
            return "/"

        newPath = ""
        for i in range(0, len(dst_path)):
            if dst_path[i] == "-":
                newPath += "/"
            elif dst_path[i] == "\\":
                m = re.search("^\\\\x([0-9])+", dst_path[i:])
                if m is None:
                    raise ValueError("encoded path is invalid")
                newPath += chr(int(m.group(1)))
            else:
                newPath += dst_path[i]
        return "/" + newPath

    @staticmethod
    def verifyFileMd5(filename, md5sum):
        with open(filename, "rb") as f:
            thash = hashlib.md5()
            while True:
                block = f.read(65536)
                if len(block) == 0:
                    break
                thash.update(block)
            return thash.hexdigest() == md5sum

    @staticmethod
    def isBufferAllZero(buf):
        for b in buf:
            if b != 0:
                return False
        return True

    @staticmethod
    def isFileStartWithAllZero(filename, length):
        with open(filename, 'rb') as f:
            if all(x == b'\0' for x in f.read(length)):
                return True
        return False

    @staticmethod
    def libUsed(binFile):
        """Return a list of the paths of the shared libraries used by binFile"""

        LDD_STYLE1 = re.compile(r'^\t(.+?)\s\=\>\s(.+?)?\s\(0x.+?\)$')
        LDD_STYLE2 = re.compile(r'^\t(.+?)\s\(0x.+?\)$')

        try:
            raw_output = Util.cmdCall("ldd", "--", binFile)
        except subprocess.CalledProcessError as e:
            # NOTE(review): assumes Util.cmdCall() captures the command output
            # into e.output -- confirm, e.output can be None for plain check_call
            if 'not a dynamic executable' in e.output:
                raise Exception("not a dynamic executable")
            else:
                raise

        # We can expect output like this:
        # [tab]path1[space][paren]0xaddr[paren]
        # or
        # [tab]path1[space+]=>[space+]path2?[paren]0xaddr[paren]
        # path1 can be ignored if => appears
        # path2 could be empty

        if 'statically linked' in raw_output:
            return []

        result = []
        for line in raw_output.splitlines():
            match = LDD_STYLE1.match(line)
            if match is not None:
                if match.group(2):
                    result.append(match.group(2))
                continue

            match = LDD_STYLE2.match(line)
            if match is not None:
                result.append(match.group(1))
                continue

            assert False

        # bugfix: the vDSO is not a real on-disk file but it may also be absent
        # or named differently (e.g. linux-gate.so.1 on x86); the unconditional
        # remove() used to raise ValueError in that case
        if "linux-vdso.so.1" in result:
            result.remove("linux-vdso.so.1")
        return result

    @staticmethod
    def unixHasUser(username):
        try:
            pwd.getpwnam(username)
            return True
        except KeyError:
            return False

    @staticmethod
    def unixHasGroup(groupname):
        try:
            grp.getgrnam(groupname)
            return True
        except KeyError:
            return False

    @staticmethod
    def unixIsUserInGroup(username, groupname):
        try:
            return username in grp.getgrnam(groupname).gr_mem
        except KeyError:
            return False

    @staticmethod
    def unixVerifyUserPassword(username, password):
        """Check password against the /etc/shadow entry of username.

        Returns False when the user is unknown, the account is locked or
        disabled ("!" / "*" prefix), the entry is malformed, or the shadow
        file is missing entirely.
        """
        try:
            for line in Util.readListFile("/etc/shadow"):
                fields = line.split(':')
                if fields[0] != username:
                    continue
                if len(fields) < 2:
                    return False
                hashed = fields[1]
                # a "!" or "*" prefix marks a locked/disabled account
                if hashed.startswith(('!', '*')):
                    return False
                return passlib.hosts.linux_context.verify(password, hashed)
        except FileNotFoundError:
            pass

        # user not found
        return False

    @staticmethod
    def geoGetCountry():
        """Returns (country-code, country-name)"""
        return ("CN", "China")

    @staticmethod
    def hwcfgGetComponentByUdevDeviceNode(machine, componentType, devPath):
        for c in machine.get_components():
            if c.get_type() == componentType:
                if c.get_udev_device().device_node == devPath:
                    return c
        return None

    @staticmethod
    def hwcfgGetCpuVendorModel(machine):
        """Return (vendor, model) of the CPU, preferring per-core device info over the CPU component."""
        for dev in machine.get_devices():
            if dev.get_type() == strict_hwcfg.DeviceType.CPU_CORE:
                return (dev.get_vendor(), dev.get_model())
        for comp in machine.get_components():
            if comp.get_type() == strict_hwcfg.ComponentType.CPU:
                return (comp.get_vendor(), comp.get_model())
        assert False        # every machine record is expected to contain CPU information

    @staticmethod
    def hwcfgGetMoboComponent(machine):
        """Return the motherboard component, or None if the machine has none recorded."""
        return next((c for c in machine.get_components()
                     if c.get_type() == strict_hwcfg.ComponentType.MOTHER_BOARD), None)

    @staticmethod
    def hwcfgHasFan(machine):
        """Return True if any fan is recorded, either as a FAN device or as a CPU/chassis fan component."""
        if any(d.get_type() == strict_hwcfg.DeviceType.FAN for d in machine.get_devices()):
            return True
        fanTypes = (strict_hwcfg.ComponentType.CPU_FAN, strict_hwcfg.ComponentType.CHASSIS_FAN)
        return any(c.get_type() in fanTypes for c in machine.get_components())

    @staticmethod
    def hwcfgGetGraphicsCardComponents(machine):
        """Return the list of all graphics card components of the machine."""
        return [c for c in machine.get_components()
                if c.get_type() == strict_hwcfg.ComponentType.GRAPHICS_CARD]

    @staticmethod
    def loopMount(filepath, loopFile, offset, length):
        """Expose the byte range [offset, offset+length) of filepath as loopFile via fuseloop.

        The loop file is created first and removed again if fuseloop fails to start.
        """
        Util.touchFile(loopFile)
        try:
            # fuseloop prints errors about /etc/mtab, hence stderr is discarded
            cmd = ["fuseloop", "-O", str(offset), "-S", str(length), filepath, loopFile]
            subprocess.check_call(cmd, stderr=subprocess.DEVNULL)
        except BaseException:
            os.unlink(loopFile)
            raise

    @staticmethod
    def loopUnMount(loopFile):
        """Unmount a fuseloop-backed loop file and delete it.

        Retries while the device is busy (umount exit code 32); any other
        failure is swallowed because this is a best-effort cleanup.
        """
        done = False
        while not done:
            try:
                subprocess.check_call(["umount", loopFile])
                done = True
            except subprocess.CalledProcessError as e:
                if e.returncode == 32:
                    # means device is busy, it sucks that fuseloop umount can return this result instead of waiting until operation is done
                    print("__wait2__")
                    time.sleep(1)
                else:
                    # as a cleanup operation, we must go on even if there's error
                    done = True
        os.unlink(loopFile)


class AvahiServiceBrowser:

    """
    Synchronously browse Avahi (mDNS/DNS-SD) over D-Bus for services of one type.

    Example:
        obj = AvahiServiceBrowser("_http._tcp")
        obj.run()
        obj.get_result_list()
    """

    def __init__(self, service):
        # DNS-SD service type to browse for, e.g. "_http._tcp"
        self.service = service

    def run(self):
        # Browse and resolve services, blocking in a GLib main loop until Avahi
        # signals "AllForNow" (cache exhausted) or a failure occurs.  D-Bus
        # connection errors are treated as "no results" rather than raised.
        self._result_dict = dict()

        self._server = None
        self._browser = None
        self._error_message = None
        try:
            # proxy for the central Avahi server object
            self._server = Gio.DBusProxy.new_for_bus_sync(Gio.BusType.SYSTEM,
                                                          Gio.DBusProxyFlags.NONE,
                                                          None,
                                                          "org.freedesktop.Avahi",
                                                          "/",
                                                          "org.freedesktop.Avahi.Server")

            path = self._server.ServiceBrowserNew("(iissu)",
                                                  -1,                                   # interface = IF_UNSPEC
                                                  0,                                    # protocol = PROTO_INET
                                                  self.service,                         # type
                                                  "",                                   # domain
                                                  0)                                    # flags
            self._browser = Gio.DBusProxy.new_for_bus_sync(Gio.BusType.SYSTEM,
                                                           Gio.DBusProxyFlags.NONE,
                                                           None,
                                                           "org.freedesktop.Avahi",
                                                           path,
                                                           "org.freedesktop.Avahi.ServiceBrowser")
            self._browser.connect("g-signal", self._signal_handler)

            # block here; _signal_handler / _failure_handler quit the loop
            self._mainloop = GLib.MainLoop()
            self._mainloop.run()
            if self._error_message is not None:
                raise Exception(self._error_message)
        except GLib.Error as e:
            # treat dbus error as success but with no result
            if e.domain in ["g-io-error-quark", "g-dbus-error-quark"]:
                return
            raise
        finally:
            self._error_message = None
            if self._browser is not None:
                self._browser.Free()
                self._browser = None
            self._server = None

    def get_result_list(self):
        # values are (name, address, port) tuples collected by run()
        return self._result_dict.values()

    def _signal_handler(self, proxy, sender, signal, param):
        # dispatcher for org.freedesktop.Avahi.ServiceBrowser signals
        if signal == "ItemNew":
            # a new service appeared: resolve it asynchronously
            interface, protocol, name, stype, domain, flags = param.unpack()
            self._server.ResolveService("(iisssiu)",
                                        interface,
                                        protocol,
                                        name,
                                        stype,
                                        domain,
                                        -1,                                     # interface = IF_UNSPEC
                                        0,                                      # protocol = PROTO_INET
                                        result_handler=self._service_resolved,
                                        error_handler=self._failure_handler)

        if signal == "ItemRemove":
            # a service disappeared: drop it from the results again
            interface, protocol, name, stype, domain, flags = param.unpack()
            key = (interface, protocol, name, stype, domain)
            if key in self._result_dict:
                del self._result_dict[key]

        if signal == "AllForNow":
            # Avahi has delivered everything it currently knows
            self._mainloop.quit()

        if signal == "Failure":
            self._failure_handler(param)

        return True

    def _service_resolved(self, proxy, result, user_data):
        # async callback of ResolveService(): record the resolved service
        interface, protocol, name, stype, domain, host, aprotocol, address, port, txt, flags = result
        key = (interface, protocol, name, stype, domain)
        self._result_dict[key] = (name, address, int(port))

    def _failure_handler(self, error):
        # remember the error and stop the main loop; run() re-raises it
        self._error_message = error
        self._mainloop.quit()


class PhysicalDiskMounts:

    """This class is a better psutil.disk_partitions()"""

    class Entry:

        """Thin read-only wrapper around one psutil partition record."""

        def __init__(self, p):
            self._p = p

        @property
        def device(self):
            return self._p.device

        @property
        def mountpoint(self):
            return self._p.mountpoint

        @property
        def fstype(self):
            return self._p.fstype

        @property
        def opts(self):
            return self._p.opts

        @property
        def mnt_opt_list(self):
            # mount options as a list instead of one comma separated string
            return self._p.opts.split(",")

        def __repr__(self):
            return "<%s %r>" % (self.__class__.__name__, self.__dict__)

    class NotFoundError(Exception):
        pass

    @classmethod
    def get_entries(cls):
        """Return an Entry for every currently mounted partition."""
        return [cls.Entry(part) for part in psutil.disk_partitions()]

    @classmethod
    def find_root_entry(cls):
        """Return the Entry mounted at "/"; raise NotFoundError if there is none."""
        entry = cls.find_entry_by_mount_point("/")
        if entry is None:
            raise cls.NotFoundError("no rootfs mount point")
        return entry

    @classmethod
    def find_entry_by_mount_point(cls, mount_point_path):
        """Return the Entry whose mount point equals mount_point_path, or None."""
        for part in psutil.disk_partitions():
            if part.mountpoint == mount_point_path:
                return cls.Entry(part)
        return None

    @classmethod
    def find_entries_by_filter(cls, filter):
        """Return all entries for which the predicate filter(entry) is true."""
        return [e for e in map(cls.Entry, psutil.disk_partitions()) if filter(e)]


class TmpMount:

    """Mounts a device/path on a freshly created temporary directory.

    close() (or context-manager exit) unmounts it and removes the directory;
    if the initial mount fails the directory is removed immediately.
    """

    def __init__(self, path, options=""):
        # path: what to mount; options: comma separated string handed to "mount -o"
        self._path = path
        self._opts = options
        self._tmppath = tempfile.mkdtemp()

        try:
            cmd = ["mount"]
            if self._opts != "":
                cmd.append("-o")
                cmd.append(self._opts)
            cmd.append(self._path)
            cmd.append(self._tmppath)
            subprocess.run(cmd, check=True, text=True)
        except BaseException:
            # bugfix: was a bare "except:" -- same semantics, but explicit, and it
            # makes clear that KeyboardInterrupt etc. also trigger this cleanup
            os.rmdir(self._tmppath)
            raise

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    @property
    def device(self):
        return self._path

    @property
    def mountpoint(self):
        return self._tmppath

    @property
    def fstype(self):
        # queried live from the device rather than cached at mount time
        return Util.getBlkDevFsType(self._path)

    @property
    def opts(self):
        return self._opts

    def close(self):
        """Unmount and remove the temporary mount point."""
        subprocess.run(["umount", self._tmppath], check=True, text=True)
        os.rmdir(self._tmppath)


class DirListMount:

    """Mounts a list of (directory, mount-command, ...) entries and lazily unmounts them all on context exit."""

    @staticmethod
    def standardDirList(tdir):
        # Return the standard mount list for a chroot rooted at tdir:
        # proc, sys (rbind+rslave), dev (rbind+rslave), run (bind) and tmp (tmpfs).
        # The "if True:" blocks only serve as visual grouping per mount entry.
        mountList = []
        if True:
            tstr = os.path.join(tdir, "proc")
            mountList.append((tstr, "-t proc -o nosuid,noexec,nodev proc %s" % (tstr)))
        if True:
            tstr = os.path.join(tdir, "sys")
            mountList.append((tstr, "--rbind /sys %s" % (tstr), "--make-rslave %s" % (tstr)))
        if True:
            tstr = os.path.join(tdir, "dev")
            mountList.append((tstr, "--rbind /dev %s" % (tstr), "--make-rslave %s" % (tstr)))
        if True:
            tstr = os.path.join(tdir, "run")
            mountList.append((tstr, "--bind /run %s" % (tstr)))
        if True:
            tstr = os.path.join(tdir, "tmp")
            mountList.append((tstr, "-t tmpfs -o mode=1777,strictatime,nodev,nosuid tmpfs %s" % (tstr)))
        return mountList

    def __init__(self, mountList):
        # Each item is (directory, mount-command-1, mount-command-2, ...); every
        # command string becomes the arguments of one "mount" invocation.  The
        # directory is created on demand.  If any command fails, everything
        # mounted so far is lazily unmounted and an exception is raised.
        self.okList = []
        for item in mountList:      # mountList = (directory, mount-commad-1, mount-command-2, ...)
            dir = item[0]
            if not os.path.exists(dir):
                os.makedirs(dir)
            for i in range(1, len(item)):
                # NOTE(review): shell=True with %-interpolated paths -- paths with
                # spaces or shell metacharacters would break; assumed to be
                # trusted internal paths, confirm with callers
                mcmd = "mount %s" % (item[i])
                ret = subprocess.run(mcmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
                if ret.returncode == 0:
                    # NOTE: dir is recorded once per successful command, so it may
                    # appear several times; the redundant lazy umounts are harmless
                    self.okList.insert(0, dir)
                else:
                    # roll back everything mounted so far, newest first
                    for dir2 in self.okList:
                        subprocess.run(["umount", "-l", dir2], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
                    raise Exception("error when executing \"%s\"" % (mcmd))

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # lazy-unmount everything that was mounted successfully, newest first
        for d in self.okList:
            subprocess.run(["umount", "-l", d], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)


class SingletonProcess:

    """Guards against concurrent process instances via an exclusive lock on a lock file.

    Raises AlreadyExistException when another process already holds the lock.
    """

    class AlreadyExistException(Exception):
        pass

    def __init__(self, filename):
        self._lockfile = filename
        self._lockFd = os.open(self._lockfile, os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC, 0o600)
        try:
            fcntl.lockf(self._lockFd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except Exception as e:
            os.close(self._lockFd)
            self._lockFd = None
            # EACCES/EAGAIN mean another process already holds the lock
            if isinstance(e, IOError) and e.errno in (errno.EACCES, errno.EAGAIN):
                raise self.AlreadyExistException()
            raise

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def close(self):
        """Release the lock and remove the lock file."""
        os.close(self._lockFd)
        self._lockFd = None
        os.unlink(self._lockfile)
        self._lockfile = None


class TempChdir:

    """Context manager that chdirs into dirname and restores the previous working directory on exit."""

    def __init__(self, dirname):
        # remember where we were so __exit__ can go back
        self.olddir = os.getcwd()
        os.chdir(dirname)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        os.chdir(self.olddir)


class InfoPrinter:

    """Prints colored, tab-indented status lines to stdout.

    printInfoAndIndent() opens a nested section; with bRecallable=True the
    section's own line is erased again on exit if nothing was printed inside it.
    """

    class _InfoPrinterInfoIndenter:

        """Context manager returned by printInfoAndIndent(): prints the message,
        increases the indent, and undoes both on exit."""

        def __init__(self, parent, message, bRecallable=False):
            self._parent = parent
            self._bRecallable = bRecallable

            # register ourselves as the innermost indenter, remembering the outer one
            self._savedIndenter = self._parent._curIndenter
            self._parent._curIndenter = self

            # _printLen stays -1 while our own message is being printed, so that
            # printInfo() knows not to treat it as nested output
            self._printLen = -1
            self._parent.printInfo(message)
            self._printLen = len(message)

            self._parent.incIndent()

        def __enter__(self):
            return self

        def __exit__(self, type, value, traceback):
            self._parent.decIndent()

            if self._bRecallable and self._printLen >= 0:
                sys.stdout.write("\r" + self._parent._t.clear_eol)       # clear current line
                sys.stdout.flush()

            self._parent._curIndenter = self._savedIndenter

    def __init__(self, indent=0):
        self._t = blessed.Terminal()
        self._indent = indent
        self._curIndenter = None

    def incIndent(self):
        self._indent = self._indent + 1

    def decIndent(self):
        assert self._indent > 0
        self._indent = self._indent - 1

    def getIndent(self):
        return self._indent

    def getIndentStr(self):
        return "\t" * self._indent

    def printInfo(self, s):
        """Print an informational line with a green bullet."""
        self._printLine(self._t.green("*"), s)

    def printError(self, s):
        """Print an error line with a red bullet."""
        self._printLine(self._t.red("*"), s)

    def printInfoAndIndent(self, s, bRecallable=False):
        """Print s and keep the indent increased until the returned context manager exits."""
        return self._InfoPrinterInfoIndenter(self, s, bRecallable)

    def _printLine(self, bullet, s):
        # Shared implementation of printInfo()/printError(); the two methods
        # previously duplicated this whole body and differed only in bullet color.
        line = bullet + " " + self.getIndentStr() + s

        if self._curIndenter is not None and self._curIndenter._bRecallable:
            if self._curIndenter._printLen == -1:
                # this is the indenter's own message: keep the cursor on the
                # line so it can be erased ("recalled") later
                print(line, end='')
            else:
                # real output appeared inside a recallable section, so the
                # section line can no longer be erased
                self._curIndenter._bRecallable = False
                print("")
                print(line)
        else:
            print(line)


class PrintLoadAvgThread(threading.Thread):

    """Background thread that keeps one status line updated with a fixed message plus the current system load average."""

    def __init__(self, msg):
        super().__init__()

        # seconds between two refreshes of the status line
        self._min_display_latency = 2
        # never pad beyond this many columns
        self._max_width = 80
        self._t = blessed.Terminal()

        self._msg = msg
        # clamp to the terminal width so the padded line stays on one row
        self._width = min(self._t.width, self._max_width)
        self._stopEvent = threading.Event()
        # True until the first _print_message(), which must not clear the line
        self._firstTime = True

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        self.stop()

    def start(self):
        # print once immediately so the message is visible before the first refresh
        self._print_message()
        super().start()

    def stop(self):
        # signal the refresh loop to end, then wait for the thread to finish
        self._stopEvent.set()
        self.join()

    def run(self):
        # refresh until stop() is requested, then terminate the status line
        while not self._stopEvent.is_set():
            self._print_message()
            self._stopEvent.wait(self._min_display_latency)
        sys.stdout.write("\n")

    def _print_message(self):
        # rewrite the status line in place: message, padding, load average
        if self._firstTime:
            self._firstTime = False
        else:
            sys.stdout.write("\r" + self._t.clear_eol)                                     # clear current line

        sys.stdout.write(self._msg)                                                        # print message
        sys.stdout.write(" " * (self._width - len(self._msg)))                             # print padding
        sys.stdout.write("Load avg: %s" % (Util.getLoadAvgStr()))                        # print load average
        sys.stdout.flush()


class ParallelRunSequencialPrint:

    """Runs several subprocess tasks in parallel while echoing their stdout strictly in submission order."""

    def __init__(self):
        self.preFuncList = []
        self.postFuncList = []
        self.taskDataList = []
        self.stdoutList = []

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.run()

    # pre_func can't be a coroutine because there's no "async lambda" in python
    def add_task(self, start_coro, start_coro_param, wait_coro, pre_func=None, post_func=None):
        # start_coro(*start_coro_param, loop=loop) must return (proc, stdout_stream);
        # wait_coro(proc) awaits process completion; pre_func/post_func run
        # around this task's output when it is echoed
        self.preFuncList.append(pre_func)
        self.postFuncList.append(post_func)
        self.taskDataList.append((start_coro, start_coro_param, wait_coro))

    def run(self):
        # Start all registered tasks, stream their output in order, wait for all
        # of them, then reset so the object can be reused.
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a running
        # loop in newer Python versions; consider asyncio.new_event_loop()
        loop = asyncio.get_event_loop()

        tlist = []
        for start_coro, start_coro_param, wait_coro in self.taskDataList:
            proc, outf = loop.run_until_complete(start_coro(*start_coro_param, loop=loop))
            self.stdoutList.append(outf)
            tlist.append((proc, wait_coro))

        # _showResult() prints outputs sequentially while the waiters run in parallel
        pool = asyncio_pool.AioPool(loop=loop)
        pool.spawn_n(self._showResult())
        for proc, wait_coro in tlist:
            pool.spawn_n(wait_coro(proc))
        loop.run_until_complete(pool.join())

        self.preFuncList = []
        self.postFuncList = []
        self.taskDataList = []
        self.stdoutList = []

    async def _showResult(self):
        # Echo each task's stdout in submission order, wrapped by its pre/post hooks.
        for i in range(0, len(self.preFuncList)):
            if self.preFuncList[i] is not None:
                self.preFuncList[i]()
            while True:
                buf = await self.stdoutList[i].read(1)      # no way to read all data in buffer, sucks
                if buf == b'':
                    break
                sys.stdout.buffer.write(buf)
                sys.stdout.flush()
            if self.postFuncList[i] is not None:
                self.postFuncList[i]()


class SysfsHwMon:

    """Query /sys/class/hwmon for sensors by chip name and sensor label (both fnmatch patterns)."""

    SENSOR_TYPE_TEMP = "temp"

    def get_sensors(self, hwmon_name_pattern, sensor_label_pattern, sensor_type=None):
        # return [(hwmon_name, sensor_label, sensor_type, sysfs_path_prefix)]
        # (an empty list if /sys/class/hwmon does not exist or nothing matches)

        assert sensor_type is None or sensor_type in [self.SENSOR_TYPE_TEMP]

        ret = []
        if not os.path.exists("/sys/class/hwmon"):
            return ret

        for dn in os.listdir("/sys/class/hwmon"):
            fulldn = os.path.join("/sys/class/hwmon", dn)

            namefn = os.path.join(fulldn, "name")
            # sysfs attribute files end with a newline which would break fnmatch
            chipName = pathlib.Path(namefn).read_text().rstrip("\n")
            if not fnmatch.fnmatch(chipName, hwmon_name_pattern):
                continue

            # fulldn is already absolute; the original additionally joined it
            # under /sys/class/hwmon, which was redundant
            if sensor_type is None:
                pat = os.path.join(fulldn, "*_label")
            else:
                pat = os.path.join(fulldn, "%s*_label" % (sensor_type))
            for fullfn in glob.glob(pat):
                # bugfix: read the label from the *_label file itself; the
                # original re-read the chip name file here
                label = pathlib.Path(fullfn).read_text().rstrip("\n")
                if not fnmatch.fnmatch(label, sensor_label_pattern):
                    continue
                m = re.fullmatch(r'((.*)[0-9]+)_label', os.path.basename(fullfn))
                # NOTE(review): the path prefix joins m.group(2) (e.g. "temp"),
                # not m.group(1) ("temp1") -- kept as the original had it, confirm
                # with callers which prefix they expect
                ret.append((chipName, label, m.group(1), os.path.join(fulldn, m.group(2))))

        return ret

    def get_sensor(self, hwmon_name, sensor_label, sensor_type=None):
        # return (hwmon_name, sensor_label, sensor_type, sysfs_path_prefix),
        # None if nothing matches; asserts if the patterns are ambiguous

        ret = self.get_sensors(hwmon_name, sensor_label, sensor_type)
        if len(ret) == 1:
            return ret[0]
        elif len(ret) == 0:
            return None
        else:
            assert False


class CloudCacheGentoo:

    """Local on-disk cache of Gentoo release (stage3) and portage snapshot files.

    The cache directory mirrors the upstream layout:
        <cacheDir>/releases/<arch>/<stage3-variant>/<version>/...
        <cacheDir>/snapshots/<version>/...
    sync() must be called once (for an empty cache) before the getters are used.
    """

    def __init__(self, cacheDir):
        self._cloudReleases = gstage4.cloud_data.GentooReleases()
        self._cloudSnapshots = gstage4.cloud_data.GentooSnapshots()

        self._dir = cacheDir
        self._releasesDir = os.path.join(self._dir, "releases")
        self._snapshotsDir = os.path.join(self._dir, "snapshots")

        # consider the cache synced if the releases directory is already populated
        self._bSynced = (os.path.exists(self._releasesDir) and len(os.listdir(self._releasesDir)) > 0)

    def sync(self):
        # Recreate the local directory skeleton from the upstream arch/variant/version lists.
        os.makedirs(self._releasesDir, exist_ok=True)
        os.makedirs(self._snapshotsDir, exist_ok=True)

        # fill arch directories
        Util.syncDirs(self._cloudReleases.get_arch_list(), self._releasesDir)

        # fill variant and release directories, we only accept stage3 variant
        for arch in self._cloudReleases.get_arch_list():
            archDir = os.path.join(self._releasesDir, arch)
            stage3ReleaseVariantList = [x for x in self._cloudReleases.get_release_variant_list(arch) if x.startswith("stage3-")]
            Util.syncDirs(stage3ReleaseVariantList, archDir)
            for variant in stage3ReleaseVariantList:
                Util.syncDirs(self._cloudReleases.get_release_version_list(arch), os.path.join(archDir, variant))

        # fill snapshots directory
        Util.syncDirs(self._cloudSnapshots.get_snapshot_version_list(), self._snapshotsDir)

        self._bSynced = True

    def get_arch_list(self):
        """Return all cached architectures."""
        assert self._bSynced
        return os.listdir(self._releasesDir)

    def get_subarch_list(self, arch):
        """Return the sorted sub-architectures of arch, derived from the stage3 variant directory names."""
        assert self._bSynced
        ret = set()
        for d in os.listdir(os.path.join(self._releasesDir, arch)):
            # directory names look like "stage3-<subarch>[-<variant>]"
            ret.add(d.split("-")[1])
        return sorted(list(ret))

    def get_release_variant_list(self, arch):
        """Return all cached stage3 variant directory names for arch."""
        assert self._bSynced
        return os.listdir(os.path.join(self._releasesDir, arch))

    def get_release_version_list(self, arch):
        """Return the cached release versions for arch.

        NOTE(review): assumes every variant has the same version list, so only
        the first variant is consulted -- confirm this invariant holds.
        """
        assert self._bSynced
        return os.listdir(os.path.join(self._releasesDir, arch, self.get_release_variant_list(arch)[0]))

    def get_snapshot_version_list(self):
        """Return all cached snapshot versions."""
        assert self._bSynced
        return os.listdir(self._snapshotsDir)

    def get_stage3(self, arch, subarch, release_variant, release_version, cached_only=False):
        """Return (stage3-file, digest-file) paths, downloading them unless already cached.

        Raises FileNotFoundError when cached_only is True and the files are absent.
        """
        assert self._bSynced

        fullReleaseVariant = self._stage3FullReleaseVariant(subarch, release_variant)

        myDir = os.path.join(self._releasesDir, arch, fullReleaseVariant, release_version)
        fn, fnDigest = self._cloudReleases.gen_stage3_fn(arch, fullReleaseVariant, release_version)
        fullfn = os.path.join(myDir, fn)
        fullfnDigest = os.path.join(myDir, fnDigest)

        if os.path.exists(fullfn) and os.path.exists(fullfnDigest):
            print("Files already downloaded.")
            return (fullfn, fullfnDigest)
        if cached_only:
            raise FileNotFoundError("the specified stage3 does not exist")

        url, urlDigest = self._cloudReleases.gen_stage3_url(arch, fullReleaseVariant, release_version)
        Util.wgetDownload(url, fullfn)
        Util.wgetDownload(urlDigest, fullfnDigest)

        return (fullfn, fullfnDigest)

    def get_latest_stage3(self, arch, subarch, release_variant, cached_only=False):
        """Return (stage3-file, digest-file) for the newest version obtainable, trying versions newest-first.

        Raises FileNotFoundError when no version can be obtained.
        """
        assert self._bSynced

        fullReleaseVariant = self._stage3FullReleaseVariant(subarch, release_variant)

        variantDir = os.path.join(self._releasesDir, arch, fullReleaseVariant)
        for ver in sorted(os.listdir(variantDir), reverse=True):
            myDir = os.path.join(variantDir, ver)
            fn, fnDigest = self._cloudReleases.gen_stage3_fn(arch, fullReleaseVariant, ver)
            fullfn = os.path.join(myDir, fn)
            fullfnDigest = os.path.join(myDir, fnDigest)

            if os.path.exists(fullfn) and os.path.exists(fullfnDigest):
                print("Files already downloaded.")
                return (fullfn, fullfnDigest)

            if not cached_only:
                # check that the files really exist upstream before downloading
                url, urlDigest = self._cloudReleases.gen_stage3_url(arch, fullReleaseVariant, ver)
                if Util.wgetSpider(url):
                    Util.wgetDownload(url, fullfn)
                    Util.wgetDownload(urlDigest, fullfnDigest)
                    return (fullfn, fullfnDigest)

        raise FileNotFoundError("no stage3 found")

    def get_snapshot(self, snapshot_version, cached_only=False):
        """Return the snapshot file path for snapshot_version, downloading it unless cached."""
        assert self._bSynced

        myDir = os.path.join(self._snapshotsDir, snapshot_version)
        fullfn = os.path.join(myDir, self._cloudSnapshots.gen_snapshot_fn(snapshot_version))

        if os.path.exists(fullfn):
            print("Files already downloaded.")
            return fullfn
        if cached_only:
            raise FileNotFoundError("the specified snapshot does not exist")

        url = self._cloudSnapshots.gen_snapshot_url(snapshot_version)
        Util.wgetDownload(url, fullfn, True)
        return fullfn

    def get_latest_snapshot(self, cached_only=False):
        """Return the newest obtainable snapshot file, trying versions newest-first.

        Raises FileNotFoundError when no snapshot can be obtained.
        """
        assert self._bSynced

        for ver in sorted(os.listdir(self._snapshotsDir), reverse=True):
            myDir = os.path.join(self._snapshotsDir, ver)
            fullfn = os.path.join(myDir, self._cloudSnapshots.gen_snapshot_fn(ver))

            if os.path.exists(fullfn):
                print("Files already downloaded.")
                return fullfn

            if not cached_only:
                url = self._cloudSnapshots.gen_snapshot_url(ver)
                if Util.wgetSpider(url):
                    Util.wgetDownload(url, fullfn, True)
                    return fullfn

        raise FileNotFoundError("no snapshot found")

    def _stage3FullReleaseVariant(self, subarch, release_variant):
        # build the full variant directory name, e.g. "stage3-amd64" or "stage3-amd64-hardened"
        ret = "stage3-%s" % (subarch)
        if release_variant != "":
            ret += "-%s" % (release_variant)
        return ret


class CcacheLocalService:

    """
    We think ccache can be used as a local service if the following conditions are met:
       1. /usr/bin/ccache exists
       2. ccache_dir exists (use value specified in /etc/ccache.conf or use default /root/.cache/ccache)
       3. no user specific configuration for root
    """

    def __init__(self):
        self._ccacheDir = None

        if not os.path.exists("/usr/bin/ccache"):
            return

        cfgFile = "/etc/ccache.conf"
        rootCfgDir = "/root/.config/ccache"

        tdir = None
        try:
            buf = pathlib.Path(cfgFile).read_text()
            m = re.search("^cache_dir = (.*)$", buf, re.M)
            if m is None:
                # bugfix: this message used to reference the non-existent
                # attribute self._cfgFile and would have raised AttributeError
                raise Exception("no \"cache_dir\" specified in %s" % (cfgFile))
            tdir = m.group(1)
        except FileNotFoundError:
            # no system config file: fall back to ccache's default directory
            tdir = "/root/.cache/ccache"
        if not os.path.isdir(tdir):
            return

        if os.path.exists(rootCfgDir):
            # bugfix: this used to reference the non-existent attribute self._rootCfgDir
            raise Exception("%s should not exist" % (rootCfgDir))

        self._ccacheDir = tdir

    def is_enabled(self):
        """Return True if ccache was detected and is usable as a local service."""
        return self._ccacheDir is not None

    def get_ccache_dir(self):
        """Return the detected ccache directory; only valid when is_enabled()."""
        assert self._ccacheDir is not None
        return self._ccacheDir


class PortagePythonExecConfFile:

    """Parses /etc/python-exec/python-exec.conf and validates the python versions listed in it."""

    def __init__(self):
        self._pythonExecConf = "/etc/python-exec/python-exec.conf"

        self._verList = []

        # check file existence
        if not os.path.exists(self._pythonExecConf):
            # bugfix: the "%s" placeholder was never interpolated
            raise Exception("File %s does not exist." % (self._pythonExecConf))

        # get all python versions known to python-exec
        # bugfix: check_output() returns bytes by default; text=True is required
        # before splitting on "\n"
        allPythonVerList = subprocess.check_output(["python-exec2c", "-l"], text=True).split("\n")
        allPythonVerList = [x for x in allPythonVerList if x != ""]

        # parse file
        i = 0
        for line in pathlib.Path(self._pythonExecConf).read_text().split("\n"):
            line = line.strip()

            # skip blank lines and comments
            if line == "" or line.startswith("#"):
                i += 1
                continue

            # ignore disabled entries
            if line.startswith("-"):
                i += 1
                continue

            m = re.fullmatch(r"python([0-9\.]+)", line)
            if m is None:
                raise Exception("Invalid line %d in file %s." % (i + 1, self._pythonExecConf))

            if m.group(1) in self._verList:
                raise Exception("Duplicate version %s found in file %s line %d." % (m.group(1), self._pythonExecConf, i + 1))

            if m.group(1) not in allPythonVerList:
                raise Exception("Version %s is not supported in file %s line %d." % (m.group(1), self._pythonExecConf, i + 1))

            self._verList.append(m.group(1))
            i += 1

    def get_pyhton_version_list(self):
        # name kept (with its typo) for backward compatibility; prefer
        # get_python_version_list()
        return self._verList

    def get_python_version_list(self):
        """Return the list of enabled python versions, in file order."""
        return self._verList


class DynObject:
    """Empty placeholder class; instances serve as ad-hoc attribute bags."""
