from __future__ import absolute_import
import glob
import os
import solv
import tempfile
import subprocess
import sys
import rpm
import urlparse

from pkgr.jobs import mkjobs
from pkgr.repocmdline import RepoCmdline
from pkgr.repoconfig import RepoConfig
from pkgr.reporepomd import RepoRepomd
from pkgr.reposystem import RepoSystem
from pkgr.repounknown import RepoUnknown
from pkgr.errors import ImproperlyConfigured
from pkgr.errors import RPMError
from pkgr.util import mkdir_p
import logging

logger = logging.getLogger('pkgr.Pkgr')

# Mapping of human-readable verbosity names to rpm log levels; used by
# run_transaction() to validate and translate the requested rpm verbosity.
RPMLOG_LEVELS = {
    'emergency': rpm.RPMLOG_EMERG,
    'alert':     rpm.RPMLOG_ALERT,
    'critical':  rpm.RPMLOG_CRIT,
    'error':     rpm.RPMLOG_ERR,
    'warning':   rpm.RPMLOG_WARNING,
    'notice':    rpm.RPMLOG_NOTICE,
    'info':      rpm.RPMLOG_INFO,
    'debug':     rpm.RPMLOG_DEBUG,
}

# Default directories searched for *.repo.yaml repository definitions.
REPOSEARCHDIRS = ["/etc/pkgr"]

class Pkgr(object):
    ''' High level package manager facade.

        Ties together libsolv repositories/pools with rpm transactions.
        Typical use: set arch/root/cache dir, add repos, call
        post_repo_load(), build jobs, then run the resulting transaction.
    '''
    # architecture handed to solv.Pool.setarch() (see set_arch/get_arch)
    _arch = None
    # lazily constructed solv.Pool, see get_solv_pool()
    _solvpool = None
    _jobs = None
    # installation root directory; "/" operates on the running system
    _root = "/"
    # lazily constructed @System repository, see get_sysrepo()
    _sysrepo = None
    _cache_dir = "/var/lib/solv"
    _repos = None

    def get_all_groups(self):
        ''' Returns a list of the unique group names available in all
            added repositories; repos without group metadata are skipped.
        '''
        all_groups = set()
        for repo in self.get_repos():
            repo_groups = repo.get_groups()
            if repo_groups is None:
                continue

            for group in repo_groups.get_groups():
                all_groups.add(group)
        return list(all_groups)

    def __init__(self):
        self._jobs = []
        self._repos = set()

    def get_cache_dir(self):
        ''' Gets the cache dir. '''
        return self._cache_dir

    def set_cache_dir(self, p):
        ''' Sets the cache dir. Should be something like root + /var/lib/pkgr. '''
        self._cache_dir = p

    def set_root(self, root):
        ''' Sets the installation root. '''
        self._root = root

    def get_root(self):
        ''' Returns the installation root. '''
        return self._root

    def set_arch(self, arch):
        ''' Sets the architecture. '''
        self._arch = arch

    def get_arch(self):
        ''' Returns the architecture. '''
        return self._arch

    def _solv_load_stub(self, repodata):
        ''' libsolv load callback: delegates on-demand metadata loading to
            the repo instance stashed in the solv repo's appdata.
        '''
        repo = repodata.repo.appdata
        if repo:
            return repo.load_ext(repodata)
        return False

    def get_solv_pool(self):
        ''' Returns a configured solv.Pool, creating it on first use.

            Raises ImproperlyConfigured if no architecture has been set.
        '''
        if self._solvpool is None:
            self._solvpool = solv.Pool()
            arch = self.get_arch()
            if arch is None:
                raise ImproperlyConfigured()

            self._solvpool.setarch(arch)
            self._solvpool.set_loadcallback(self._solv_load_stub)
        return self._solvpool

    def post_repo_load(self):
        ''' Should be called after adding the repos.

            Propagates newly discovered file provides to the system repo and
            all added repos, then builds the pool's whatprovides index.
        '''
        pool = self.get_solv_pool()
        addedprovides = pool.addfileprovides_queue()
        if addedprovides:
            self.get_sysrepo().updateaddedprovides(addedprovides)
            for repo in self.get_repos():
                repo.updateaddedprovides(addedprovides)

        pool.createwhatprovides()

    def get_repos(self):
        ''' Returns the set of all repos added. '''
        return self._repos

    def add_repo(self, repo):
        ''' Adds the repo to the pool.

            Raises Exception if the repo was already added.
        '''
        if repo in self.get_repos():
            raise Exception("Repo %s already added." % repo)

        repo.set_root_dir(self.get_root())
        repo.set_cache_dir(self.get_cache_dir())

        self.get_repos().add(repo)

        pool = self.get_solv_pool()
        repo.add_too_pool(pool)

    def get_configured_repos(self, dirs=None):
        ''' Reads dirs for repo files and returns them as RepoRepomd
            instances.

            dirs defaults to REPOSEARCHDIRS; missing directories are skipped.
        '''
        if dirs is None:
            dirs = REPOSEARCHDIRS

        repos = []
        for reposdir in dirs:
            if not os.path.isdir(reposdir):
                continue

            for reponame in sorted(glob.glob('%s/*.repo.yaml' % reposdir)):
                rc = RepoConfig()
                rc.load_file(reponame)

                repoattr = {
                    'enabled': rc.get_enabled(),
                    'priority': rc.get_priority(),
                    'type': rc.get_repo_type(),
                    'metadata_expire': rc.get_expires_seconds()
                }

                urltype, url = rc.get_url()
                repoattr[urltype] = url

                # only rpm-md repos are understood; anything else becomes
                # an inert RepoUnknown placeholder
                if repoattr['type'] == 'rpm-md':
                    repo = RepoRepomd(rc.get_name(), 'repomd', repoattr)
                else:
                    repo = RepoUnknown(rc.get_name(), 'unknown', repoattr)
                repo.set_cache_dir(self.get_cache_dir())
                repo.set_root_dir(self.get_root())
                repo.set_url(urltype, url)
                repos.append(repo)
        return repos

    def get_sysrepo(self):
        ''' Returns the system repo (installed packages), creating it and
            registering it with the pool on first use.
        '''
        if self._sysrepo is None:
            pool = self.get_solv_pool()
            self._sysrepo = RepoSystem('@System', 'system')
            logger.debug('Loading sys repo from %s' % self.get_root())
            self._sysrepo.set_root_dir(self.get_root())
            self._sysrepo.set_cache_dir(self.get_cache_dir())
            self._sysrepo.add_too_pool(pool)
        return self._sysrepo

    def names_to_jobs(self, cmd, names):
        ''' Create a job list given a cmd [erase,install,update] and some names.

            Names that resolve to local .rpm files are served from the
            command line repo; everything else goes through mkjobs().
        '''
        pool = self.get_solv_pool()

        logger.debug('Names to jobs %s', names)
        # also search cmdline repo
        cmdlinerepo = self.load_cmdline_repo(names)

        # convert arguments into jobs
        jobs = []
        for arg in names:
            if cmdlinerepo and arg in cmdlinerepo['packages']:
                # fix: 'Job' was an unqualified name (NameError); the
                # SOLVER_SOLVABLE flag lives on solv.Job
                jobs.append(pool.Job(solv.Job.SOLVER_SOLVABLE, cmdlinerepo['packages'][arg]))
            else:
                njobs = mkjobs(pool, cmd, arg)
                if not njobs:
                    logger.debug("nothing matches '%s'" % arg)
                    continue
                jobs += njobs
        return jobs

    def load_cmdline_repo(self, names):
        ''' The command line repo exists to handle rpms "provided" on the
            command line.

            Returns the RepoCmdline (with a 'packages' mapping of
            filename -> solvable) or None when no readable .rpm was given.
        '''
        cmdlinerepo = None
        for arg in names:
            if not (arg.endswith(".rpm") and os.access(arg, os.R_OK)):
                continue

            if not cmdlinerepo:
                cmdlinerepo = RepoCmdline('@commandline', 'cmdline')
                # fix: 'pool' was an undefined name here
                cmdlinerepo.add_too_pool(self.get_solv_pool())
                cmdlinerepo['packages'] = {}
            cmdlinerepo['packages'][arg] = cmdlinerepo.get_solv_handle().add_rpm(arg, solv.Repo.REPO_REUSE_REPODATA|solv.Repo.REPO_NO_INTERNALIZE)

        if cmdlinerepo:
            # internalize once after all rpms were added
            cmdlinerepo.get_solv_handle().internalize()

        return cmdlinerepo

    def search(self, query):
        ''' Yields solvable ids whose package name contains query
            (case-insensitive substring match), in sorted order.
        '''
        matches = {}
        di = self.get_solv_pool().Dataiterator(0, solv.SOLVABLE_NAME, query, solv.Dataiterator.SEARCH_SUBSTRING|solv.Dataiterator.SEARCH_NOCASE)

        for d in di:
            matches[d.solvid] = True

        for solvid in sorted(matches.keys()):
            yield solvid

    def search_provides(self, query_glob):
        ''' Yields solvable ids providing the (case-insensitive) glob
            query, in sorted order.
        '''
        matches = {}
        di = self.get_solv_pool().Dataiterator(0, solv.SOLVABLE_PROVIDES, query_glob, solv.Dataiterator.SEARCH_GLOB|solv.Dataiterator.SEARCH_NOCASE)

        for d in di:
            matches[d.solvid] = True

        for solvid in sorted(matches.keys()):
            yield solvid

    def set_jobs(self, jobs):
        ''' Sets the job list to work on.
        '''
        self._jobs = jobs

    def get_jobs(self):
        ''' Returns the current job list.
        '''
        return self._jobs

    def handle_problems(self, jobs, problems):
        ''' Handler for handling job problems.

            Called when a solv transaction has problems to resolve.
            Interactively asks the user to pick a solution per problem
            ('s' skips the problem, 'q' aborts) and patches the job list
            in place accordingly.
        '''
        if not problems:
            return

        for problem in problems:
            sys.stdout.write("Problem %d:\n" % problem.id)
            r = problem.findproblemrule()
            ri = r.info()
            sys.stdout.write("%s\n" % ri.problemstr())
            solutions = problem.solutions()
            for solution in solutions:
                sys.stdout.write("  Solution %d:\n" % solution.id)
                elements = solution.elements(True)
                for element in elements:
                    sys.stdout.write("  - %s\n" % element.str())
                sys.stdout.write("\n")
            sol = ''
            # loop until the answer is 's', 'q' or a valid solution number
            while not (sol == 's' or sol == 'q' or (sol.isdigit() and int(sol) >= 1 and int(sol) <= len(solutions))):
                sys.stdout.write("Please choose a solution: ")
                sys.stdout.flush()
                sol = sys.stdin.readline().strip()
            if sol == 's':
                # skip problem
                continue
            if sol == 'q':
                # quit
                return

            solution = solutions[int(sol) - 1]
            for element in solution.elements():
                newjob = element.Job()
                if element.type == solv.Solver.SOLVER_SOLUTION_JOB:
                    # replace the offending job in place
                    jobs[element.jobidx] = newjob
                else:
                    if newjob and newjob not in jobs:
                        jobs.append(newjob)

    def _build_new_packages_file_map(self, newpkgs):
        ''' Downloads every package in newpkgs and returns a map of
            solvable id -> open file (commandline/delta rpms) or local
            path (plainly downloaded rpms).

            When /usr/bin/applydeltarpm exists and the system repo is not
            empty, tries to reconstruct packages from delta rpms first.
        '''
        pool = self.get_solv_pool()
        sysrepo = self.get_sysrepo()

        rpmcachedir = os.path.join(self.get_cache_dir(), 'rpms')
        if not os.path.exists(rpmcachedir):
            mkdir_p(rpmcachedir)

        newpkgsfp = {}
        if newpkgs:
            downloadsize = 0
            for p in newpkgs:
                downloadsize += p.lookup_num(solv.SOLVABLE_DOWNLOADSIZE)
            sys.stdout.write("Downloading %d packages, %d K\n" % (len(newpkgs), downloadsize))
            for p in newpkgs:
                repo = p.repo.appdata
                location, medianr = p.lookup_location()
                if not location:
                    continue
                if repo.type == 'commandline':
                    f = solv.xfopen(location)
                    if not f:
                        # fix: the argument used to be passed to Exception
                        # instead of being %-formatted into the message
                        raise Exception("%s not found from command line repo" % location)
                    newpkgsfp[p.id] = f
                    continue
                if not sysrepo.get_solv_handle().isempty() and os.access('/usr/bin/applydeltarpm', os.X_OK):
                    pname = p.name
                    di = p.repo.Dataiterator(solv.SOLVID_META, solv.DELTA_PACKAGE_NAME, pname, solv.Dataiterator.SEARCH_STRING)
                    di.prepend_keyname(solv.REPOSITORY_DELTAINFO)
                    for d in di:
                        dp = d.parentpos()
                        # the delta must target exactly this evr/arch
                        if dp.lookup_id(solv.DELTA_PACKAGE_EVR) != p.evrid or dp.lookup_id(solv.DELTA_PACKAGE_ARCH) != p.archid:
                            continue
                        baseevrid = dp.lookup_id(solv.DELTA_BASE_EVR)
                        candidate = None
                        # an installed package matching the delta base is needed
                        for installedp in pool.whatprovides(p.nameid):
                            if installedp.isinstalled() and installedp.nameid == p.nameid and installedp.archid == p.archid and installedp.evrid == baseevrid:
                                candidate = installedp
                        if not candidate:
                            continue
                        seq = dp.lookup_str(solv.DELTA_SEQ_NAME) + '-' + dp.lookup_str(solv.DELTA_SEQ_EVR) + '-' + dp.lookup_str(solv.DELTA_SEQ_NUM)
                        # dry-run applicability check against the rpm db
                        st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, '-c', '-s', seq])
                        if st:
                            continue
                        chksum = dp.lookup_checksum(solv.DELTA_CHECKSUM)
                        if not chksum:
                            continue
                        dloc = dp.lookup_str(solv.DELTA_LOCATION_DIR) + '/' + dp.lookup_str(solv.DELTA_LOCATION_NAME) + '-' + dp.lookup_str(solv.DELTA_LOCATION_EVR) + '.' + dp.lookup_str(solv.DELTA_LOCATION_SUFFIX)
                        f = repo.download_repo_file(dloc, checksum=chksum)
                        if not f:
                            continue
                        nf = tempfile.TemporaryFile()
                        # keep only the raw fd; the rpm is rebuilt into it
                        nf = os.dup(nf.fileno())
                        # TODO(review): the exit status of this call is not
                        # checked; a failed reconstruction goes unnoticed
                        st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, "/dev/fd/%d" % solv.xfileno(f), "/dev/fd/%d" % nf])
                        solv.xfclose(f)
                        os.lseek(nf, 0, os.SEEK_SET)
                        newpkgsfp[p.id] = solv.xfopen_fd("", nf)
                        break
                    if p.id in newpkgsfp:
                        # delta path succeeded; 'd' marks a delta download
                        sys.stdout.write("d")
                        sys.stdout.flush()
                        continue

                # XXX: FOR SOME REASON CHECKSUMS ARENT PASSED HERE
                chksum = p.lookup_checksum(solv.SOLVABLE_CHECKSUM)

                loc = os.path.join(rpmcachedir, os.path.basename(urlparse.urlsplit(location).path))
                f = repo.download_repo_file(location, checksum=chksum, location=loc)

                if not f:
                    raise Exception("\n%s: %s not found in repository" % (repo.name, location))

                newpkgsfp[p.id] = loc
                sys.stdout.write(".")
                sys.stdout.flush()
            sys.stdout.write("\n")
        return newpkgsfp

    def run_transaction(self, trans):
        ''' Runs a solv transaction trans as a rpm transaction.

            Downloads new packages, feeds install/erase steps to a rpm
            TransactionSet and executes it with _rpm_run_callback as the
            progress callback. Raises on dependency-check problems.
        '''
        newpkgs = trans.newpackages()
        logger.debug('New packages: %s', newpkgs)

        self.newpkgsfp = self._build_new_packages_file_map(newpkgs)

        rpmverbosity = 'debug'
        # set up the transaction to record output from scriptlets
        self._script_output_file = tempfile.NamedTemporaryFile()
        self._script_output_fd = open(self._script_output_file.name, 'w+b')
        # NOTE(review): scriptlet/log redirection is currently disabled:
        #rpm.setScriptFd(self._script_output_fd)
        #rpm.setVerbosity(RPMLOG_LEVELS.get(rpmverbosity))
        #rpm.setLogFile(self._script_output_fd)

        if rpmverbosity not in RPMLOG_LEVELS:
            raise ValueError('rpmverbosity not one of %s' % RPMLOG_LEVELS.keys())

        logging.debug('Logging rpm to %s', self._script_output_file.name)

        ts = rpm.TransactionSet(self.get_root())
        ts.initDB()
        # signature/digest checking is intentionally disabled
        ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)

        # erases need to keep aware of Solvables; the rpm erase callback only
        # hands back the package name (see _rpm_run_callback)
        self.erasenamehelper = {}

        for p in trans.steps():
            # fix: local was named 'type', shadowing the builtin
            steptype = trans.steptype(p, solv.Transaction.SOLVER_TRANSACTION_RPM_ONLY)
            if steptype == solv.Transaction.SOLVER_TRANSACTION_ERASE:
                rpmdbid = p.lookup_num(solv.RPM_RPMDBID)
                self.erasenamehelper[p.name] = p
                if not rpmdbid:
                    # fix: the solvable used to be passed as a second
                    # Exception argument instead of %-formatted
                    raise Exception("internal error: installed package %s has no rpmdbid\n" % p)
                ts.addErase(int(rpmdbid))
            elif steptype == solv.Transaction.SOLVER_TRANSACTION_INSTALL:
                f = open(self.newpkgsfp[p.id])
                h = ts.hdrFromFdno(f.fileno())
                os.lseek(f.fileno(), 0, os.SEEK_SET)
                ts.addInstall(h, (p, h), 'u')
            elif steptype == solv.Transaction.SOLVER_TRANSACTION_MULTIINSTALL:
                f = open(self.newpkgsfp[p.id])
                h = ts.hdrFromFdno(f.fileno())
                os.lseek(f.fileno(), 0, os.SEEK_SET)
                ts.addInstall(h, (p, h), 'i')
        # fix: ts.check() used to be invoked twice; run it once
        checkproblems = ts.check()
        if checkproblems:
            sys.stdout.write("%s\n" % (checkproblems,))
            raise Exception("Error with rpm transaction check: %s" % str(checkproblems))

        if ts.problems():
            raise Exception('RPM Check had the following problems: %s' % str([problem.type for problem in ts.problems()]))
        # No problems, order the transaction and let rpm free memory
        # which is not needed from this point onwards.
        ts.order()
        ts.clean()
        runproblems = ts.run(self._rpm_run_callback, None)
        # fix: the run() result used to be silently discarded
        if runproblems:
            logger.error('rpm transaction run reported problems: %s', runproblems)

    def _rpm_run_callback(self, reason, amount, total, package_header, d):
        ''' rpm transaction progress callback.

            package_header is the (solvable, header) key given to
            addInstall(), or just the package name string for erases.
            Must return an open file descriptor for INST_OPEN_FILE.
        '''
        package = None
        header = None
        # fix: guard with isinstance — for erases package_header is a plain
        # name string, and a 2-char name would unpack into characters
        if isinstance(package_header, tuple) and len(package_header) == 2:
            package, header = package_header

        if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
            logger.log(5, 'Open file %s %s %s %s', amount, total, package, d)
            # fix: use open() instead of the legacy file() builtin
            self.last_open = open(self.newpkgsfp[package.id])
            return self.last_open.fileno()
        elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
            logger.log(5, 'Close file %s %s %s %s', amount, total, package, d)
            self.last_open.close()
        elif reason == rpm.RPMCALLBACK_INST_START:
            logger.log(5, 'Install start %s %s %s %s', amount, total, package, d)
        elif reason == rpm.RPMCALLBACK_UNINST_START:
            # argh, package_header is just the name of the package here;
            # map it back to the solvable recorded in run_transaction()
            if package_header in self.erasenamehelper:
                package = self.erasenamehelper[package_header]
                logger.log(5, 'Erase start %s %s %s %s', amount, total, package, d)
        elif reason == rpm.RPMCALLBACK_CPIO_ERROR:
            logger.error('CPIO_ERROR ERROR %s %s %s %s', amount, total, package, d)
        elif reason == rpm.RPMCALLBACK_UNPACK_ERROR:
            logger.error('UNPACK_ERROR ERROR %s %s %s %s', amount, total, package, d)
        elif reason == rpm.RPMCALLBACK_SCRIPT_ERROR:
            logger.error('SCRIPT_ERROR ERROR %s %s %s %s', amount, total, package, d)
