Adjust layout, will help with using eclipse
This commit is contained in:
parent
b94b37422a
commit
2cb4e338f7
5 changed files with 0 additions and 0 deletions
188
src/bin/pungi.py
Executable file
188
src/bin/pungi.py
Executable file
|
|
@ -0,0 +1,188 @@
|
|||
#!/usr/bin/python -tt
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
import os
|
||||
import pypungi.config
|
||||
import pypungi.gather
|
||||
import pypungi.pungi
|
||||
import yum
|
||||
import pykickstart.parser
|
||||
import pykickstart.version
|
||||
import subprocess
|
||||
|
||||
def main():
    """Top-level driver for a pungi compose.

    Parses CLI options into the pungi Config, then runs the enabled
    stages in order: Gather, Createrepo, Buildinstall, CreateISO.
    Returns 1 (without composing) when root is required but we are not root.
    """

    config = pypungi.config.Config()

    # get_arguments() is defined in the __main__ block below; it also
    # mutates `config` via optparse callbacks while parsing.
    (opts, args) = get_arguments(config)

    # You must be this high to ride if you're going to do root tasks
    if os.geteuid () != 0 and (opts.do_all or opts.do_buildinstall):
        print >> sys.stderr, "You must run pungi as root"
        return 1

    # Best-effort SELinux check: buildinstall under enforcing mode can
    # produce a compose with selinux disabled, so warn up front.
    if opts.do_all or opts.do_buildinstall:
        try:
            selinux = subprocess.Popen('/usr/sbin/getenforce',
                                       stdout=subprocess.PIPE,
                                       stderr=open('/dev/null', 'w')).communicate()[0].strip('\n')
            if selinux == 'Enforcing':
                print >> sys.stdout, "WARNING: SELinux is enforcing. This may lead to a compose with selinux disabled."
                print >> sys.stdout, "Consider running with setenforce 0."
        except:
            # getenforce missing or unrunnable -- the check is advisory only.
            pass

    # Set up the kickstart parser and pass in the kickstart file we were handed
    ksparser = pykickstart.parser.KickstartParser(pykickstart.version.makeVersion())
    ksparser.readKickstart(opts.config)

    if opts.sourceisos:
        config.set('default', 'arch', 'source')

    # A kickstart partition mounted at 'iso' carries the media size to split on.
    for part in ksparser.handler.partition.partitions:
        if part.mountpoint == 'iso':
            config.set('default', 'cdsize', str(part.size))

    config.set('default', 'force', str(opts.force))

    # Set up our directories
    if not os.path.exists(config.get('default', 'destdir')):
        try:
            os.makedirs(config.get('default', 'destdir'))
        except OSError, e:
            print >> sys.stderr, "Error: Cannot create destination dir %s" % config.get('default', 'destdir')
            sys.exit(1)
    else:
        print >> sys.stdout, "Warning: Reusing existing destination directory."

    cachedir = config.get('default', 'cachedir')

    if not os.path.exists(cachedir):
        try:
            os.makedirs(cachedir)
        except OSError, e:
            print >> sys.stderr, "Error: Cannot create cache dir %s" % cachedir
            sys.exit(1)

    # Actually do work.
    if not opts.sourceisos:
        if opts.do_all or opts.do_gather:
            mygather = pypungi.gather.Gather(config, ksparser)
            mygather.getPackageObjects()
            mygather.downloadPackages()
            mygather.makeCompsFile()
            if not opts.nosource:
                mygather.getSRPMList()
                mygather.downloadSRPMs()

            del mygather

        mypungi = pypungi.pungi.Pungi(config)

        if opts.do_all or opts.do_createrepo:
            mypungi.doCreaterepo()

        if opts.do_all or opts.do_buildinstall:
            mypungi.doGetRelnotes()
            mypungi.doBuildinstall()

        if opts.do_all or opts.do_createiso:
            mypungi.doCreateIsos(split=opts.nosplitmedia)

    # Do things slightly different for src.
    if opts.sourceisos:
        # we already have all the content gathered
        mypungi = pypungi.pungi.Pungi(config)
        # Point at the SRPM tree produced by the earlier binary-arch runs.
        mypungi.topdir = os.path.join(config.get('default', 'destdir'),
                                      config.get('default', 'version'),
                                      config.get('default', 'flavor'),
                                      'source', 'SRPM')
        if opts.do_all or opts.do_createiso:
            mypungi.doCreateIsos(split=opts.nosplitmedia)

    print "All done!"
|
||||
|
||||
if __name__ == '__main__':
    # Script entry point: optparse and helpers live under the __main__
    # guard so importing this module does not run anything.
    from optparse import OptionParser
    import sys
    import time

    # NOTE(review): `today` appears unused below -- verify before removing.
    today = time.strftime('%Y%m%d', time.localtime())

    def get_arguments(config):
        """Parse the command line, pushing option values into `config`.

        Returns the (opts, args) pair from optparse.  Exits when no
        kickstart config is given or the flavor is not alphanumeric.
        """
        parser = OptionParser(version="%prog 1.2.4")

        def set_config(option, opt_str, value, parser, config):
            # optparse callback: mirror the option value straight into config.
            config.set('default', option.dest, value)

        # Pulled in from config file to be cli options as part of pykickstart conversion
        parser.add_option("--name", dest="name", type="string",
          action="callback", callback=set_config, callback_args=(config, ),
          help='the name for your distribution (defaults to "Fedora")')
        parser.add_option("--ver", dest="version", type="string",
          action="callback", callback=set_config, callback_args=(config, ),
          help='the version of your distribution (defaults to datestamp)')
        parser.add_option("--flavor", dest="flavor", type="string",
          action="callback", callback=set_config, callback_args=(config, ),
          help='the flavor of your distribution spin (optional)')
        parser.add_option("--destdir", dest="destdir", type="string",
          action="callback", callback=set_config, callback_args=(config, ),
          help='destination directory (defaults to current directory)')
        parser.add_option("--cachedir", dest="cachedir", type="string",
          action="callback", callback=set_config, callback_args=(config, ),
          help='package cache directory (defaults to /var/cache/pungi)')
        parser.add_option("--bugurl", dest="bugurl", type="string",
          action="callback", callback=set_config, callback_args=(config, ),
          help='the url for your bug system (defaults to http://bugzilla.redhat.com)')
        parser.add_option("--discs", dest="discs", type="string",
          action="callback", callback=set_config, callback_args=(config, ),
          help='the number of discs you want to create (defaults to 1)')
        parser.add_option("--nosource", action="store_true", dest="nosource",
          help='disable gathering of source packages (optional)')
        parser.add_option("--nosplitmedia", action="store_false", dest="nosplitmedia", default=True,
          help='disable creation of split media (optional)')
        parser.add_option("--sourceisos", default=False, action="store_true", dest="sourceisos",
          help='Create the source isos (other arch runs must be done)')
        parser.add_option("--force", default=False, action="store_true",
          help='Force reuse of an existing destination directory (will overwrite files)')

        parser.add_option("-c", "--config", dest="config",
          help='Path to kickstart config file')
        parser.add_option("--all-stages", action="store_true", default=True, dest="do_all",
          help="Enable ALL stages")
        parser.add_option("-G", action="store_true", default=False, dest="do_gather",
          help="Flag to enable processing the Gather stage")
        parser.add_option("-C", action="store_true", default=False, dest="do_createrepo",
          help="Flag to enable processing the Createrepo stage")
        parser.add_option("-B", action="store_true", default=False, dest="do_buildinstall",
          help="Flag to enable processing the BuildInstall stage")
        parser.add_option("-I", action="store_true", default=False, dest="do_createiso",
          help="Flag to enable processing the CreateISO stage")


        (opts, args) = parser.parse_args()

        if not opts.config:
            parser.print_help()
            sys.exit(0)

        if not config.get('default', 'flavor').isalnum() and not config.get('default', 'flavor') == '':
            print >> sys.stderr, "Flavor must be alphanumeric."
            sys.exit(1)

        # Requesting any individual stage switches off the do-everything default.
        if opts.do_gather or opts.do_createrepo or opts.do_buildinstall or opts.do_createiso:
            opts.do_all = False
        return (opts, args)

    main()
|
||||
123
src/pypungi/__init__.py
Normal file
123
src/pypungi/__init__.py
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
#!/usr/bin/python -tt
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import shutil
|
||||
import sys
|
||||
|
||||
class PungiBase(object):
    """Common base for pungi stage objects: keeps the config and wires logging."""

    def __init__(self, config):
        self.config = config

        self.doLoggerSetup()

        # Per-flavor/arch scratch area under destdir/work.
        destdir = self.config.get('default', 'destdir')
        flavor = self.config.get('default', 'flavor')
        arch = self.config.get('default', 'arch')
        self.workdir = os.path.join(destdir, 'work', flavor, arch)



    def doLoggerSetup(self):
        """Point the root logger at a per-flavor/arch file under destdir/logs."""

        logdir = os.path.join(self.config.get('default', 'destdir'), 'logs')

        # Always allow logs to be written out
        _ensuredir(logdir, None, force=True)

        flavor = self.config.get('default', 'flavor')
        arch = self.config.get('default', 'arch')
        if flavor:
            logfile = os.path.join(logdir, '%s.%s.log' % (flavor, arch))
        else:
            logfile = os.path.join(logdir, '%s.log' % (arch))

        # Create the root logger, that will log to our file
        logging.basicConfig(level=logging.DEBUG,
                            format='%(name)s.%(levelname)s: %(message)s',
                            filename=logfile)
|
||||
|
||||
|
||||
def _doRunCommand(command, logger, rundir='/tmp', output=subprocess.PIPE, error=subprocess.PIPE, env=None):
|
||||
"""Run a command and log the output. Error out if we get something on stderr"""
|
||||
|
||||
|
||||
logger.info("Running %s" % subprocess.list2cmdline(command))
|
||||
|
||||
p1 = subprocess.Popen(command, cwd=rundir, stdout=output, stderr=error, universal_newlines=True, env=env)
|
||||
(out, err) = p1.communicate()
|
||||
|
||||
if out:
|
||||
logger.debug(out)
|
||||
|
||||
if p1.returncode != 0:
|
||||
logger.error("Got an error from %s" % command[0])
|
||||
logger.error(err)
|
||||
raise OSError, "Got an error from %s: %s" % (command[0], err)
|
||||
|
||||
def _link(local, target, logger, force=False):
    """Simple function to link or copy a package, removing target optionally.

    local  -- source path (typically a file in the yum cache)
    target -- destination path
    logger -- logging.Logger used on failure
    force  -- when True, remove an existing target first
    """

    if os.path.exists(target) and force:
        os.remove(target)

    try:
        os.link(local, target)
    except OSError, e:
        # 18 is errno.EXDEV (cross-device link); anything else is fatal.
        if e.errno != 18: # EXDEV
            logger.error('Got an error linking from cache: %s' % e)
            raise OSError, e

        # Can't hardlink cross file systems
        shutil.copy2(local, target)
|
||||
|
||||
def _ensuredir(target, logger, force=False, clean=False):
|
||||
"""Ensure that a directory exists, if it already exists, only continue
|
||||
if force is set."""
|
||||
|
||||
# We have to check existance of a logger, as setting the logger could
|
||||
# itself cause an issue.
|
||||
def whoops(func, path, exc_info):
|
||||
message = 'Could not remove %s' % path
|
||||
if logger:
|
||||
logger.error(message)
|
||||
else:
|
||||
sys.stderr(message)
|
||||
sys.exit(1)
|
||||
|
||||
if os.path.exists(target) and not os.path.isdir(target):
|
||||
message = '%s exists but is not a directory.' % target
|
||||
if logger:
|
||||
logger.error(message)
|
||||
else:
|
||||
sys.stderr(message)
|
||||
sys.exit(1)
|
||||
|
||||
if not os.path.isdir(target):
|
||||
os.makedirs(target)
|
||||
elif force and clean:
|
||||
shutil.rmtree(target, onerror=whoops)
|
||||
os.makedirs(target)
|
||||
elif force:
|
||||
return
|
||||
else:
|
||||
message = 'Directory %s already exists. Use --force to overwrite.' % target
|
||||
if logger:
|
||||
logger.error(message)
|
||||
else:
|
||||
sys.stderr(message)
|
||||
sys.exit(1)
|
||||
45
src/pypungi/config.py
Normal file
45
src/pypungi/config.py
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
#!/usr/bin/python -tt
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
import os
|
||||
import time
|
||||
import yum
|
||||
|
||||
from ConfigParser import SafeConfigParser
|
||||
|
||||
class Config(SafeConfigParser):
    """SafeConfigParser pre-populated with pungi's default settings."""

    def __init__(self):
        SafeConfigParser.__init__(self)

        self.add_section('default')

        # Table of built-in defaults; arch/version/destdir are computed
        # from the running system at construction time.
        defaults = (
            ('osdir', 'os'),
            ('sourcedir', 'source'),
            ('debugdir', 'debug'),
            ('isodir', 'iso'),
            ('relnotefilere', 'GPL README-BURNING-ISOS-en_US.txt ^RPM-GPG'),
            ('relnotedirre', ''),
            ('relnotepkgs', 'fedora-release fedora-release-notes'),
            ('product_path', 'Packages'),
            ('cachedir', '/var/cache/pungi'),
            ('arch', yum.rpmUtils.arch.getBaseArch(os.uname()[4])),
            ('name', 'Fedora'),
            ('iso_basename', 'Fedora'),
            ('version', time.strftime('%Y%m%d', time.localtime())),
            ('flavor', ''),
            ('destdir', os.getcwd()),
            ('bugurl', 'http://bugzilla.redhat.com'),
            ('discs', '1'),
            ('cdsize', '670.0'),
        )
        for key, value in defaults:
            self.set('default', key, value)
|
||||
|
||||
429
src/pypungi/gather.py
Executable file
429
src/pypungi/gather.py
Executable file
|
|
@ -0,0 +1,429 @@
|
|||
#!/usr/bin/python -tt
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
import yum
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import pypungi
|
||||
import logging
|
||||
import urlgrabber.progress
|
||||
|
||||
class CallBack(urlgrabber.progress.TextMeter):
    """Progress callback handed to yum; silences the textual progress bar."""

    def progressbar(self, current, total, name=None):
        # Intentionally a no-op: suppress per-package progress output.
        pass
|
||||
|
||||
class PungiYum(yum.YumBase):
    """Subclass of Yum that routes yum's logging into pungi's log files."""

    def __init__(self, config):
        # Keep a handle on the pungi config before letting yum initialize.
        self.pungiconfig = config
        yum.YumBase.__init__(self)

    def doLoggingSetup(self, debuglevel, errorlevel):
        """Setup the logging facility.

        Logs to destdir/logs/<flavor>.<arch>.log (or <arch>.log when no
        flavor is set).  debuglevel/errorlevel are accepted for interface
        compatibility with yum but not otherwise used here.
        """

        logdir = os.path.join(self.pungiconfig.get('default', 'destdir'), 'logs')
        if not os.path.exists(logdir):
            os.makedirs(logdir)
        if self.pungiconfig.get('default', 'flavor'):
            logfile = os.path.join(logdir, '%s.%s.log' % (self.pungiconfig.get('default', 'flavor'),
                                                          self.pungiconfig.get('default', 'arch')))
        else:
            logfile = os.path.join(logdir, '%s.log' % (self.pungiconfig.get('default', 'arch')))

        yum.logging.basicConfig(level=yum.logging.DEBUG, filename=logfile)
        # Removed a stray "self.logger.error('foobar')" debugging leftover
        # that wrote a bogus error line on every setup.

    def doFileLogSetup(self, uid, logfile):
        # This function overrides a yum function, allowing pungi to control
        # the logging.
        pass
|
||||
|
||||
class Gather(pypungi.PungiBase):
    """Package-gathering stage.

    Resolves the kickstart package set (groups + explicit packages) against
    the configured yum repos, pulls in dependencies, and downloads the
    resulting RPMs (and optionally their SRPMs) into the compose tree.
    """

    def __init__(self, config, ksparser):
        pypungi.PungiBase.__init__(self, config)

        # Set our own logging name space
        self.logger = logging.getLogger('Pungi.Gather')

        # Create the stdout/err streams and only send INFO+ stuff there
        formatter = logging.Formatter('%(name)s:%(levelname)s: %(message)s')
        console = logging.StreamHandler()
        console.setFormatter(formatter)
        console.setLevel(logging.INFO)
        self.logger.addHandler(console)

        self.ksparser = ksparser
        self.polist = []      # package objects selected for the compose
        self.srpmlist = []    # source rpm names matching polist
        self.resolved_deps = {} # list the deps we've already resolved, short circuit.

        # Create a yum object to use
        self.ayum = PungiYum(config)
        self.ayum.doLoggingSetup(6, 6)
        yumconf = yum.config.YumConf()
        yumconf.debuglevel = 6
        yumconf.errorlevel = 6
        yumconf.cachedir = self.config.get('default', 'cachedir')
        yumconf.persistdir = os.path.join(self.workdir, 'yumlib')
        yumconf.installroot = os.path.join(self.workdir, 'yumroot')
        yumconf.uid = os.geteuid()
        yumconf.cache = 0
        yumconf.failovermethod = 'priority'
        yumvars = yum.config._getEnvVar()
        yumvars['releasever'] = self.config.get('default', 'version')
        yumvars['basearch'] = yum.rpmUtils.arch.getBaseArch(myarch=self.config.get('default', 'arch'))
        yumconf.yumvar = yumvars
        self.ayum._conf = yumconf
        self.ayum.repos.setCacheDir(self.ayum.conf.cachedir)

        # Map the target arch to the widest compatible yum arch so the
        # sack includes every multilib/compat package.
        arch = self.config.get('default', 'arch')
        if arch == 'i386':
            yumarch = 'athlon'
        elif arch == 'ppc':
            yumarch = 'ppc64'
        elif arch == 'sparc':
            yumarch = 'sparc64v'
        else:
            yumarch = arch

        self.ayum.compatarch = yumarch
        arches = yum.rpmUtils.arch.getArchList(yumarch)
        arches.append('src') # throw source in there, filter it later

        # deal with our repos
        try:
            ksparser.handler.repo.methodToRepo()
        except:
            pass

        for repo in ksparser.handler.repo.repoList:
            self.logger.info('Adding repo %s' % repo.name)
            thisrepo = yum.yumRepo.YumRepository(repo.name)
            thisrepo.name = repo.name
            # add excludes and such here when pykickstart gets them
            if repo.mirrorlist:
                thisrepo.mirrorlist = yum.parser.varReplace(repo.mirrorlist, self.ayum.conf.yumvar)
                self.logger.info('Mirrorlist for repo %s is %s' % (thisrepo.name, thisrepo.mirrorlist))
            else:
                thisrepo.baseurl = yum.parser.varReplace(repo.baseurl, self.ayum.conf.yumvar)
                self.logger.info('URL for repo %s is %s' % (thisrepo.name, thisrepo.baseurl))
            thisrepo.basecachedir = self.ayum.conf.cachedir
            thisrepo.enablegroups = True
            thisrepo.failovermethod = 'priority' # This is until yum uses this failover by default
            thisrepo.exclude = repo.excludepkgs
            thisrepo.includepkgs = repo.includepkgs
            self.ayum.repos.add(thisrepo)
            self.ayum.repos.enableRepo(thisrepo.id)
            self.ayum._getRepos(thisrepo=thisrepo.id, doSetup = True)

        self.ayum.repos.setProgressBar(CallBack())
        self.ayum.repos.callback = CallBack()

        self.ayum.cleanMetadata() # clean metadata that might be in the cache from previous runs
        self.ayum.cleanSqlite() # clean metadata that might be in the cache from previous runs

        self.logger.info('Getting sacks for arches %s' % arches)
        self.ayum._getSacks(archlist=arches)

    def _filtersrc(self, po):
        """Filter out package objects that are of 'src' arch."""

        if po.arch == 'src':
            return False

        return True

    def verifyCachePkg(self, po, path): # Stolen from yum
        """check the package checksum vs the cache
        return True if pkg is good, False if not"""

        (csum_type, csum) = po.returnIdSum()

        try:
            filesum = yum.misc.checksum(csum_type, path)
        except yum.Errors.MiscError:
            return False

        if filesum != csum:
            return False

        return True

    def getPackageDeps(self, po):
        """Add the dependencies for a given package to the
        transaction info"""

        self.logger.info('Checking deps of %s.%s' % (po.name, po.arch))

        reqs = po.requires
        provs = po.provides

        for req in reqs:
            # Skip requirements we have already satisfied in earlier calls.
            if self.resolved_deps.has_key(req):
                continue
            (r,f,v) = req
            # rpmlib() and config() requirements are never real packages.
            if r.startswith('rpmlib(') or r.startswith('config('):
                continue
            # Self-satisfied requirement.
            if req in provs:
                continue

            deps = self.ayum.whatProvides(r, f, v).returnPackages()
            if not deps:
                self.logger.warn("Unresolvable dependency %s in %s.%s" % (r, po.name, po.arch))
                continue

            depsack = yum.packageSack.ListPackageSack(deps)

            for dep in depsack.returnNewestByNameArch():
                self.ayum.tsInfo.addInstall(dep)
                self.logger.info('Added %s.%s for %s.%s' % (dep.name, dep.arch, po.name, po.arch))

            self.resolved_deps[req] = None

    def getPackagesFromGroup(self, group):
        """Get a list of package names from a ksparser group object

        Returns a list of package names"""

        packages = []

        # Check if we have the group
        if not self.ayum.comps.has_group(group.name):
            # NOTE(review): %s formats the whole group object; group.name may be intended.
            self.logger.error("Group %s not found in comps!" % group)
            return packages

        # Get the group object to work with
        groupobj = self.ayum.comps.return_group(group.name)

        # Add the mandatory packages
        packages.extend(groupobj.mandatory_packages.keys())

        # Add the default packages unless we don't want them
        if group.include == 1:
            packages.extend(groupobj.default_packages.keys())

        # Add the optional packages if we want them
        if group.include == 2:
            packages.extend(groupobj.default_packages.keys())
            packages.extend(groupobj.optional_packages.keys())

        # Deal with conditional packages
        # Populate a dict with the name of the required package and value
        # of the package objects it would bring in. To be used later if
        # we match the conditional.
        for condreq, cond in groupobj.conditional_packages.iteritems():
            pkgs = self.ayum.pkgSack.searchNevra(name=condreq)
            if pkgs:
                pkgs = self.ayum.bestPackagesFromList(pkgs, arch=self.ayum.compatarch)
                if self.ayum.tsInfo.conditionals.has_key(cond):
                    self.ayum.tsInfo.conditionals[cond].extend(pkgs)
                else:
                    self.ayum.tsInfo.conditionals[cond] = pkgs

        return packages

    def getPackageObjects(self):
        """Cycle through the list of packages, get package object
        matches, and resolve deps.

        Returns a list of package objects"""

        final_pkgobjs = {} # The final list of package objects
        searchlist = [] # The list of package names/globs to search for
        matchdict = {} # A dict of objects to names

        # First remove the excludes
        self.ayum.conf.exclude.extend(self.ksparser.handler.packages.excludedList)
        self.ayum.excludePackages()

        # Check to see if we need the base group
        if self.ksparser.handler.packages.addBase:
            self.ksparser.handler.packages.add(['@base'])

        # Get a list of packages from groups
        for group in self.ksparser.handler.packages.groupList:
            searchlist.extend(self.getPackagesFromGroup(group))

        # Add the adds
        searchlist.extend(self.ksparser.handler.packages.packageList)

        # Make the search list unique
        searchlist = yum.misc.unique(searchlist)

        # Search repos for things in our searchlist, supports globs
        (exactmatched, matched, unmatched) = yum.packages.parsePackages(self.ayum.pkgSack.returnPackages(), searchlist, casematch=1)
        matches = filter(self._filtersrc, exactmatched + matched)

        # Populate a dict of package objects to their names
        for match in matches:
            matchdict[match.name] = match

        # Get the newest results from the search
        mysack = yum.packageSack.ListPackageSack(matches)
        for match in mysack.returnNewestByNameArch():
            self.ayum.tsInfo.addInstall(match)
            self.logger.debug('Found %s.%s' % (match.name, match.arch))

        for pkg in unmatched:
            if not pkg in matchdict.keys():
                self.logger.warn('Could not find a match for %s in any configured repo' % pkg)

        if len(self.ayum.tsInfo) == 0:
            raise yum.Errors.MiscError, 'No packages found to download.'

        # Fixed-point loop: keep resolving deps of newly added packages
        # until a pass adds nothing new.
        moretoprocess = True
        while moretoprocess: # Our fun loop
            moretoprocess = False
            for txmbr in self.ayum.tsInfo:
                if not final_pkgobjs.has_key(txmbr.po):
                    final_pkgobjs[txmbr.po] = None # Add the pkg to our final list
                    self.getPackageDeps(txmbr.po) # Get the deps of our package
                    moretoprocess = True

        self.polist = final_pkgobjs.keys()
        self.logger.info('Finished gathering package objects.')

    def getSRPMList(self):
        """Cycle through the list of package objects and
        find the sourcerpm for them.  Requires yum still
        configured and a list of package objects"""

        for po in self.polist:
            # Strip the trailing ".src.rpm" to get the srpm's n-v-r.
            srpm = po.sourcerpm.split('.src.rpm')[0]
            if not srpm in self.srpmlist:
                self.srpmlist.append(srpm)

    def _downloadPackageList(self, polist, relpkgdir):
        """Cycle through the list of package objects and
        download them from their respective repos."""

        downloads = []
        for pkg in polist:
            downloads.append('%s.%s' % (pkg.name, pkg.arch))
        downloads.sort()
        self.logger.info("Download list: %s" % downloads)

        pkgdir = os.path.join(self.config.get('default', 'destdir'),
                              self.config.get('default', 'version'),
                              self.config.get('default', 'flavor'),
                              relpkgdir)

        # Ensure the pkgdir exists, force if requested, and make sure we clean it out
        pypungi._ensuredir(pkgdir, self.logger, force=self.config.getboolean('default', 'force'), clean=True)

        probs = self.ayum.downloadPkgs(polist)

        if len(probs.keys()) > 0:
            self.logger.error("Errors were encountered while downloading packages.")
            for key in probs.keys():
                errors = yum.misc.unique(probs[key])
                for error in errors:
                    self.logger.error("%s: %s" % (key, error))
            sys.exit(1)

        for po in polist:
            basename = os.path.basename(po.relativepath)

            local = po.localPkg()
            target = os.path.join(pkgdir, basename)

            # Link downloaded package in (or link package from file repo)
            try:
                pypungi._link(local, target, self.logger, force=True)
                continue
            except:
                self.logger.error("Unable to link %s from the yum cache." % po.name)
                sys.exit(1)

        self.logger.info('Finished downloading packages.')

    def downloadPackages(self):
        """Download the package objects obtained in getPackageObjects()."""

        self._downloadPackageList(self.polist,
                                  os.path.join(self.config.get('default', 'arch'),
                                               self.config.get('default', 'osdir'),
                                               self.config.get('default', 'product_path')))

    def makeCompsFile(self):
        """Gather any comps files we can from repos and merge them into one."""

        # get our list of repos
        repos = self.ayum.repos.repos.values()

        compsstub = '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE comps PUBLIC "-//Red Hat, Inc.//DTD Comps info//EN" "comps.dtd">\n<comps>\n'

        closestub = '\n</comps>\n'

        ourcompspath = os.path.join(self.workdir, '%s-%s-comps.xml' % (self.config.get('default', 'name'), self.config.get('default', 'version')))

        ourcomps = open(ourcompspath, 'w')

        ourcomps.write(compsstub)

        # iterate through the list and get what comps we can.
        # Strip the first three lines and the last line of substance off
        # once done, write it to our comps file
        for repo in repos:
            try:
                groupfile = repo.getGroups()
            except yum.Errors.RepoMDError, e:
                self.logger.warn("No group data found for %s" % repo.id)
                pass
            else:
                compslines = open(groupfile, 'r').readlines()
                # Find the index of the closing </comps> tag...
                for line in compslines:
                    if line.startswith('</comps>'):
                        end = compslines.index(line)

                # ...and the line after the opening <comps> tag.
                for line in compslines:
                    if line.startswith('<comps>'):
                        start = compslines.index(line) + 1

                ourcomps.writelines(compslines[start:end])

        ourcomps.write(closestub)
        ourcomps.close()

        # Run the xslt filter over our comps file
        compsfilter = ['/usr/bin/xsltproc', '--novalid']
        compsfilter.append('-o')
        compsfilter.append(ourcompspath)
        compsfilter.append('/usr/share/pungi/comps-cleanup.xsl')
        compsfilter.append(ourcompspath)

        pypungi._doRunCommand(compsfilter, self.logger)

    def downloadSRPMs(self):
        """Cycle through the list of srpms and
        find the package objects for them, Then download them."""

        srpmpolist = []

        for srpm in self.srpmlist:
            # srpm is "name-version-release"; rsplit keeps dashes in the name.
            (sname, sver, srel) = srpm.rsplit('-', 2)
            try:
                srpmpo = self.ayum.pkgSack.searchNevra(name=sname, ver=sver, rel=srel, arch='src')[0]
                if not srpmpo in srpmpolist:
                    srpmpolist.append(srpmpo)
            except IndexError:
                print >> sys.stderr, "Error: Cannot find a source rpm for %s" % srpm
                sys.exit(1)

        # do the downloads
        self._downloadPackageList(srpmpolist, os.path.join('source', 'SRPMS'))
|
||||
604
src/pypungi/pungi.py
Executable file
604
src/pypungi/pungi.py
Executable file
|
|
@ -0,0 +1,604 @@
|
|||
#!/usr/bin/python -tt
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Library General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
|
||||
import subprocess
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
sys.path.append('/usr/lib/anaconda-runtime')
|
||||
import splittree
|
||||
import shutil
|
||||
import re
|
||||
import pypungi
|
||||
|
||||
class Pungi(pypungi.PungiBase):
|
||||
def __init__(self, config):
    """Set up compose-wide state: console logging, the output directory
    layout, and the path of the .composeinfo metadata file.

    config is the ConfigParser-style object shared by all pungi stages;
    self.config is read below, so it is presumably stored by the
    PungiBase constructor — TODO confirm.
    """

    pypungi.PungiBase.__init__(self, config)

    self.logger = logging.getLogger('Pungi.Pungi')

    # Create the stdout/err streams and only send INFO+ stuff there
    formatter = logging.Formatter('%(name)s:%(levelname)s: %(message)s')
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    console.setLevel(logging.INFO)
    self.logger.addHandler(console)

    self.destdir = self.config.get('default', 'destdir')
    # Per-arch tree lives at <destdir>/<version>/<flavor>/<arch>
    self.archdir = os.path.join(self.destdir,
                                self.config.get('default', 'version'),
                                self.config.get('default', 'flavor'),
                                self.config.get('default', 'arch'))

    # 'os' holds the installable tree; isos go next to it.
    self.topdir = os.path.join(self.archdir, 'os')
    self.isodir = os.path.join(self.archdir, self.config.get('default','isodir'))

    # self.workdir is not set in this method — assumed to come from
    # PungiBase; verify against the base class.
    pypungi._ensuredir(self.workdir, self.logger, force=True)

    # Files (e.g. release notes) that must be present on every disc.
    self.common_files = []
    # .composeinfo accumulates 'tree:'/'media:'/'mediaset:' lines
    # describing the compose (see writeinfo()).
    self.infofile = os.path.join(self.config.get('default', 'destdir'),
                                 self.config.get('default', 'version'),
                                 '.composeinfo')
||||
def writeinfo(self, line):
|
||||
"""Append a line to the infofile in self.infofile"""
|
||||
|
||||
|
||||
f=open(self.infofile, "a+")
|
||||
f.write(line.strip() + "\n")
|
||||
f.close()
|
||||
|
||||
def mkrelative(self, subfile):
|
||||
"""Return the relative path for 'subfile' underneath the version dir."""
|
||||
|
||||
basedir = os.path.join(self.destdir, self.config.get('default', 'version'))
|
||||
if subfile.startswith(basedir):
|
||||
return subfile.replace(basedir + os.path.sep, '')
|
||||
|
||||
def doCreaterepo(self):
    """Generate repodata for the tree with createrepo, then build
    repoview HTML for it."""

    compsfile = os.path.join(self.workdir, '%s-%s-comps.xml' % (self.config.get('default', 'name'), self.config.get('default', 'version')))

    # Both tools keep a persistent cache under the configured cachedir.
    cachedir = self.config.get('default', 'cachedir')
    for subdir in ('createrepocache', 'repoviewcache'):
        pypungi._ensuredir(os.path.join(cachedir, subdir),
                           self.logger,
                           force=True)

    # Assemble and run the createrepo invocation.
    createrepo = ['/usr/bin/createrepo',
                  '--quiet',
                  '--database',
                  '--groupfile', compsfile,
                  '--cachedir', os.path.join(cachedir, 'createrepocache'),
                  self.topdir]
    pypungi._doRunCommand(createrepo, self.logger)

    # repoview's page title mentions the flavor only when one is set.
    if self.config.get('default', 'flavor'):
        title = '%s %s: %s - %s' % (self.config.get('default', 'name'),
                                    self.config.get('default', 'version'),
                                    self.config.get('default', 'flavor'),
                                    self.config.get('default', 'arch'))
    else:
        title = '%s %s - %s' % (self.config.get('default', 'name'),
                                self.config.get('default', 'version'),
                                self.config.get('default', 'arch'))

    # Assemble and run the repoview invocation.
    repoview = ['/usr/bin/repoview',
                '--quiet',
                '--state-dir', os.path.join(cachedir, 'repoviewcache'),
                '--title', title,
                self.topdir]
    pypungi._doRunCommand(repoview, self.logger)
|
||||
def doBuildinstall(self):
    """Run anaconda-runtime's buildinstall on the tree, then record the
    tree location in .composeinfo for snake."""

    name = self.config.get('default', 'name')
    version = self.config.get('default', 'version')
    flavor = self.config.get('default', 'flavor')

    # assemble the buildinstall invocation
    args = ['/usr/lib/anaconda-runtime/buildinstall']
    #args.append('TMPDIR=%s' % self.workdir) # TMPDIR broken in buildinstall

    args.extend(['--product', name])

    # only pass --variant when a flavor is configured
    if not flavor == "":
        args.extend(['--variant', flavor])

    args.extend(['--version', version])
    args.extend(['--release', '%s %s' % (name, version)])
    args.extend(['--prodpath', self.config.get('default', 'product_path')])

    # bugurl is optional in the config
    if self.config.has_option('default', 'bugurl'):
        args.extend(['--bugurl', self.config.get('default', 'bugurl')])

    args.append(self.topdir)

    # run the command
    # TMPDIR is still broken with buildinstall.
    pypungi._doRunCommand(args, self.logger) #, env={"TMPDIR": self.workdir})

    # write out the tree data for snake
    self.writeinfo('tree: %s' % self.mkrelative(self.topdir))
|
||||
def doPackageorder(self):
    """Run anaconda-runtime's pkgorder on the tree, capturing its output
    into workdir/pkgorder-<arch> for later media splitting."""

    arch = self.config.get('default', 'arch')
    orderfile = open(os.path.join(self.workdir, 'pkgorder-%s' % arch), 'w')

    # assemble the pkgorder invocation
    #pkgorder.append('TMPDIR=%s' % self.workdir)
    pkgorder = ['/usr/lib/anaconda-runtime/pkgorder',
                self.topdir,
                arch,
                self.config.get('default', 'product_path')]

    # pkgorder writes the ordering to stdout; redirect it into the file
    pypungi._doRunCommand(pkgorder, self.logger, output=orderfile)
    orderfile.close()
|
||||
def doGetRelnotes(self):
    """Get extra files from packages in the tree to put in the topdir of
       the tree.

    Packages named in the 'relnotepkgs' option are exploded via
    rpm2cpio | cpio into a scratch docs dir; files matching
    'relnotefilere' are linked into the tree top (and remembered in
    self.common_files), directories matching 'relnotedirre' are copied
    there.  Re-raises any error from the extraction pipeline.
    """

    docsdir = os.path.join(self.workdir, 'docs')
    relnoterpms = self.config.get('default', 'relnotepkgs').split()

    # Compile the file/dir name patterns once, up front.
    fileres = []
    for pattern in self.config.get('default', 'relnotefilere').split():
        fileres.append(re.compile(pattern))

    dirres = []
    for pattern in self.config.get('default', 'relnotedirre').split():
        dirres.append(re.compile(pattern))

    pypungi._ensuredir(docsdir, self.logger, force=self.config.getboolean('default', 'force'), clean=True)

    # Expload the packages we list as relnote packages
    pkgs = os.listdir(os.path.join(self.topdir, self.config.get('default', 'product_path')))

    rpm2cpio = ['/usr/bin/rpm2cpio']
    cpio = ['cpio', '-imud']

    for pkg in pkgs:
        pkgname = pkg.rsplit('-', 2)[0]
        for relnoterpm in relnoterpms:
            if pkgname == relnoterpm:
                extraargs = [os.path.join(self.topdir, self.config.get('default', 'product_path'), pkg)]
                # BUGFIX: predefine out/err.  If the first Popen itself
                # raised (e.g. rpm2cpio missing), the old bare handler
                # hit an unbound 'err' and masked the real error with a
                # NameError.
                out = err = None
                try:
                    p1 = subprocess.Popen(rpm2cpio + extraargs, cwd=docsdir, stdout=subprocess.PIPE)
                    (out, err) = subprocess.Popen(cpio, cwd=docsdir, stdin=p1.stdout, stdout=subprocess.PIPE,
                                                  stderr=subprocess.PIPE, universal_newlines=True).communicate()
                except Exception:
                    # Log what we have, then propagate the original error.
                    self.logger.error("Got an error from rpm2cpio")
                    self.logger.error(err)
                    raise

                if out:
                    self.logger.debug(out)

    # Walk the tree for our files
    for dirpath, dirname, filelist in os.walk(docsdir):
        for filename in filelist:
            for regex in fileres:
                # first match wins; never clobber an existing file
                if regex.match(filename) and not os.path.exists(os.path.join(self.topdir, filename)):
                    self.logger.info("Linking release note file %s" % filename)
                    pypungi._link(os.path.join(dirpath, filename), os.path.join(self.topdir, filename), self.logger)
                    self.common_files.append(filename)

    # Walk the tree for our dirs
    for dirpath, dirname, filelist in os.walk(docsdir):
        for directory in dirname:
            for regex in dirres:
                if regex.match(directory) and not os.path.exists(os.path.join(self.topdir, directory)):
                    self.logger.info("Copying release note dir %s" % directory)
                    shutil.copytree(os.path.join(dirpath, directory), os.path.join(self.topdir, directory))
|
||||
def doSplittree(self):
    """Use anaconda-runtime's splittree to split the binary tree into
       disc-sized chunks."""

    cfg = self.config
    arch = cfg.get('default', 'arch')
    discs = cfg.getint('default', 'discs')

    # Configure the splitter: all discs carry binaries, none carry source.
    splitter = splittree.Timber()
    splitter.arch = arch
    splitter.target_size = cfg.getfloat('default', 'cdsize') * 1024 * 1024
    splitter.total_discs = discs
    splitter.bin_discs = discs
    splitter.src_discs = 0
    splitter.release_str = '%s %s' % (cfg.get('default', 'name'), cfg.get('default', 'version'))
    splitter.package_order_file = os.path.join(self.workdir, 'pkgorder-%s' % arch)
    splitter.dist_dir = self.topdir
    splitter.src_dir = os.path.join(cfg.get('default', 'destdir'), cfg.get('default', 'version'), 'source', 'SRPMS')
    splitter.product_path = cfg.get('default', 'product_path')
    splitter.common_files = self.common_files
    #splitter.reserve_size =

    self.logger.info("Running splittree.")

    output = splitter.main()
    if output:
        self.logger.debug("Output from splittree: %s" % '\n'.join(output))
|
||||
def doSplitSRPMs(self):
    """Use anaconda-runtime's splittree to split the srpms into appropriate
       sized chunks.

    Only src_discs is used here; the -discN/SRPMS directories are
    created by hand before calling splitSRPMS().
    NOTE(review): dist_dir ends in 'source/SRPM' while src_dir ends in
    'source/SRPMS' — looks intentional (disc dirs become SRPM-discN)
    but verify against the cleanup code in doCreateIsos.
    """

    timber = splittree.Timber()
    timber.arch = self.config.get('default', 'arch')
    #timber.total_discs = self.config.getint('default', 'discs')
    #timber.bin_discs = self.config.getint('default', 'discs')
    timber.src_discs = self.config.getint('default', 'discs')
    #timber.release_str = '%s %s' % (self.config.get('default', 'name'), self.config.get('default', 'version'))
    #timber.package_order_file = os.path.join(self.config.get('default', 'destdir'), 'pkgorder-%s' % self.config.get('default', 'arch'))
    timber.dist_dir = os.path.join(self.config.get('default', 'destdir'),
                                   self.config.get('default', 'version'),
                                   self.config.get('default', 'flavor'),
                                   'source', 'SRPM')
    timber.src_dir = os.path.join(self.config.get('default', 'destdir'),
                                  self.config.get('default', 'version'),
                                  self.config.get('default', 'flavor'),
                                  'source', 'SRPMS')
    #timber.product_path = self.config.get('default', 'product_path')
    #timber.reserve_size =
    # Set this ourselves, for creating our dirs ourselves
    timber.src_list = range(1, timber.src_discs + 1)

    # this is stolen from splittree.py in anaconda-runtime. Blame them if its ugly (:
    for i in range(timber.src_list[0], timber.src_list[-1] + 1):
        # BUGFIX: was 'timper.dist_dir' — a typo that raised NameError
        # on the first iteration.
        pypungi._ensuredir('%s-disc%d/SRPMS' % (timber.dist_dir, i),
                           self.logger,
                           force=self.config.getboolean('default', 'force'),
                           clean=True)
        timber.linkFiles(timber.dist_dir,
                         "%s-disc%d" % (timber.dist_dir, i),
                         timber.common_files)

    self.logger.info("Splitting SRPMs")
    timber.splitSRPMS()
    self.logger.info("splitSRPMS complete")
|
||||
def doCreateSplitrepo(self):
    """Create the split metadata for the isos.

    Runs createrepo over the -discN trees (or over a faked-up -disc1
    tree for single-disc composes) with a media:// baseurl taken from
    .discinfo, then writes a media.repo file for the installed system.
    """

    # The media id is the first line of .discinfo; it seeds the
    # media:// baseurl below so installed systems can match their disc.
    if self.config.getint('default', 'discs') > 1:
        discinfo = open('%s-disc1/.discinfo' % self.topdir, 'r').readlines()
    else:
        discinfo = open(os.path.join(self.topdir, '.discinfo'), 'r').readlines()
    mediaid = discinfo[0].rstrip('\n')

    compsfile = os.path.join(self.workdir, '%s-%s-comps.xml' % (self.config.get('default', 'name'), self.config.get('default', 'version')))

    # set up the process
    createrepo = ['/usr/bin/createrepo']
    createrepo.append('--quiet')
    createrepo.append('--database')

    createrepo.append('--groupfile')
    createrepo.append(compsfile)

    createrepo.append('--cachedir')
    createrepo.append(os.path.join(self.config.get('default', 'cachedir'),
                                   'createrepocache'))

    createrepo.append('--baseurl')
    createrepo.append('media://%s' % mediaid)

    # Output always goes to a -disc1 directory; for a single disc that
    # directory is created here so the layout matches split media.
    createrepo.append('--outputdir')
    if self.config.getint('default', 'discs') == 1:
        pypungi._ensuredir('%s-disc1' % self.topdir, self.logger,
                           force=self.config.getboolean('default', 'force'),
                           clean=True) # rename this for single disc
    createrepo.append('%s-disc1' % self.topdir)

    createrepo.append('--basedir')
    if self.config.getint('default', 'discs') == 1:
        createrepo.append(self.topdir)
        # single-disc: the whole tree is also the directory to index
        createrepo.append(self.topdir)
    else:
        createrepo.append('%s-disc1' % self.topdir)

    if self.config.getint('default', 'discs') > 1:
        createrepo.append('--split')

        # in --split mode every -discN directory is an argument
        for disc in range(1, self.config.getint('default', 'discs') + 1):
            createrepo.append('%s-disc%s' % (self.topdir, disc))

    # run the command
    pypungi._doRunCommand(createrepo, self.logger)

    # Write out a repo file for the disc to be used on the installed system
    self.logger.info('Creating media repo file.')
    repofile = open(os.path.join(self.topdir, 'media.repo'), 'w')
    repocontent = """[InstallMedia]
name=%s %s
mediaid=%s
metadata_expire=-1
gpgcheck=0
cost=500
""" % (self.config.get('default', 'name'), self.config.get('default', 'version'), mediaid)

    repofile.write(repocontent)
    repofile.close()
|
||||
def doCreateIsos(self, split=True):
    """Create isos of the tree, optionally splitting the tree for split media.

    Builds one full-tree iso (DVD-named when the tree exceeds 700 MiB),
    per-disc CD isos when more than one disc is needed, and a rescue iso
    when anaconda-runtime ships a mk-rescueimage helper for this arch.
    Each image is sha1summed into SHA1SUM and recorded in .composeinfo.
    When split is False the tree is forced onto a single disc.
    """

    isolist=[]
    anaruntime = '/usr/lib/anaconda-runtime/boot'
    discinfofile = os.path.join(self.topdir, '.discinfo') # we use this a fair amount

    pypungi._ensuredir(self.isodir, self.logger,
                       force=self.config.getboolean('default', 'force'),
                       clean=True) # This is risky...

    # setup the base command
    mkisofs = ['/usr/bin/mkisofs']
    mkisofs.extend(['-v', '-U', '-J', '-R', '-T']) # common mkisofs flags

    # Per-arch boot argument sets, appended to mkisofs per image below.
    x86bootargs = ['-b', 'isolinux/isolinux.bin', '-c', 'isolinux/boot.cat',
                   '-no-emul-boot', '-boot-load-size', '4', '-boot-info-table']

    ia64bootargs = ['-b', 'images/boot.img', '-no-emul-boot']

    ppcbootargs = ['-part', '-hfs', '-r', '-l', '-sysid', 'PPC', '-no-desktop', '-allow-multidot', '-chrp-boot']

    ppcbootargs.append('-map')
    ppcbootargs.append(os.path.join(anaruntime, 'mapping'))

    ppcbootargs.append('-magic')
    ppcbootargs.append(os.path.join(anaruntime, 'magic'))

    ppcbootargs.append('-hfs-bless') # must be last

    sparcbootargs = ['-G', '/boot/isofs.b', '-B', '...', '-s', '/boot/silo.conf', '-sparc-label', '"sparc"']

    # Check the size of the tree
    # This size checking method may be bunk, accepting patches...
    # mkisofs -print-size dry-runs the image and prints its size.
    if not self.config.get('default', 'arch') == 'source':
        treesize = int(subprocess.Popen(mkisofs + ['-print-size', '-quiet', self.topdir], stdout=subprocess.PIPE).communicate()[0])
    else:
        srcdir = os.path.join(self.config.get('default', 'destdir'), self.config.get('default', 'version'), 'source', 'SRPMS')

        treesize = int(subprocess.Popen(mkisofs + ['-print-size', '-quiet', srcdir], stdout=subprocess.PIPE).communicate()[0])
    # Size returned is 2KiB clusters or some such. This translates that to MiB.
    treesize = treesize * 2048 / 1024 / 1024

    cdsize = self.config.getfloat('default', 'cdsize')

    # Do some math to figure out how many discs we'd need
    if treesize < cdsize or not split:
        self.config.set('default', 'discs', '1')
    else:
        discs = int(treesize / cdsize + 1)
        self.config.set('default', 'discs', str(discs))
        # split the payload across discs before making any images
        if self.config.get('default', 'arch') == 'source':
            self.doSplitSRPMs()
        else:
            self.doPackageorder()
            self.doSplittree()

    if not self.config.get('default', 'arch') == 'source':
        self.doCreateSplitrepo()

    if treesize > 700: # we're larger than a 700meg CD
        isoname = '%s-%s-%s-DVD.iso' % (self.config.get('default', 'iso_basename'), self.config.get('default', 'version'),
                                        self.config.get('default', 'arch'))
    else:
        isoname = '%s-%s-%s.iso' % (self.config.get('default', 'iso_basename'), self.config.get('default', 'version'),
                                    self.config.get('default', 'arch'))

    isofile = os.path.join(self.isodir, isoname)

    if not self.config.get('default', 'arch') == 'source':
        # backup the main .discinfo to use a split one. This is an ugly hack :/
        content = open(discinfofile, 'r').readlines()
        shutil.move(discinfofile, os.path.join(self.config.get('default', 'destdir'),
                                               '.discinfo-%s' % self.config.get('default', 'arch')))
        # replace the 'ALL' disc list with '1,2,...,N' for the image
        content[content.index('ALL\n')] = ','.join([str(x) for x in range(1, self.config.getint('default', 'discs') + 1)]) + '\n'
        open(discinfofile, 'w').writelines(content)

        # move the main repodata out of the way to use the split repodata
        shutil.move(os.path.join(self.topdir, 'repodata'), os.path.join(self.config.get('default', 'destdir'),
                                                                        'repodata-%s' % self.config.get('default', 'arch')))
        shutil.copytree('%s-disc1/repodata' % self.topdir, os.path.join(self.topdir, 'repodata'))

    # setup the extra mkisofs args
    extraargs = []

    if self.config.get('default', 'arch') == 'i386' or self.config.get('default', 'arch') == 'x86_64':
        extraargs.extend(x86bootargs)
    elif self.config.get('default', 'arch') == 'ia64':
        extraargs.extend(ia64bootargs)
    elif self.config.get('default', 'arch') == 'ppc':
        extraargs.extend(ppcbootargs)
        if self.config.getint('default', 'discs') == 1:
            extraargs.append(os.path.join(self.topdir, "ppc/mac")) # this may work for both cases.. test
        else:
            # NOTE(review): 'disc' is not bound yet on this path (it is
            # only defined by the per-disc loop further down) — looks
            # like a latent NameError for split ppc media; confirm.
            extraargs.append(os.path.join('%s-disc%s' % (self.topdir, disc), "ppc/mac"))
    elif self.config.get('default', 'arch') == 'sparc':
        extraargs.extend(sparcbootargs)

    # volume label
    extraargs.append('-V')
    if treesize > 700:
        extraargs.append('%s %s %s DVD' % (self.config.get('default', 'name'),
                                           self.config.get('default', 'version'), self.config.get('default', 'arch')))
    else:
        extraargs.append('%s %s %s' % (self.config.get('default', 'name'),
                                       self.config.get('default', 'version'), self.config.get('default', 'arch')))

    extraargs.append('-o')
    extraargs.append(isofile)

    if not self.config.get('default', 'arch') == 'source':
        extraargs.append(self.topdir)
    else:
        extraargs.append(os.path.join(self.archdir, 'SRPMS'))

    # run the command
    pypungi._doRunCommand(mkisofs + extraargs, self.logger)

    # implant md5 for mediacheck on all but source arches
    if not self.config.get('default', 'arch') == 'source':
        pypungi._doRunCommand(['/usr/bin/implantisomd5', isofile], self.logger)

    # shove the sha1sum into a file
    sha1file = open(os.path.join(self.isodir, 'SHA1SUM'), 'a')
    pypungi._doRunCommand(['/usr/bin/sha1sum', isoname], self.logger, rundir=self.isodir, output=sha1file)
    sha1file.close()

    # return the .discinfo file
    if not self.config.get('default', 'arch') == 'source':
        shutil.move(os.path.join(self.config.get('default', 'destdir'), '.discinfo-%s' % self.config.get('default', 'arch')), discinfofile)

        shutil.rmtree(os.path.join(self.topdir, 'repodata')) # remove our copied repodata
        shutil.move(os.path.join(self.config.get('default', 'destdir'),
                                 'repodata-%s' % self.config.get('default', 'arch')), os.path.join(self.topdir, 'repodata'))

    # Write out a line describing the media
    self.writeinfo('media: %s' % isofile)

    if self.config.getint('default', 'discs') > 1:
        for disc in range(1, self.config.getint('default', 'discs') + 1): # cycle through the CD isos
            isoname = '%s-%s-%s-disc%s.iso' % (self.config.get('default', 'iso_basename'), self.config.get('default', 'version'),
                                               self.config.get('default', 'arch'), disc)
            isofile = os.path.join(self.isodir, isoname)

            extraargs = []

            if disc == 1: # if this is the first disc, we want to set boot flags
                if self.config.get('default', 'arch') == 'i386' or self.config.get('default', 'arch') == 'x86_64':
                    extraargs.extend(x86bootargs)
                elif self.config.get('default', 'arch') == 'ia64':
                    extraargs.extend(ia64bootargs)
                elif self.config.get('default', 'arch') == 'ppc':
                    extraargs.extend(ppcbootargs)
                    extraargs.append(os.path.join('%s-disc%s' % (self.topdir, disc), "ppc/mac"))
                elif self.config.get('default', 'arch') == 'sparc':
                    extraargs.extend(sparcbootargs)

            extraargs.append('-V')
            extraargs.append('%s %s %s Disc %s' % (self.config.get('default', 'name'),
                                                   self.config.get('default', 'version'), self.config.get('default', 'arch'), disc))

            extraargs.append('-o')
            extraargs.append(isofile)

            extraargs.append(os.path.join('%s-disc%s' % (self.topdir, disc)))

            # run the command
            pypungi._doRunCommand(mkisofs + extraargs, self.logger)

            # implant md5 for mediacheck on all but source arches
            if not self.config.get('default', 'arch') == 'source':
                pypungi._doRunCommand(['/usr/bin/implantisomd5', isofile], self.logger)

            # shove the sha1sum into a file
            sha1file = open(os.path.join(self.isodir, 'SHA1SUM'), 'a')
            pypungi._doRunCommand(['/usr/bin/sha1sum', isoname], self.logger, rundir=self.isodir, output=sha1file)
            sha1file.close()

            # keep track of the CD images we've written
            isolist.append(self.mkrelative(isofile))

        # Write out a line describing the CD set
        self.writeinfo('mediaset: %s' % ' '.join(isolist))

    # Now make rescue images
    if not self.config.get('default', 'arch') == 'source' and \
       os.path.exists('/usr/lib/anaconda-runtime/mk-rescueimage.%s' % self.config.get('default', 'arch')):
        isoname = '%s-%s-%s-rescuecd.iso' % (self.config.get('default', 'iso_basename'),
                                             self.config.get('default', 'version'), self.config.get('default', 'arch'))
        isofile = os.path.join(self.isodir, isoname)

        # make the rescue tree
        rescue = ['/usr/lib/anaconda-runtime/mk-rescueimage.%s' % self.config.get('default', 'arch')]
        rescue.append(self.topdir)
        rescue.append(self.workdir)
        rescue.append(self.config.get('default', 'iso_basename'))
        rescue.append(self.config.get('default', 'product_path'))

        # run the command
        pypungi._doRunCommand(rescue, self.logger)

        # write the iso
        extraargs = []

        if self.config.get('default', 'arch') == 'i386' or self.config.get('default', 'arch') == 'x86_64':
            extraargs.extend(x86bootargs)
        elif self.config.get('default', 'arch') == 'ia64':
            extraargs.extend(ia64bootargs)
        elif self.config.get('default', 'arch') == 'ppc':
            extraargs.extend(ppcbootargs)
            extraargs.append(os.path.join(self.workdir, "%s-rescueimage" % self.config.get('default', 'arch'), "ppc/mac"))
        elif self.config.get('default', 'arch') == 'sparc':
            extraargs.extend(sparcbootargs)

        extraargs.append('-V')
        extraargs.append('%s %s %s Rescue' % (self.config.get('default', 'name'),
                                              self.config.get('default', 'version'), self.config.get('default', 'arch')))

        extraargs.append('-o')
        extraargs.append(isofile)

        extraargs.append(os.path.join(self.workdir, "%s-rescueimage" % self.config.get('default', 'arch')))

        # run the command
        pypungi._doRunCommand(mkisofs + extraargs, self.logger)

        # shove the sha1sum into a file
        sha1file = open(os.path.join(self.isodir, 'SHA1SUM'), 'a')
        pypungi._doRunCommand(['/usr/bin/sha1sum', isoname], self.logger, rundir=self.isodir, output=sha1file)
        sha1file.close()

    # Do some clean up
    # stash the per-disc trees in the workdir so the archdir stays clean
    dirs = os.listdir(self.archdir)

    for directory in dirs:
        if directory.startswith('os-disc') or directory.startswith('SRPM-disc'):
            if os.path.exists(os.path.join(self.workdir, directory)):
                shutil.rmtree(os.path.join(self.workdir, directory))
            shutil.move(os.path.join(self.archdir, directory), os.path.join(self.workdir, directory))

    self.logger.info("CreateIsos is done.")
||||
Loading…
Add table
Add a link
Reference in a new issue