Merge branch 'master' into mead
Conflicts: builder/kojid cli/koji hub/kojihub.py www/kojiweb/buildinfo.chtml www/kojiweb/builds.chtml www/kojiweb/index.py
This commit is contained in:
commit
f273899018
65 changed files with 3462 additions and 6470 deletions
40
util/koji-gc
40
util/koji-gc
|
|
@ -124,6 +124,8 @@ def get_options():
|
|||
help=_("Process only packages matching PATTERN"))
|
||||
parser.add_option("--bypass-locks", metavar="PATTERN", action="append",
|
||||
help=_("Bypass locks for tags matching PATTERN"))
|
||||
parser.add_option("--purge", action="store_true", default=False,
|
||||
help=_("When pruning, attempt to delete the builds that are untagged"))
|
||||
parser.add_option("--trashcan-tag", default='trashcan', metavar="TAG",
|
||||
help=_("specify an alternate trashcan tag"))
|
||||
parser.add_option("--weburl", default="http://localhost/koji", metavar="URL",
|
||||
|
|
@ -375,7 +377,7 @@ def activate_session(session):
|
|||
else:
|
||||
session.krb_login(proxyuser=options.runas)
|
||||
except krbV.Krb5Error, e:
|
||||
error(_("Kerberos authentication failed: '%s' (%s)") % (e.message, e.err_code))
|
||||
error(_("Kerberos authentication failed: %s (%s)") % (e.args[1], e.args[0]))
|
||||
except socket.error, e:
|
||||
warn(_("Could not connect to Kerberos authentication service: '%s'") % e.args[1])
|
||||
if not options.noauth and not session.logged_in:
|
||||
|
|
@ -808,7 +810,10 @@ def get_build_sigs(build):
|
|||
return keys.keys()
|
||||
|
||||
def handle_prune():
|
||||
"""Untag old builds according to policy"""
|
||||
"""Untag old builds according to policy
|
||||
|
||||
If purge is True, will also attempt to delete the pruned builds afterwards
|
||||
"""
|
||||
#read policy
|
||||
if not options.config or not options.config.has_option('prune', 'policy'):
|
||||
print "Skipping prune step. No policies available."
|
||||
|
|
@ -823,6 +828,7 @@ def handle_prune():
|
|||
#get tags
|
||||
tags = [(t['name'], t) for t in session.listTags()]
|
||||
tags.sort()
|
||||
untagged = {}
|
||||
for tagname, taginfo in tags:
|
||||
if tagname == options.trashcan_tag:
|
||||
if options.debug:
|
||||
|
|
@ -896,15 +902,45 @@ def handle_prune():
|
|||
if action == 'untag':
|
||||
if options.test:
|
||||
print "Would have untagged %s from %s" % (nvr, tagname)
|
||||
untagged[nvr] = entry
|
||||
else:
|
||||
print "Untagging build %s from %s" % (nvr, tagname)
|
||||
try:
|
||||
session.untagBuildBypass(taginfo['id'], entry['build_id'], force=bypass)
|
||||
untagged[nvr] = entry
|
||||
except (xmlrpclib.Fault, koji.GenericError), e:
|
||||
print "Warning: untag operation failed: %s" % e
|
||||
pass
|
||||
# if action == 'keep' do nothing
|
||||
if options.purge and untagged:
|
||||
print "Attempting to purge %i builds" % len(untagged)
|
||||
if options.test:
|
||||
# we didn't actually untag, so we can't easily check which builds
|
||||
# would have been deleted
|
||||
print "Test mode. Skipping deletes."
|
||||
untagged = {}
|
||||
for nvr in untagged:
|
||||
build_id = untagged[nvr]['build_id']
|
||||
tags = [t['name'] for t in session.listTags(build_id)]
|
||||
if tags:
|
||||
#still tagged somewhere
|
||||
print "Skipping %s, still tagged: %s" % (nvr, tags)
|
||||
continue
|
||||
keys = get_build_sigs(build_id)
|
||||
#yes, could cache from above, but this is safer.
|
||||
#build could have been signed during run.
|
||||
if protected_sig(keys):
|
||||
print "Skipping %s, signatures: %s" % (nvr, keys)
|
||||
continue
|
||||
|
||||
if not options.test:
|
||||
print "Deleting untagged build: %s" % nvr
|
||||
try:
|
||||
session.deleteBuild(build_id, strict=False)
|
||||
except (xmlrpclib.Fault, koji.GenericError), e:
|
||||
print "Warning: deletion failed: %s" % e
|
||||
#server issue
|
||||
pass
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
|
|
|
|||
199
util/koji-shadow
199
util/koji-shadow
|
|
@ -41,6 +41,7 @@ import time
|
|||
import urllib2
|
||||
import urlgrabber.grabber as grabber
|
||||
import xmlrpclib # for ProtocolError and Fault
|
||||
import rpm
|
||||
|
||||
# koji.fp.o keeps stalling, probably network errors...
|
||||
# better to time out than to stall
|
||||
|
|
@ -104,6 +105,8 @@ def get_options():
|
|||
help=_("url of local XMLRPC server"))
|
||||
parser.add_option("-r", "--remote",
|
||||
help=_("url of remote XMLRPC server"))
|
||||
parser.add_option("--prefer-new", action="store_true", default=False,
|
||||
help=_("if there is a newer build locally prefer it for deps"))
|
||||
parser.add_option("--import-noarch", action="store_true",
|
||||
help=_("import missing noarch builds rather than rebuilding"))
|
||||
parser.add_option("--link-imports", action="store_true",
|
||||
|
|
@ -112,6 +115,25 @@ def get_options():
|
|||
help=_("topurl for remote server"))
|
||||
parser.add_option("--workpath", default="/tmp/koji-shadow",
|
||||
help=_("location to store work files"))
|
||||
parser.add_option("--auth-cert",
|
||||
help=_("Certificate for authentication"))
|
||||
parser.add_option("--auth-ca",
|
||||
help=_("CA certificate for authentication"))
|
||||
parser.add_option("--serverca",
|
||||
help=_("Server CA certificate"))
|
||||
parser.add_option("--rules",
|
||||
help=_("rules"))
|
||||
parser.add_option("--rules-greylist",
|
||||
help=_("greylist rules"))
|
||||
parser.add_option("--rules-blacklist",
|
||||
help=_("blacklist rules"))
|
||||
parser.add_option("--rules-ignorelist",
|
||||
help=_("Rules list of packages to ignore"))
|
||||
parser.add_option("--tag-build", action="store_true", default=False,
|
||||
help=_("tag sucessful builds into the tag we are building, default is to not tag"))
|
||||
parser.add_option("--arches",
|
||||
help=_("arches to use when creating tags"))
|
||||
|
||||
#parse once to get the config file
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
|
|
@ -136,7 +158,7 @@ def get_options():
|
|||
if not opt.dest:
|
||||
continue
|
||||
name = opt.dest
|
||||
alias = ('global', name)
|
||||
alias = ('main', name)
|
||||
if config.has_option(*alias):
|
||||
print "Using option %s from config file" % (alias,)
|
||||
if opt.action in ('store_true', 'store_false'):
|
||||
|
|
@ -148,6 +170,7 @@ def get_options():
|
|||
elif opt.type in ('float'):
|
||||
setattr(defaults, name, config.getfloat(*alias))
|
||||
else:
|
||||
print config.get(*alias)
|
||||
setattr(defaults, name, config.get(*alias))
|
||||
#config file options without a cmdline equivalent
|
||||
otheropts = [
|
||||
|
|
@ -160,7 +183,11 @@ def get_options():
|
|||
['noauth', None, 'boolean'],
|
||||
['server', None, 'string'],
|
||||
['remote', None, 'string'],
|
||||
['max_jobs', None, 'int']
|
||||
['max_jobs', None, 'int'],
|
||||
['serverca', None, 'string'],
|
||||
['auth_cert', None, 'string'],
|
||||
['auth_ca', None, 'string'],
|
||||
['arches', None, 'string'],
|
||||
]
|
||||
|
||||
|
||||
|
|
@ -259,6 +286,9 @@ def activate_session(session):
|
|||
if options.noauth:
|
||||
#skip authentication
|
||||
pass
|
||||
elif os.path.isfile(options.auth_cert):
|
||||
# authenticate using SSL client cert
|
||||
session.ssl_login(options.auth_cert, options.auth_ca, options.serverca, proxyuser=options.runas)
|
||||
elif options.user:
|
||||
#authenticate using user/password
|
||||
session.login()
|
||||
|
|
@ -307,6 +337,10 @@ class TrackedBuild(object):
|
|||
self.tracker = tracker
|
||||
self.info = remote.getBuild(build_id)
|
||||
self.nvr = "%(name)s-%(version)s-%(release)s" % self.info
|
||||
self.name = "%(name)s" % self.info
|
||||
self.epoch = "%(epoch)s" % self.info
|
||||
self.version = "%(version)s" % self.info
|
||||
self.release = "%(release)s" % self.info
|
||||
self.srpm = None
|
||||
self.rpms = None
|
||||
self.children = {}
|
||||
|
|
@ -446,18 +480,35 @@ class TrackedBuild(object):
|
|||
builds = {} #track which builds we need for a rebuild
|
||||
bases = {} #track base install for buildroots
|
||||
tags = {} #track buildroot tag(s)
|
||||
remote.multicall = True
|
||||
unpack = []
|
||||
for br_id in buildroots:
|
||||
if seen.has_key(br_id):
|
||||
continue
|
||||
seen[br_id] = 1
|
||||
br_info = remote.getBuildroot(br_id, strict=True)
|
||||
tags.setdefault(br_info['tag_name'], 0)
|
||||
tags[br_info['tag_name']] += 1
|
||||
#br_info = remote.getBuildroot(br_id, strict=True)
|
||||
remote.getBuildroot(br_id, strict=True)
|
||||
unpack.append(('br_info', br_id))
|
||||
#tags.setdefault(br_info['tag_name'], 0)
|
||||
#tags[br_info['tag_name']] += 1
|
||||
#print "."
|
||||
for rinfo in remote.listRPMs(componentBuildrootID=br_id):
|
||||
builds[rinfo['build_id']] = 1
|
||||
if not rinfo['is_update']:
|
||||
bases.setdefault(rinfo['name'], {})[br_id] = 1
|
||||
remote.listRPMs(componentBuildrootID=br_id)
|
||||
unpack.append(('rpmlist', br_id))
|
||||
#for rinfo in remote.listRPMs(componentBuildrootID=br_id):
|
||||
# builds[rinfo['build_id']] = 1
|
||||
# if not rinfo['is_update']:
|
||||
# bases.setdefault(rinfo['name'], {})[br_id] = 1
|
||||
for (dtype, br_id), data in zip(unpack, remote.multiCall()):
|
||||
if dtype == 'br_info':
|
||||
[br_info] = data
|
||||
tags.setdefault(br_info['tag_name'], 0)
|
||||
tags[br_info['tag_name']] += 1
|
||||
elif dtype == 'rpmlist':
|
||||
[rpmlist] = data
|
||||
for rinfo in rpmlist:
|
||||
builds[rinfo['build_id']] = 1
|
||||
if not rinfo['is_update']:
|
||||
bases.setdefault(rinfo['name'], {})[br_id] = 1
|
||||
# we want to record the intersection of the base sets
|
||||
# XXX - this makes some assumptions about homogeneity that, while reasonable,
|
||||
# are not strictly required of the db.
|
||||
|
|
@ -509,6 +560,7 @@ class BuildTracker(object):
|
|||
self.blacklist = None
|
||||
self.whitelist = None
|
||||
self.greylist = None
|
||||
self.ignorelist = []
|
||||
self.substitute_idx = {}
|
||||
self.substitutions = {}
|
||||
if options.config.has_option('rules', 'whitelist'):
|
||||
|
|
@ -517,6 +569,8 @@ class BuildTracker(object):
|
|||
self.blacklist = options.config.get('rules', 'blacklist').split()
|
||||
if options.config.has_option('rules', 'greylist'):
|
||||
self.greylist = options.config.get('rules', 'greylist').split()
|
||||
if options.config.has_option('rules', 'ignorelist'):
|
||||
self.ignorelist = options.config.get('rules', 'ignorelist').split()
|
||||
if options.config.has_option('rules', 'substitutions'):
|
||||
#At present this is a simple multi-line format
|
||||
#one substitution per line
|
||||
|
|
@ -561,6 +615,38 @@ class BuildTracker(object):
|
|||
return grey
|
||||
return default
|
||||
|
||||
def rpmvercmp (self, (e1, v1, r1), (e2, v2, r2)):
|
||||
"""find out which build is newer"""
|
||||
rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
|
||||
if rc == 1:
|
||||
#first evr wins
|
||||
return 1
|
||||
elif rc == 0:
|
||||
#same evr
|
||||
return 0
|
||||
else:
|
||||
#second evr wins
|
||||
return -1
|
||||
|
||||
def newerBuild(self, build, tag):
|
||||
#XXX: secondary arches need a policy to say if we have newer builld localy it will be the substitute
|
||||
localLatestBuild = session.getLatestBuilds(tag, package=str(build.name))
|
||||
if not localLatestBuild == []:
|
||||
parentevr = (str(build.epoch), build.version, build.release)
|
||||
parentnvr = (str(build.name), build.version, build.release)
|
||||
latestevr = (str(localLatestBuild[0]['epoch']), localLatestBuild[0]['version'], localLatestBuild[0]['release'])
|
||||
newestRPM = self.rpmvercmp( parentevr, latestevr)
|
||||
if options.debug:
|
||||
print "remote evr: %s \nlocal evr: %s \nResult: %s" % (parentevr, latestevr, newestRPM)
|
||||
if newestRPM == -1:
|
||||
#the local is newer
|
||||
info = session.getBuild("%s-%s-%s" % (str(localLatestBuild[0]['name']), localLatestBuild[0]['version'], localLatestBuild[0]['release'] ))
|
||||
if info:
|
||||
build = LocalBuild(info)
|
||||
self.substitute_idx[parentnvr] = build
|
||||
return build
|
||||
return None
|
||||
|
||||
def getSubstitute(self, nvr):
|
||||
build = self.substitute_idx.get(nvr)
|
||||
if not build:
|
||||
|
|
@ -582,7 +668,7 @@ class BuildTracker(object):
|
|||
self.substitute_idx[nvr] = build
|
||||
return build
|
||||
|
||||
def scanBuild(self, build_id, from_build=None, depth=0):
|
||||
def scanBuild(self, build_id, from_build=None, depth=0, tag=None):
|
||||
"""Recursively scan a build and its dependencies"""
|
||||
#print build_id
|
||||
build = self.builds.get(build_id)
|
||||
|
|
@ -607,6 +693,10 @@ class BuildTracker(object):
|
|||
else:
|
||||
tail = ""
|
||||
head = " " * depth
|
||||
if build.name in self.ignorelist:
|
||||
print "%sIgnored Build: %s%s" % (head, build.nvr, tail)
|
||||
build.setState('ignore')
|
||||
return build
|
||||
check = self.checkFilter(build, grey=None)
|
||||
if check is None:
|
||||
#greylisted builds are ok as deps, but not primary builds
|
||||
|
|
@ -627,6 +717,12 @@ class BuildTracker(object):
|
|||
if depth > 0:
|
||||
print "%sDep replaced: %s->%s" % (head, build.nvr, replace)
|
||||
return build
|
||||
if options.prefer_new:
|
||||
latestBuild = self.newerBuild(build, tag)
|
||||
if latestBuild != None:
|
||||
build.substitute = latestBuild.nvr
|
||||
print "%sNewer build replaced: %s->%s" % (head, build.nvr, latestBuild.nvr)
|
||||
return build
|
||||
if build.state == "common":
|
||||
#we're good
|
||||
if build.rebuilt:
|
||||
|
|
@ -645,7 +741,7 @@ class BuildTracker(object):
|
|||
# before this point
|
||||
#
|
||||
elif options.import_noarch and build.isNoarch():
|
||||
self.importBuild(build)
|
||||
self.importBuild(build, tag)
|
||||
elif build.state == "noroot":
|
||||
#Can't rebuild it, this is what substitutions are for
|
||||
print "%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail)
|
||||
|
|
@ -658,33 +754,36 @@ class BuildTracker(object):
|
|||
newdeps = []
|
||||
#don't actually set build.revised_deps until we finish the dep scan
|
||||
for dep_id in build.deps:
|
||||
dep = self.scanBuild(dep_id, from_build=build, depth=depth+1)
|
||||
if dep.substitute:
|
||||
dep2 = self.getSubstitute(dep.substitute)
|
||||
if isinstance(dep2, TrackedBuild):
|
||||
self.scanBuild(dep2.id, from_build=build, depth=depth+1)
|
||||
elif dep2 is None:
|
||||
#dep is missing on both local and remote
|
||||
print "%sSubstitute dep unavailable: %s" % (head, dep2.nvr)
|
||||
#no point in continuing
|
||||
break
|
||||
#otherwise dep2 should be LocalBuild instance
|
||||
newdeps.append(dep2)
|
||||
elif dep.state in ('broken', 'brokendeps', 'noroot', 'blocked'):
|
||||
#no point in continuing
|
||||
build.setState('brokendeps')
|
||||
print "%sCan't rebuild %s, %s is %s" % (head, build.nvr, dep.nvr, dep.state)
|
||||
newdeps = None
|
||||
dep = self.scanBuild(dep_id, from_build=build, depth=depth+1, tag=tag)
|
||||
if dep.name in self.ignorelist:
|
||||
break
|
||||
else:
|
||||
newdeps.append(dep)
|
||||
# set rebuild order as we go
|
||||
# we do this /after/ the recursion, so our deps have a lower order number
|
||||
self.rebuild_order += 1
|
||||
build.order = self.rebuild_order
|
||||
if dep.substitute:
|
||||
dep2 = self.getSubstitute(dep.substitute)
|
||||
if isinstance(dep2, TrackedBuild):
|
||||
self.scanBuild(dep2.id, from_build=build, depth=depth+1, tag=tag)
|
||||
elif dep2 is None:
|
||||
#dep is missing on both local and remote
|
||||
print "%sSubstitute dep unavailable: %s" % (head, dep2.nvr)
|
||||
#no point in continuing
|
||||
break
|
||||
#otherwise dep2 should be LocalBuild instance
|
||||
newdeps.append(dep2)
|
||||
elif dep.state in ('broken', 'brokendeps', 'noroot', 'blocked'):
|
||||
#no point in continuing
|
||||
build.setState('brokendeps')
|
||||
print "%sCan't rebuild %s, %s is %s" % (head, build.nvr, dep.nvr, dep.state)
|
||||
newdeps = None
|
||||
break
|
||||
else:
|
||||
newdeps.append(dep)
|
||||
# set rebuild order as we go
|
||||
# we do this /after/ the recursion, so our deps have a lower order number
|
||||
self.rebuild_order += 1
|
||||
build.order = self.rebuild_order
|
||||
build.revised_deps = newdeps
|
||||
#scanning takes a long time, might as well start builds if we can
|
||||
self.checkJobs()
|
||||
self.checkJobs(tag)
|
||||
self.rebuildMissing()
|
||||
if len(self.builds) % 50 == 0:
|
||||
self.report()
|
||||
|
|
@ -697,7 +796,7 @@ class BuildTracker(object):
|
|||
for build in builds:
|
||||
for retry in xrange(10):
|
||||
try:
|
||||
self.scanBuild(build['id'])
|
||||
self.scanBuild(build['id'], tag=tag)
|
||||
if options.first_one:
|
||||
return
|
||||
except (socket.timeout, socket.error):
|
||||
|
|
@ -741,7 +840,7 @@ class BuildTracker(object):
|
|||
session.uploadWrapper(dst, serverdir, blocksize=65536)
|
||||
session.importRPM(serverdir, fn)
|
||||
|
||||
def importBuild(self, build):
|
||||
def importBuild(self, build, tag=None):
|
||||
'''import a build from remote hub'''
|
||||
if not build.srpm:
|
||||
print "No srpm for build %s, skipping import" % build.nvr
|
||||
|
|
@ -764,6 +863,8 @@ class BuildTracker(object):
|
|||
fname = os.path.basename(relpath)
|
||||
self._importURL(url, fname)
|
||||
build.updateState()
|
||||
if options.tag_build and not tag == None:
|
||||
self.tagSuccessful(build.nvr, tag)
|
||||
return True
|
||||
|
||||
def scan(self):
|
||||
|
|
@ -807,7 +908,7 @@ class BuildTracker(object):
|
|||
if data['name'] == 'admin':
|
||||
perm_id = data['id']
|
||||
break
|
||||
session.createTag(our_tag, perm=perm_id, arches='i386 ppc ppc64 x86_64')
|
||||
session.createTag(our_tag, perm=perm_id, arches=options.arches)
|
||||
taginfo = session.getTag(our_tag, strict=True)
|
||||
#we don't need a target, we trigger our own repo creation and
|
||||
#pass that repo_id to the build call
|
||||
|
|
@ -994,11 +1095,10 @@ class BuildTracker(object):
|
|||
|
||||
def _print_builds(self, mylist):
|
||||
"""small helper function for output"""
|
||||
for build_id in mylist:
|
||||
build = self.builds[build_id]
|
||||
for build in mylist:
|
||||
print " %s (%s)" % (build.nvr, build.state)
|
||||
|
||||
def checkJobs(self):
|
||||
def checkJobs(self, tag=None):
|
||||
"""Check outstanding jobs. Return true if anything changes"""
|
||||
ret = False
|
||||
for build_id, build in self.state_idx['pending'].items():
|
||||
|
|
@ -1022,6 +1122,8 @@ class BuildTracker(object):
|
|||
ret = True
|
||||
elif state == 'CLOSED':
|
||||
print "Task %i complete (build %s)" % (build.task_id, build.nvr)
|
||||
if options.tag_build and not tag == None:
|
||||
self.tagSuccessful(build.nvr, tag)
|
||||
build.updateState()
|
||||
ret = True
|
||||
if build.state != 'common':
|
||||
|
|
@ -1084,7 +1186,7 @@ class BuildTracker(object):
|
|||
break
|
||||
return ret
|
||||
|
||||
def runRebuilds(self):
|
||||
def runRebuilds(self, tag=None):
|
||||
"""Rebuild missing builds"""
|
||||
print "Determining rebuild order"
|
||||
#using self.state_idx to track build states
|
||||
|
|
@ -1095,7 +1197,7 @@ class BuildTracker(object):
|
|||
if not self.state_idx['missing'] and not self.state_idx['pending']:
|
||||
#we're done
|
||||
break
|
||||
changed1 = self.checkJobs()
|
||||
changed1 = self.checkJobs(tag)
|
||||
changed2 = self.rebuildMissing()
|
||||
if not changed1 and not changed2:
|
||||
time.sleep(30)
|
||||
|
|
@ -1103,18 +1205,25 @@ class BuildTracker(object):
|
|||
self.report_brief()
|
||||
print "Rebuilt %i builds" % (len(self.state_idx['common']) - initial_avail)
|
||||
|
||||
def tagSuccessful(self, nvr, tag):
|
||||
"""tag completed builds into final tags"""
|
||||
session.tagBuildBypass(tag, nvr)
|
||||
print "tagged %s to %s" % (nvr, tag)
|
||||
|
||||
|
||||
def main(args):
|
||||
tracker = BuildTracker()
|
||||
#binfo = remote.getBuild(args[0], strict=True)
|
||||
#tracker.scanBuild(binfo['id'])
|
||||
tag=None
|
||||
if options.build:
|
||||
binfo = remote.getBuild(options.build, strict=True)
|
||||
tracker.scanBuild(binfo['id'])
|
||||
else:
|
||||
tracker.scanTag(args[0])
|
||||
tag = args[0]
|
||||
tracker.scanTag(tag)
|
||||
tracker.report()
|
||||
tracker.runRebuilds()
|
||||
tracker.runRebuilds(tag)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
@ -1126,7 +1235,7 @@ if __name__ == "__main__":
|
|||
session_opts[k] = getattr(options,k)
|
||||
session = koji.ClientSession(options.server, session_opts)
|
||||
if not options.noauth:
|
||||
session.login()
|
||||
activate_session(session)
|
||||
#XXX - sane auth
|
||||
#XXX - config!
|
||||
remote_opts = {'anon_retry': True}
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
#
|
||||
# kojira Start/Stop kojira
|
||||
#
|
||||
# chkconfig: 345 99 99
|
||||
# chkconfig: - 99 99
|
||||
# description: koji repo administrator
|
||||
# processname: kojira
|
||||
|
||||
|
|
|
|||
996
util/kojisd
996
util/kojisd
|
|
@ -1,996 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# kojisd: a tool to subscribe to builds between koji instances
|
||||
# Copyright (c) 2007-2008 Red Hat
|
||||
# Copyright (c) 2007-2008 Dennis Gilmore
|
||||
#
|
||||
# Koji is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU Lesser General Public
|
||||
# License as published by the Free Software Foundation;
|
||||
# version 2.1 of the License.
|
||||
#
|
||||
# This software is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public
|
||||
# License along with this software; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
#
|
||||
# Authors:
|
||||
# Mike McLean <mikem@redhat.com>
|
||||
# Dennis Gilmore <dennis@ausil.us>
|
||||
|
||||
try:
|
||||
import krbV
|
||||
except ImportError:
|
||||
pass
|
||||
import koji
|
||||
import ConfigParser
|
||||
from email.MIMEText import MIMEText
|
||||
import fnmatch
|
||||
import optparse
|
||||
import os
|
||||
import pprint
|
||||
import smtplib
|
||||
import socket # for socket.error and socket.setdefaulttimeout
|
||||
import sys
|
||||
import time
|
||||
import xmlrpclib # for ProtocolError and Fault
|
||||
import urlgrabber.grabber as grabber
|
||||
import rpm
|
||||
|
||||
# koji.fp.o keeps stalling, probably network errors...
|
||||
# better to time out than to stall
|
||||
socket.setdefaulttimeout(180) #XXX - too short?
|
||||
|
||||
|
||||
OptionParser = optparse.OptionParser
|
||||
if optparse.__version__ == "1.4.1+":
|
||||
def _op_error(self, msg):
|
||||
self.print_usage(sys.stderr)
|
||||
msg = "%s: error: %s\n" % (self._get_prog_name(), msg)
|
||||
if msg:
|
||||
sys.stderr.write(msg)
|
||||
sys.exit(2)
|
||||
OptionParser.error = _op_error
|
||||
|
||||
|
||||
def _(args):
|
||||
"""Stub function for translation"""
|
||||
return args
|
||||
|
||||
def get_options():
|
||||
"""process options from command line and config file"""
|
||||
|
||||
usage = _("%prog [options]")
|
||||
parser = OptionParser(usage=usage)
|
||||
parser.add_option("-c", "--config-file", metavar="FILE",
|
||||
help=_("use alternate configuration file"))
|
||||
parser.add_option("--keytab", help=_("specify a Kerberos keytab to use"))
|
||||
parser.add_option("--principal", help=_("specify a Kerberos principal to use"))
|
||||
parser.add_option("--runas", metavar="USER",
|
||||
help=_("run as the specified user (requires special privileges)"))
|
||||
parser.add_option("--user", help=_("specify user"))
|
||||
parser.add_option("--password", help=_("specify password"))
|
||||
parser.add_option("--noauth", action="store_true", default=False,
|
||||
help=_("do not authenticate"))
|
||||
parser.add_option("-n", "--test", action="store_true", default=False,
|
||||
help=_("test mode"))
|
||||
parser.add_option("-d", "--debug", action="store_true", default=False,
|
||||
help=_("show debug output"))
|
||||
parser.add_option("--first-one", action="store_true", default=False,
|
||||
help=_("stop after scanning first build -- debugging"))
|
||||
parser.add_option("--debug-xmlrpc", action="store_true", default=False,
|
||||
help=_("show xmlrpc debug output"))
|
||||
parser.add_option("--skip-main", action="store_true", default=False,
|
||||
help=_("don't actually run main"))
|
||||
parser.add_option("--build",
|
||||
help=_("scan just this build"))
|
||||
parser.add_option("-s", "--server",
|
||||
help=_("url of local XMLRPC server"))
|
||||
parser.add_option("-r", "--remote",
|
||||
help=_("url of remote XMLRPC server"))
|
||||
parser.add_option("--validtags", action="append", default=[],
|
||||
help=_("List of valid tags to build for"))
|
||||
parser.add_option("--invalidtags", action="append", default=[],
|
||||
help=_("List of tags to not build for"))
|
||||
parser.add_option("--logfile", default="/var/log/kojisd.log",
|
||||
help=_("location of log file"))
|
||||
parser.add_option("--topdir", default="/mnt/koji",
|
||||
help=_(""))
|
||||
parser.add_option("--workpath", default="/mnt/koji/work/kojisd",
|
||||
help=_("location to save import files"))
|
||||
parser.add_option("--importarches", default="",
|
||||
help=_("arches to import"))
|
||||
parser.add_option("--buildarches", action="store", default="",
|
||||
help=_("arches to build"))
|
||||
parser.add_option("--pkgurl", action="store", default="",
|
||||
help=_("url to base packages on remote server"))
|
||||
#parse once to get the config file
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
defaults = parser.get_default_values()
|
||||
config = ConfigParser.ConfigParser()
|
||||
cf = getattr(options, 'config_file', None)
|
||||
if cf:
|
||||
if not os.access(cf, os.F_OK):
|
||||
parser.error(_("No such file: %s") % cf)
|
||||
assert False
|
||||
else:
|
||||
cf = '/etc/kojisd/kojisd.conf'
|
||||
if not os.access(cf, os.F_OK):
|
||||
cf = None
|
||||
if not cf:
|
||||
print "no config file"
|
||||
config = None
|
||||
else:
|
||||
config.read(cf)
|
||||
#allow config file to update defaults for certain options
|
||||
cfgmap = [
|
||||
['keytab', None, 'string'],
|
||||
['principal', None, 'string'],
|
||||
['runas', None, 'string'],
|
||||
['user', None, 'string'],
|
||||
['password', None, 'string'],
|
||||
['noauth', None, 'boolean'],
|
||||
['server', None, 'string'],
|
||||
['remote', None, 'string'],
|
||||
['importarches', None, 'list'],
|
||||
['serverca', None, 'string'],
|
||||
['cert', None, 'string'],
|
||||
['ca', None, 'string'],
|
||||
['validtags', None, 'list'],
|
||||
['invalidtags', None, 'list'],
|
||||
['logfile', None, 'string'],
|
||||
['topdir', None, 'string'],
|
||||
['workpath', None, 'string'],
|
||||
['buildarches', None, 'string'],
|
||||
['pkgurl', None, 'string'],
|
||||
]
|
||||
for name, alias, type in cfgmap:
|
||||
print "Checking %s" % name
|
||||
if alias is None:
|
||||
alias = ('kojisd', name)
|
||||
if config.has_option(*alias):
|
||||
print "Using option %s from config file" % (alias,)
|
||||
if type == 'integer':
|
||||
setattr(defaults, name, config.getint(*alias))
|
||||
elif type == 'boolean':
|
||||
setattr(defaults, name, config.getboolean(*alias))
|
||||
elif type == 'list':
|
||||
line = config.get(*alias)
|
||||
line = line.split()
|
||||
setattr(defaults, name, line)
|
||||
else:
|
||||
setattr(defaults, name, config.get(*alias))
|
||||
#parse again with updated defaults
|
||||
(options, args) = parser.parse_args(values=defaults)
|
||||
|
||||
return options, args
|
||||
|
||||
time_units = {
|
||||
'second' : 1,
|
||||
'minute' : 60,
|
||||
'hour' : 3600,
|
||||
'day' : 86400,
|
||||
'week' : 604800,
|
||||
}
|
||||
time_unit_aliases = [
|
||||
#[unit, alias, alias, ...]
|
||||
['week', 'weeks', 'wk', 'wks'],
|
||||
['hour', 'hours', 'hr', 'hrs'],
|
||||
['day', 'days'],
|
||||
['minute', 'minutes', 'min', 'mins'],
|
||||
['second', 'seconds', 'sec', 'secs', 's'],
|
||||
]
|
||||
def parse_duration(str):
|
||||
"""Parse time duration from string, returns duration in seconds"""
|
||||
ret = 0
|
||||
n = None
|
||||
unit = None
|
||||
def parse_num(s):
|
||||
try:
|
||||
return int(s)
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
return float(s)
|
||||
except ValueError:
|
||||
pass
|
||||
return None
|
||||
for x in str.split():
|
||||
if n is None:
|
||||
n = parse_num(x)
|
||||
if n is not None:
|
||||
continue
|
||||
#perhaps the unit is appended w/o a space
|
||||
for names in time_unit_aliases:
|
||||
for name in names:
|
||||
if x.endswith(name):
|
||||
n = parse_num(x[:-len(name)])
|
||||
if n is None:
|
||||
continue
|
||||
unit = names[0]
|
||||
# combined at end
|
||||
break
|
||||
if unit:
|
||||
break
|
||||
else:
|
||||
raise ValueError, "Invalid time interval: %s" % str
|
||||
if unit is None:
|
||||
x = x.lower()
|
||||
for names in time_unit_aliases:
|
||||
for name in names:
|
||||
if x == name:
|
||||
unit = names[0]
|
||||
break
|
||||
if unit:
|
||||
break
|
||||
else:
|
||||
raise ValueError, "Invalid time interval: %s" % str
|
||||
ret += n * time_units[unit]
|
||||
n = None
|
||||
unit = None
|
||||
return ret
|
||||
|
||||
def error(msg=None, code=1):
|
||||
if msg:
|
||||
sys.stderr.write(msg + "\n")
|
||||
sys.stderr.flush()
|
||||
sys.exit(code)
|
||||
|
||||
def warn(msg):
|
||||
sys.stderr.write(msg + "\n")
|
||||
sys.stderr.flush()
|
||||
|
||||
def ensure_connection(session):
|
||||
try:
|
||||
ret = session.getAPIVersion()
|
||||
except xmlrpclib.ProtocolError:
|
||||
error(_("Error: Unable to connect to server"))
|
||||
if ret != koji.API_VERSION:
|
||||
warn(_("WARNING: The server is at API version %d and the client is at %d" % (ret, koji.API_VERSION)))
|
||||
|
||||
def activate_session(session):
|
||||
"""Test and login the session is applicable"""
|
||||
global options
|
||||
if options.noauth:
|
||||
#skip authentication
|
||||
pass
|
||||
elif os.path.isfile(options.cert):
|
||||
# authenticate using SSL client cert
|
||||
session.ssl_login(options.cert, options.ca, options.serverca, proxyuser=options.runas)
|
||||
elif options.user:
|
||||
# authenticate using user/password
|
||||
session.login()
|
||||
elif has_krb_creds():
|
||||
try:
|
||||
if options.keytab and options.principal:
|
||||
session.krb_login(principal=options.principal, keytab=options.keytab, proxyuser=options.runas)
|
||||
else:
|
||||
session.krb_login(proxyuser=options.runas)
|
||||
except krbV.Krb5Error, e:
|
||||
error(_("Kerberos authentication failed: %s (%s)") % (e.args[1], e.args[0]))
|
||||
except socket.error, e:
|
||||
warn(_("Could not connect to Kerberos authentication service: %s") % e.args[1])
|
||||
if not options.noauth and not session.logged_in:
|
||||
error(_("Unable to log in, no authentication methods available"))
|
||||
ensure_connection(session)
|
||||
if options.debug:
|
||||
print "successfully connected to hub"
|
||||
|
||||
def getHubTags(session):
|
||||
'''Determine the tags on the build hub'''
|
||||
tags = []
|
||||
allTags = session.listTags()
|
||||
for remoteTag in allTags:
|
||||
tags.append(remoteTag['name'])
|
||||
print "tags : %s" % buildTags
|
||||
return buildTags
|
||||
|
||||
def syncTags():
|
||||
''' sync the tags from the master to the slave. due to inheritance
|
||||
its easier to sync tags completely between the hubs
|
||||
'''
|
||||
toAddTags = []
|
||||
for tag in buildTags:
|
||||
if tag not in localTags:
|
||||
toAddTags.append(tag)
|
||||
orderToAddTags = []
|
||||
for tag in toAddTags:
|
||||
rawParents = remote.getFullInheritance(tag)
|
||||
for rawParent in rawParents:
|
||||
if rawParent['currdepth'] == 1:
|
||||
print tag
|
||||
print rawParent['name']
|
||||
orderToAddTags.append(["%s", "%s"] % (tag, rawParent['name']))
|
||||
|
||||
for tag, parent in orderToAddTags:
|
||||
session.createTag(tag, parent, arches=buildarches)
|
||||
# TODO: handle errors gracefully, order tag creation. handle targets
|
||||
return
|
||||
|
||||
def main(args):
    """Drive one shadow-build pass: pick tags, scan each, then rebuild.

    The build-tag set is either the explicit --validtags list or all tags
    on the remote hub minus --invalidtags. Each tag is scanned for builds
    missing locally and rebuilds are kicked off.

    Uses the module globals ``options`` and ``remote``.

    Bugfix: ``options.invalidtags`` may be None (the shipped config has
    it commented out); iterating None raised TypeError.
    """
    #XXX get tags

    buildTags = []
    if options.validtags is not None:
        # Explicit tag list from the configuration wins.
        buildTags = options.validtags
    else:
        buildTags = getHubTags(remote)
        # Treat an unset invalidtags option as an empty list.
        for tag in (options.invalidtags or []):
            if tag in buildTags:
                buildTags.remove(tag)
    print("BuildTags: %s" % buildTags)
    #syncTags()

    tracker = BuildTracker()
    # go through each tag and see what needs building
    for buildTag in buildTags:
        print("BuildTag: %s" % buildTag)
        tracker.scanTag(buildTag)
    tracker.report()
    tracker.showOrder()
    tracker.runRebuilds()
|
||||
|
||||
|
||||
|
||||
def remote_buildroots(build_id):
    """Collect the distinct buildroot ids used by a remote build."""
    #XXX - only used in old test code (foo)
    seen = {}
    for rpm_info in remote.listRPMs(build_id):
        broot = rpm_info.get('buildroot_id')
        if broot:
            seen[broot] = 1
        else:
            print("Warning: no buildroot for: %s" % rpm_info)
    return seen.keys()
|
||||
|
||||
def remote_br_builds(brlist):
    """Given a list of buildroots, return build data of contents"""
    #XXX - only used in old test code (foo)
    visited = {}
    build_ids = {}
    for broot in brlist:
        # skip duplicates in the input list
        if broot in visited:
            continue
        visited[broot] = 1
        for component in remote.listRPMs(componentBuildrootID=broot):
            build_ids[component['build_id']] = 1
    result = {}
    for build in build_ids:
        result[build] = remote.getBuild(build)
    return result
|
||||
|
||||
def foo():
    """just experimenting...."""
    binfo = remote.getBuild(args[0])
    buildroots = remote_buildroots(binfo['id'])
    if not buildroots:
        # nothing we can do
        return
    build_idx = remote_br_builds(buildroots)
    # group the component builds by package name
    by_name = {}
    for info in build_idx.itervalues():
        by_name.setdefault(info['name'], []).append(info)
    names = by_name.keys()
    missing = {}
    found = {}
    for name, candidates in by_name.iteritems():
        if len(candidates) > 1:
            print("Warning: found multiple versions of %s: %s" % (name, candidates))
            # pick latest (by completion time)
            ranked = [(c['completion_ts'], c) for c in candidates]
            ranked.sort()
            build = ranked[-1][1]
        else:
            build = candidates[0]
        nvr = "%(name)s-%(version)s-%(release)s" % build
        build.setdefault('nvr', nvr)
        # see if our server has it
        ours = session.getBuild(nvr)
        if not ours:
            missing[name] = build
        else:
            ours.setdefault('nvr', nvr)
            found[name] = ours
    for name in sorted(found.keys()):
        print("Found common build: %(nvr)s" % found[name])
    for name in sorted(missing.keys()):
        print("Missing remote build: %(nvr)s" % missing[name])
|
||||
|
||||
|
||||
class TrackedBuild(object):
    """State of a single remote build being considered for shadowing.

    On construction the build is looked up on both hubs and classified
    into one of the states indexed by BuildTracker.state_idx:

      common  - a COMPLETE build of the same NVR exists locally
      broken  - a local build exists but is DELETED or still BUILDING
      noroot  - the remote build has no buildroot data (deps unknowable)
      missing - no usable local build; dependencies scanned via getDeps()

    Uses the module globals ``remote``, ``session`` and ``koji``.
    """

    def __init__(self, build_id, child=None, tracker=None):
        self.id = build_id
        self.tracker = tracker
        self.info = remote.getBuild(build_id)
        self.nvr = "%(name)s-%(version)s-%(release)s" % self.info
        self.children = {}
        self.state = None
        self.order = 0
        if child is not None:
            # children tracks the builds that were built using this one
            self.children[child] = 1
        # see if we have it locally
        ours = session.getBuild(self.nvr)
        self.rebuilt = False
        if ours is not None:
            state = koji.BUILD_STATES[ours['state']]
            if state == 'COMPLETE':
                self.setState("common")
                # a local task_id means we actually rebuilt it here
                # rather than importing it
                if ours['task_id']:
                    self.rebuilt = True
                return
            elif state in ('FAILED', 'CANCELED'):
                # treat these as having no build
                pass
            else:
                # DELETED, BUILDING
                self.setState("broken")
                return
        self.setState("missing")
        self.getDeps()  # sets deps, br_tag, base, order, (maybe state)

    def setState(self, state):
        """Move this build to a new state, keeping tracker.state_idx in sync."""
        if state == self.state:
            return
        if self.state is not None and self.tracker:
            del self.tracker.state_idx[self.state][self.id]
        self.state = state
        if self.tracker:
            self.tracker.state_idx.setdefault(self.state, {})[self.id] = 1

    def addChild(self, child):
        """Record that build id ``child`` was built using this build."""
        self.children[child] = 1

    def setExtraArchesFromRPMs(self, rpms=None):
        """Record which non-canonical arches this build produced rpms for."""
        if rpms is None:
            rpms = remote.listRPMs(self.id)
        arches = {}
        for rpminfo in rpms:
            arches.setdefault(rpminfo['arch'], 1)
        self.extraArches = [a for a in arches if koji.canonArch(a) != a]

    def getBuildroots(self):
        """Return a list of buildroot ids for the remote build."""
        rpms = remote.listRPMs(self.id)
        # while we've got the rpm list, let's note the extra arches
        # XXX - really should reorganize this a bit
        self.setExtraArchesFromRPMs(rpms)
        brs = {}
        bad = []
        for rinfo in rpms:
            br_id = rinfo.get('buildroot_id')
            if not br_id:
                bad.append(rinfo)
                continue
            brs[br_id] = 1
        if brs and bad:
            print("Warning: some rpms for %s lacked buildroots:" % self.nvr)
            for rinfo in bad:
                # Bugfix: the original format string lacked the 's'
                # conversion characters and raised ValueError when hit.
                print("  %(name)s-%(version)s-%(release)s.%(arch)s" % rinfo)
        return list(brs.keys())

    def getDeps(self):
        """Determine dependency builds, buildroot tag and base package set.

        Sets self.deps (build_id -> 1), self.br_tag (most-used buildroot
        tag), self.base (packages common to every buildroot's base
        install) and self.order (highest buildroot id, used as a rough
        rebuild ordering key). Sets state to "noroot" when no buildroot
        data is available.
        """
        buildroots = self.getBuildroots()
        if not buildroots:
            self.setState("noroot")
            return
        buildroots.sort()
        self.order = buildroots[-1]
        seen = {}    # used to avoid scanning the same buildroot twice
        builds = {}  # track which builds we need for a rebuild
        bases = {}   # track base install for buildroots
        tags = {}    # track buildroot tag(s)
        for br_id in buildroots:
            if br_id in seen:
                continue
            seen[br_id] = 1
            br_info = remote.getBuildroot(br_id, strict=True)
            tags.setdefault(br_info['tag_name'], 0)
            tags[br_info['tag_name']] += 1
            for rinfo in remote.listRPMs(componentBuildrootID=br_id):
                builds[rinfo['build_id']] = 1
                if not rinfo['is_update']:
                    bases.setdefault(rinfo['name'], {})[br_id] = 1
        # we want to record the intersection of the base sets
        # XXX - this makes some assumptions about homogeneity that, while reasonable,
        # are not strictly required of the db.
        # The only way I can think of to break this is if some significant tag/target
        # changes happened during the build startup and some subtasks got the old
        # repo and others the new one.
        base = []
        for name, brlist in bases.items():
            for br_id in buildroots:
                if br_id not in brlist:
                    break
            else:
                # each buildroot had this as a base package
                base.append(name)
        if len(tags) > 1:
            print("Warning: found multiple buildroot tags for %s: %s" % (self.nvr, list(tags.keys())))
            # pick the most frequently used tag
            counts = [(n, tag) for tag, n in tags.items()]
            # Bugfix: the original called the undefined name ``sort(counts)``
            # (NameError); sort the list in place instead.
            counts.sort()
            tag = counts[-1][1]
        else:
            tag = list(tags.keys())[0]
        self.deps = builds
        self.br_tag = tag
        self.base = base
|
||||
|
||||
class BuildTracker(object):
    """Track remote builds and drive their import or rebuild locally.

    builds maps build_id -> TrackedBuild; state_idx maps state name ->
    {build_id: 1} and is maintained by TrackedBuild.setState().

    NOTE(review): both attributes are class-level, so every instance
    shares the same dicts; harmless while only one tracker is created
    per run, but worth confirming before creating more than one.
    """

    builds = {}
    state_idx = {}

    def rpmvercmp (self, (e1, v1, r1), (e2, v2, r2)):
        """find out which build is newer

        Returns "first", "same" or "second" according to
        rpm.labelCompare() on the two (epoch, version, release) tuples.
        """
        rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
        if rc == 1:
            return "first"
        elif rc == 0:
            return "same"
        else:
            return "second"

    def scanBuild(self, build_id, tag, from_build=None, depth=0):
        """Recursively scan a build and its dependencies

        Compares the remote build against the latest local build of the
        same package in ``tag`` and either reports it as common, imports
        it directly (noarch etc.), queues a rebuild, or recurses into
        its dependency builds. Returns the TrackedBuild.
        """
        #print build_id
        build = self.builds.get(build_id)
        if build:
            #already scanned
            if from_build:
                build.addChild(from_build.id)
            return build
        #otherwise...
        child_id = None
        if from_build:
            child_id = from_build.id
        build = TrackedBuild(build_id, child=child_id, tracker=self)
        #print build.id, build.nvr
        self.builds[build_id] = build
        # periodic progress report while scanning large tags
        if len(self.builds) % 50 == 0:
            self.report()
        if from_build:
            tail = " (from %s)" % from_build.nvr
        else:
            tail = ""
        # indent output by recursion depth
        head = "  " * depth
        parentTask = remote.getBuild(int(build.id))
        latestBuild = session.getLatestBuilds(tag, package=parentTask['package_name'])
        if latestBuild:
            parentevr = (str(parentTask['epoch']), parentTask['version'], parentTask['release'])
            latestevr = (str(latestBuild[0]['epoch']), latestBuild[0]['version'], latestBuild[0]['release'])
            newestRPM = self.rpmvercmp( parentevr, latestevr)
            newBuild = remote.getBuild(latestBuild[0]['nvr'])
        else:
            # We get here when there is no build on the local hub
            newestRPM = "first"
        if newestRPM == "first":
            # remote build is newer than (or absent from) the local hub
            if build.state == "common":
                #we're good
                if build.rebuilt:
                    print "%sCommon build (rebuilt) %s%s" % (head, build.nvr, tail)
                else:
                    print "%sCommon build %s%s" % (head, build.nvr, tail)
            elif build.state == "noroot":
                #we're fucked, so build with latest build root we have
                #TODO: build with the latest buildroot
                print "%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail)
                #get src url

                if parentTask['task_id'] is not None:
                    # replay the original build request locally
                    parentRequest = remote.getTaskRequest(parentTask['task_id'])
                    session.build(parentRequest[0], tag, parentRequest[2])
                    print "%sInfo: building %s%s"%(head, parentRequest[0], tail)
                    build.state = "broken"
                else:
                    print "Error: unable to queue %s to build it was imported upstream" % parentTask['nvr']
            elif build.state == "broken":
                #also fucked
                #TODO: find replacement package version
                print "%sWarning: build exists, but is invalid: %s%s" % (head, build.nvr, tail)
            elif build.state == "missing":
                # check to see if we just import this from the remote host.
                rpmfiles = remote.listRPMs(buildID=build.id, arches=options.importarches)
                rpmname = remote.getBuild(build.id)['package_name']
                # kernel is funky we should never just import it

                # XXX: should this be a config of packages we dont import?

                if rpmfiles and rpmname != "kernel":
                    print "%sInfo: Importing build %s%s" %(head, build.nvr, tail)
                    self.importBuild(build.id, tag, rpmfiles, rpmname, build.nvr )
                    build.state = "imported"
                    return build
                # lets see if we have a newer build

                #scan its deps
                print "%sMissing build %s%s. Scanning deps..." % (head, build.nvr, tail)
                for dep_id in build.deps:
                    # retry each dep a few times to ride out network hiccups
                    for retry in xrange(10):
                        try:
                            self.scanBuild(dep_id, tag, from_build=build, depth=depth+1)
                        except (socket.timeout, socket.error):
                            print "retry"
                            continue
                        break
                    else:
                        # all retries exhausted
                        print "Error: unable to scan dep: %i for %s" % (dep_id, build.nvr)
                        continue
        elif newestRPM == "second":
            # local hub already has a newer build than the remote one
            # newBuild will be None when the build does not exist on the remote hub
            if newBuild == None:
                #if the newer build does not exist on the remote hub fill in the info from the localhub
                # this should only ever happen during bootstrapping or if we build something on the
                # local hub to make sure a fix works before building on the remote system
                newBuild = session.getBuild(latestBuild[0]['nvr'])
                build.id = newBuild['id']
                build.tracker = None
                build.info = newBuild
                build.nvr = "%s" % newBuild['nvr']
                build.children = {}
                build.state = "common"
            else:
                build = TrackedBuild(newBuild['id'], child=None, tracker=self)
                build.state = "common"
                #self.builds.get(newBuild['task_id'])
            print "%sNewer build %s%s" % (head, build.nvr, tail)
        elif newestRPM == "same":
            #we're good
            if build.rebuilt:
                print "%sCommon/Latest build (rebuilt) %s%s" % (head, build.nvr, tail)
            else:
                print "%sCommon/Latest build %s%s" % (head, build.nvr, tail)
        return build

    def importBuild(self, build_id, tag, rpmfiles, pkg, nvr):
        '''import and tag a build from remote hub

        Downloads the src.rpm plus each listed rpm from options.pkgurl
        into options.workpath, imports them via session.importRPM, and
        finally tags the build into ``tag``.
        '''
        fname = "%s-%s-%s.src.rpm" % (pkg, rpmfiles[0]['version'], rpmfiles[0]['release'])
        url = "%s/%s/%s/%s/src/%s" % (options.pkgurl, pkg, rpmfiles[0]['version'], rpmfiles[0]['release'], fname)
        print url
        file = grabber.urlopen(url, text = "%s.%s" % (pkg, 'src'))
        out = os.open(os.path.join(options.workpath, fname), os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0666)
        try:
            # stream the download to disk in 4k chunks
            while 1:
                buf = file.read(4096)
                if not buf:
                    break
                os.write(out, buf)
        finally:
            os.close(out)
            file.close()
        print 'Downloaded: %s' % fname
        session.importRPM('kojisd', fname)
        print 'Imported: %s' % fname
        # now fetch and import each binary rpm the same way
        for rpm in rpmfiles:
            fname = "%s-%s-%s.%s.rpm" % (rpm['name'], rpm['version'], rpm['release'], rpm['arch'])
            url = "%s/%s/%s/%s/%s/%s" % (options.pkgurl, pkg, rpm['version'], rpm['release'], rpm['arch'], fname)
            print url
            file = grabber.urlopen(url, text = "%s.%s" % (rpm['name'], rpm['arch']))
            out = os.open(os.path.join(options.workpath, fname), os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0666)
            try:
                while 1:
                    buf = file.read(4096)
                    if not buf:
                        break
                    os.write(out, buf)
            finally:
                os.close(out)
                file.close()
            print 'Downloaded: %s' % fname
            session.importRPM('kojisd', fname)
            print 'Imported: %s' % fname
        session.tagBuildBypass(tag, nvr)
        print 'Tagged: %s' % nvr

    def scanTag(self, tag):
        """Scan the latest builds in a remote tag"""
        taginfo = remote.getTag(tag)
        builds = remote.listTagged(taginfo['id'], latest=True)
        for build in builds:
            # retry each build a few times to ride out network hiccups
            for retry in xrange(10):
                try:
                    self.scanBuild(build['id'], tag)
                    # --first-one: stop after a single build (debug aid)
                    if options.first_one:
                        return
                except (socket.timeout, socket.error):
                    print "retry"
                    continue
                break
            else:
                # all retries exhausted
                print "Error: unable to scan %(name)s-%(version)s-%(release)s" % build
                continue

    def scan(self):
        """Scan based on config file

        NOTE(review): unfinished stub -- fetches the tag list but never
        uses it; nothing in this file calls it.
        """
        to_scan = []
        alltags = remote.listTags()

    def rebuild(self, build):
        """Rebuild a remote build using closest possible buildroot

        Verifies that every dependency is already common, then prepares
        a SHADOWBUILD-<tag> build tag whose package list, tagged builds
        and 'build' group mirror the remote buildroot, batching all hub
        changes into one multicall. The actual build submission is still
        TODO.
        """
        #first check that we can
        deps = []
        for build_id in build.deps:
            dep = self.builds.get(build_id)
            if not dep:
                print "Missing dependency %i for %s. Not scanned?" % (build_id, build.nvr)
                return
            if dep.state != 'common':
                print "Dependency missing for %s: %s (%s)" % (build.nvr, dep.nvr, dep.state)
                return
            deps.append(dep)
        #check/create tag
        our_tag = "SHADOWBUILD-%s" % build.br_tag
        taginfo = session.getTag(our_tag)
        parents = None
        if not taginfo:
            #XXX - not sure what is best here
            #how do we pick arches? for now just assume all....
            # config option for
            #XXX this call for perms is stupid, but it's all we've got
            perm_id = None
            for data in session.getAllPerms():
                if data['name'] == 'admin':
                    perm_id = data['id']
                    break
            # use config option for arches
            session.createTag(our_tag, perm=perm_id, arches='%s' % buildarches)
            taginfo = session.getTag(our_tag, strict=True)
            # self-referential target: tag builds land back in the same tag
            session.createBuildTarget(taginfo['name'], taginfo['id'], taginfo['id'])
        else:
            parents = session.getInheritanceData(taginfo['id'])
            if parents:
                print "Warning: shadow build tag has inheritance"
        #check package list
        pkgs = {}
        for pkg in session.listPackages(tagID=taginfo['id']):
            pkgs[pkg['package_name']] = pkg
        missing_pkgs = []
        for dep in deps:
            name = dep.info['name']
            if not pkgs.has_key(name):
                #guess owner: whoever owns the package in the most tags
                owners = {}
                for pkg in session.listPackages(pkgID=name):
                    owners.setdefault(pkg['owner_id'], []).append(pkg)
                if owners:
                    order = [(len(v), k) for k, v in owners.iteritems()]
                    order.sort()
                    owner = order[-1][1]
                else:
                    #just use ourselves
                    owner=session.getLoggedInUser()['id']
                missing_pkgs.append((name, owner))
        #check build list
        cur_builds = {}
        for binfo in session.listTagged(taginfo['id']):
            #index by name in tagging order (latest first)
            cur_builds.setdefault(binfo['name'], []).append(binfo)
        to_untag = []
        to_tag = []
        for dep in deps:
            #XXX - assuming here that there is only one dep per 'name'
            # may want to check that this is true
            cur_order = cur_builds.get(dep.info['name'], [])
            tagged = False
            for binfo in cur_order:
                if binfo['nvr'] == dep.nvr:
                    tagged = True
                    #may not be latest now, but it will be after we do all the untagging
                else:
                    # note that the untagging keeps older builds from piling up. In a sense
                    # we're gc-pruning this tag ourselves every pass.
                    to_untag.append(binfo)
            if not tagged:
                to_tag.append(dep)
        drop_groups = []
        build_group = None
        for group in session.getTagGroups(taginfo['id']):
            if group['name'] == 'build':
                build_group = group
            else:
                # we should have no other groups but build
                print "Warning: found stray group: %s" % group
                drop_groups.append(group['name'])
        if build_group:
            #TODO - fix build group package list based on base of build to shadow
            needed = dict([(n,1) for n in build.base])
            current = dict([(p['package'],1) for p in build_group['packagelist']])
            add_pkgs = [n for n in needed if not current.has_key(n)]
            drop_pkgs = [n for n in current if not needed.has_key(n)]
            #no group deps needed/allowed
            drop_deps = [(g['name'], 1) for g in build_group['grouplist']]
            if drop_deps:
                print "Warning: build group had deps: %r" % build_group
        else:
            add_pkgs = build.base
            drop_pkgs = []
            drop_deps = []
        #update package list, tagged packages, and groups in one multicall/transaction
        #(avoid useless repo regens)
        session.multicall = True
        for name, owner in missing_pkgs:
            session.packageListAdd(taginfo['id'], name, owner=owner)
        for binfo in to_untag:
            session.untagBuildBypass(taginfo['id'], binfo['id'])
        for dep in to_tag:
            session.tagBuildBypass(taginfo['id'], dep.nvr)
            #shouldn't need force here
        #set groups data
        if not build_group:
            # build group not present. add it
            session.groupListAdd(taginfo['id'], 'build', force=True)
            #using force in case group is blocked. This shouldn't be the case, but...
        for pkg_name in drop_pkgs:
            #in principal, our tag should not have inheritance, so the remove call is the right thing
            session.groupPackageListRemove(taginfo['id'], 'build', pkg_name)
        for pkg_name in add_pkgs:
            session.groupPackageListAdd(taginfo['id'], 'build', pkg_name)
            #we never add any blocks, so forcing shouldn't be required
        #TODO - adjust extra_arches for package to build
        #TODO - get event id to facilitate waiting on repo
        # not sure if getLastEvent is good enough
        # short of adding a new call, perhaps use getLastEvent together with event of
        # current latest repo for tag
        session.getLastEvent()
        results = session.multiCall()
        # last multicall result is the getLastEvent() reply
        [event_id, event_ts] = results[-1]
        #TODO - verify / check results ?
        #TODO - call newRepo
        #TODO - upload src
        src = "" #XXX
        #TODO - wait for repo
        #TODO - kick off build
        #task_id = session.build(src, taginfo['name'], ... ) #XXX
        #TODO - add task/build to some sort of watch list
        #TODO - post-build validation

    def report(self):
        """Print a timestamped summary of how many builds are in each state."""
        print time.asctime()
        print "%i builds" % len(self.builds)
        states = self.state_idx.keys()
        states.sort()
        for s in states:
            print "%s: %i" % (s, len(self.state_idx[s]))

    def runRebuilds(self):
        """Rebuild missing builds

        Walks builds in .order sequence; a missing build is rebuilt only
        once every one of its deps is available.
        """
        print "Determining rebuild order"
        builds = [(b.order, b.id, b) for b in self.builds.itervalues()]
        builds.sort()
        b_avail = {}
        ok = 0
        bad = 0
        for order, build_id, build in builds:
            if build.state == 'common':
                b_avail[build_id] = 1
            elif build.state == 'missing':
                #check deps
                not_avail = [x for x in build.deps.iterkeys() if not b_avail.get(x)]
                if not_avail:
                    print "Can't rebuild %s, missing %i deps" % (build.nvr, len(not_avail))
                    b_avail[build_id] = 0
                    bad += 1
                    for dep_id in not_avail:
                        dep = self.builds[dep_id]
                        avail = b_avail.get(dep_id)
                        if avail is None:
                            print "  %s (out of order?)" % dep.nvr
                        elif not avail:
                            print "  %s (%s)" % (dep.nvr, dep.state)
                else:
                    ok += 1
                    print "rebuild: %s" % build.nvr
                    self.rebuild(build)
                    # NOTE(review): deliberate single-rebuild-per-run stop;
                    # the assignment below is unreachable because of it.
                    break #XXX
                    b_avail[build_id] = 1
            else:
                print "build: %s, state: %s, #children: %i" \
                        % (build.nvr, build.state, len(build.children))
                b_avail[build_id] = 0
        print "ok: %i, bad: %i" % (ok, bad)

    def showOrder(self):
        """Show order of rebuilds (for debugging)

        This is sort of a dress rehearsal for the rebuild scheduler
        """
        print "Determining rebuild order"
        builds = [(b.order, b.id, b) for b in self.builds.itervalues()]
        #builds = self.builds.items() # (id, build)
        builds.sort()
        b_avail = {}
        ok = 0
        bad = 0
        #for build_id, build in builds:
        for order, build_id, build in builds:
            if build.state == 'common':
                b_avail[build_id] = 1
            elif build.state == 'missing':
                #for sanity, check deps
                for dep_id in build.deps.iterkeys():
                    dep = self.builds[dep_id]
                    avail = b_avail.get(dep_id)
                    if avail is None:
                        print "Can't rebuild %s, missing %s (out of order?)" % (build.nvr, dep.nvr)
                        b_avail[build_id] = 0
                        bad += 1
                        break
                    elif not avail:
                        print "Can't rebuild %s, missing %s (%s)" % (build.nvr, dep.nvr, dep.state)
                        b_avail[build_id] = 0
                        bad += 1
                        break
                else:
                    # all deps available
                    ok += 1
                    print "rebuild: %s" % build.nvr
                    b_avail[build_id] = 1
            else:
                print "build: %s, state: %s, #children: %i" \
                        % (build.nvr, build.state, len(build.children))
                #show_children(build_id)
                b_avail[build_id] = 0
        print "ok: %i, bad: %i" % (ok, bad)
|
||||
|
||||
def bar():
    """Scan a single build (--build) or a whole tag, then rebuild."""
    tracker = BuildTracker()
    if not options.build:
        tracker.scanTag(args[0])
    else:
        # NOTE(review): scanBuild() takes a mandatory tag argument; this
        # call would raise TypeError if this path were ever exercised.
        info = remote.getBuild(options.build, strict=True)
        tracker.scanBuild(info['id'])
    tracker.report()
    tracker.showOrder()
    tracker.runRebuilds()
|
||||
|
||||
|
||||
if __name__ == "__main__":

    # Parse CLI/config; options and args become module globals read by
    # main(), syncTags(), bar() and the tracker classes.
    options, args = get_options()
    print options
    session_opts = {}
    for k in ('user', 'password', 'debug_xmlrpc', 'debug'):
        session_opts[k] = getattr(options,k)
    print options.server
    # local (slave) hub session
    session = koji.ClientSession(options.server, session_opts)
    if not options.noauth:
        activate_session(session)
    #XXX - sane auth
    #XXX - config!
    # master hub session, read-only (no auth performed)
    remote = koji.ClientSession(options.remote, session_opts)
    rv = 0
    try:
        rv = main(args)
        # normalize a falsy return into exit code 0
        if not rv:
            rv = 0
    except KeyboardInterrupt:
        pass
    except SystemExit:
        rv = 1
    #except:
    #    if options.debug:
    #        raise
    #    else:
    #        exctype, value = sys.exc_info()[:2]
    #        rv = 1
    #        print "%s: %s" % (exctype, value)
    # best-effort logout; failure here should not change the exit code
    try:
        session.logout()
    except:
        pass
    sys.exit(rv)
|
||||
|
||||
|
|
@ -1,55 +0,0 @@
|
|||
[kojisd]
|
||||
; For user/pass authentication
|
||||
; user=kojisd
|
||||
; password=kojisd
|
||||
|
||||
; For Kerberos authentication
|
||||
; the principal to connect with
|
||||
;principal=koji/repo@EXAMPLE.COM
|
||||
; The location of the keytab for the principal above
|
||||
;keytab=/etc/kojira.keytab
|
||||
|
||||
; The URL for the building koji hub server
|
||||
server = http://sparc.koji.fedoraproject.org/kojihub
|
||||
|
||||
; The URL for the master koji hub server
|
||||
remote = http://koji.fedoraproject.org/kojihub
|
||||
|
||||
; The directory containing the repos/ directory
|
||||
;topdir = /mnt/koji
|
||||
|
||||
; Logfile
|
||||
;logfile = /var/log/kojisad.log
|
||||
|
||||
; kojisd daemon user cert for secondary hub
|
||||
clientcert = /etc/kojisd/fedora.cert
|
||||
clientca = /etc/kojisad/fedora-upload-ca.cert
|
||||
serverca = /etc/kojisad/fedora-server-ca.cert
|
||||
|
||||
|
||||
; tags on primary we want to build
|
||||
validtags = dist-f8 dist-f9 dist-f8-updates dist-f8-updates-candidate dist-f8-updates-testing
|
||||
;validtags = ['dist-f8', 'dist-f9', 'dist-f8-updates', 'dist-f8-updates-candidate', 'dist-f8-updates-testing']
|
||||
; tags from primary we do not want to build
|
||||
;invalidtags =
|
||||
|
||||
; Arches we import directly from the master hub
|
||||
importarches = noarch
|
||||
; Arches we will build for
|
||||
buildarches = 'sparcv9 sparc64'
|
||||
; Work directory, where we download files to
|
||||
;workpath = /mnt/koji/work/kojisd
|
||||
pkgurl = http://koji.fedoraproject.org/packages
|
||||
; user to run build as
|
||||
;buildas =
|
||||
|
||||
;configuration for SSL authentication
|
||||
|
||||
;client certificate
|
||||
cert = /etc/kojisd/kojisd_key_and_cert.pem
|
||||
|
||||
;certificate of the CA that issued the client certificate
|
||||
ca = /etc/pki/tls/certs/extras_cacert.pem
|
||||
|
||||
;certificate of the CA that issued the HTTP server certificate
|
||||
serverca = /etc/pki/tls/certs/extras_cacert.pem
|
||||
|
|
@ -1,85 +0,0 @@
|
|||
#! /bin/sh
#
# kojisd        Start/Stop kojisd
#
# chkconfig: 345 99 99
# description: koji subscriber daemon
# processname: kojisd

# This is an interactive program, we need the current locale

# Source function library.
. /etc/init.d/functions

# Check that we're a privileged user; exit quietly (status 0) otherwise
[ `id -u` = 0 ] || exit 0

# Pull in daemon options (RUNAS, FORCE_LOCK, KOJIRA_DEBUG, KOJIRA_VERBOSE)
[ -f /etc/sysconfig/kojisd ] && . /etc/sysconfig/kojisd

prog="kojisd"

# Check that networking is up.
if [ "$NETWORKING" = "no" ]
then
    exit 0
fi

# Nothing to do if the daemon binary is not installed
[ -f /usr/sbin/kojisd ] || exit 0

RETVAL=0

# Start the daemon, translating sysconfig switches into CLI flags.
start() {
    echo -n $"Starting $prog: "
    cd /
    ARGS=""
    # NOTE(review): the variable names still say KOJIRA_*; presumably
    # copied from the kojira init script -- confirm before renaming.
    [ "$FORCE_LOCK" == "Y" ] && ARGS="$ARGS --force-lock"
    [ "$KOJIRA_DEBUG" == "Y" ] && ARGS="$ARGS --debug"
    [ "$KOJIRA_VERBOSE" == "Y" ] && ARGS="$ARGS --verbose"
    if [ -n "$RUNAS" -a "$RUNAS" != "root" ]; then
        daemon --user "$RUNAS" /usr/sbin/kojisd $ARGS
    else
        daemon /usr/sbin/kojisd $ARGS
    fi
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/kojisd
    return $RETVAL
}

# Stop the daemon and clear the subsys lock on success.
stop() {
    echo -n $"Stopping $prog: "
    killproc kojisd
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/kojisd
    return $RETVAL
}

restart() {
    stop
    start
}

# See how we were called.
case "$1" in
start)
    start
    ;;
stop)
    stop
    ;;
status)
    status $prog
    ;;
restart|reload)
    restart
    ;;
condrestart)
    # restart only if already running (lock file present)
    [ -f /var/lock/subsys/kojisd ] && restart || :
    ;;
*)
    echo $"Usage: $0 {start|stop|status|restart|reload|condrestart}"
    exit 1
esac

exit $?
|
||||
Loading…
Add table
Add a link
Reference in a new issue