diff --git a/Authors b/Authors new file mode 100644 index 00000000..a9337342 --- /dev/null +++ b/Authors @@ -0,0 +1,3 @@ +Mike McLean +Dennis Gregorovic +Mike Bonnet diff --git a/COPYING b/COPYING new file mode 100644 index 00000000..2f66bb23 --- /dev/null +++ b/COPYING @@ -0,0 +1,16 @@ + Koji - a system for building and tracking RPMS. + Copyright (c) 2007 Red Hat + + Koji is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; + version 2.1 of the License. + + This software is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this software; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA diff --git a/LGPL b/LGPL new file mode 100644 index 00000000..3b473dbf --- /dev/null +++ b/LGPL @@ -0,0 +1,458 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. 
+ + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. 
Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. 
+ + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". 
+ + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. 
+ + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. 
+ + END OF TERMS AND CONDITIONS diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..94b8b482 --- /dev/null +++ b/Makefile @@ -0,0 +1,108 @@ +NAME=koji +SPECFILE = $(firstword $(wildcard *.spec)) +SUBDIRS = hub builder koji cli docs util www + +ifdef DIST +DIST_DEFINES := --define "dist $(DIST)" +endif + +ifndef VERSION +VERSION := $(shell rpm $(RPM_DEFINES) $(DIST_DEFINES) -q --qf "%{VERSION}\n" --specfile $(SPECFILE)| head -1) +endif +# the release of the package +ifndef RELEASE +RELEASE := $(shell rpm $(RPM_DEFINES) $(DIST_DEFINES) -q --qf "%{RELEASE}\n" --specfile $(SPECFILE)| head -1) +endif + +ifndef WORKDIR +WORKDIR := $(shell pwd) +endif +## Override RPM_WITH_DIRS to avoid the usage of these variables. +ifndef SRCRPMDIR +SRCRPMDIR = $(WORKDIR) +endif +ifndef BUILDDIR +BUILDDIR = $(WORKDIR) +endif +ifndef RPMDIR +RPMDIR = $(WORKDIR) +endif +## SOURCEDIR is special; it has to match the CVS checkout directory,- +## because the CVS checkout directory contains the patch files. So it basically- +## can't be overridden without breaking things. But we leave it a variable +## for consistency, and in hopes of convincing it to work sometime. 
+ifndef SOURCEDIR +SOURCEDIR := $(shell pwd) +endif + + +# RPM with all the overrides in place; +ifndef RPM +RPM := $(shell if test -f /usr/bin/rpmbuild ; then echo rpmbuild ; else echo rpm ; fi) +endif +ifndef RPM_WITH_DIRS +RPM_WITH_DIRS = $(RPM) --define "_sourcedir $(SOURCEDIR)" \ + --define "_builddir $(BUILDDIR)" \ + --define "_srcrpmdir $(SRCRPMDIR)" \ + --define "_rpmdir $(RPMDIR)" +endif + +# CVS-safe version/release -- a package name like 4Suite screws things +up, so we have to remove the leading digits from the name +TAG_NAME := $(shell echo $(NAME) | sed -e s/\\\./_/g -e s/^[0-9]\\\+//g) +TAG_VERSION := $(shell echo $(VERSION) | sed s/\\\./_/g) +TAG_RELEASE := $(shell echo $(RELEASE) | sed s/\\\./_/g) + +# tag to export, defaulting to current tag in the spec file +ifndef TAG +TAG=$(TAG_NAME)-$(TAG_VERSION)-$(TAG_RELEASE) +endif + +_default: + @echo "read the makefile" + +clean: + rm -f *.o *.so *.pyc *~ koji*.bz2 koji*.src.rpm + rm -rf koji-$(VERSION) + for d in $(SUBDIRS); do make -s -C $$d clean; done + +subdirs: + for d in $(SUBDIRS); do make -C $$d; [ $$? = 0 ] || exit 1; done + +tarball: clean + @rm -rf .koji-$(VERSION) + @mkdir .koji-$(VERSION) + @cp -rl $(SUBDIRS) Makefile *.spec .koji-$(VERSION) + @mv .koji-$(VERSION) koji-$(VERSION) + tar --bzip2 --exclude '*.tar.bz2' --exclude '*.rpm' --exclude '.#*' --exclude '.cvsignore' --exclude CVS \ + -cpf koji-$(VERSION).tar.bz2 koji-$(VERSION) + @rm -rf koji-$(VERSION) + +srpm: tarball + $(RPM_WITH_DIRS) $(DIST_DEFINES) -ts koji-$(VERSION).tar.bz2 + +rpm: tarball + $(RPM_WITH_DIRS) $(DIST_DEFINES) -tb koji-$(VERSION).tar.bz2 + +tag:: $(SPECFILE) + cvs tag $(TAG_OPTS) -c $(TAG) + @echo "Tagged with: $(TAG)" + @echo + +# If and only if "make build" fails, use "make force-tag" to +re-tag the version. 
+force-tag: $(SPECFILE) + @$(MAKE) tag TAG_OPTS="-F $(TAG_OPTS)" + +DESTDIR ?= / +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR) + + for d in $(SUBDIRS); do make DESTDIR=`cd $(DESTDIR); pwd` \ + -C $$d install; [ $$? = 0 ] || exit 1; done diff --git a/builder/Makefile b/builder/Makefile new file mode 100644 index 00000000..72275791 --- /dev/null +++ b/builder/Makefile @@ -0,0 +1,28 @@ + +BINFILES = kojid +PYFILES = $(wildcard *.py) + +_default: + @echo "nothing to make. try make install" + +clean: + rm -f *.o *.so *.pyc *~ + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/usr/sbin + install -m 755 $(BINFILES) $(DESTDIR)/usr/sbin + + mkdir -p $(DESTDIR)/etc/mock/koji + mkdir -p $(DESTDIR)/etc/rc.d/init.d + install -m 755 kojid.init $(DESTDIR)/etc/rc.d/init.d/kojid + + mkdir -p $(DESTDIR)/etc/sysconfig + install -m 644 kojid.sysconfig $(DESTDIR)/etc/sysconfig/kojid + + install -m 644 kojid.conf $(DESTDIR)/etc/kojid.conf diff --git a/builder/kojid b/builder/kojid new file mode 100755 index 00000000..14f17277 --- /dev/null +++ b/builder/kojid @@ -0,0 +1,2429 @@ +#!/usr/bin/python + +# Koji build daemon +# Copyright (c) 2005-2007 Red Hat +# +# Koji is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this software; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +# +# Authors: +# Mike McLean + +try: + import krbV +except ImportError: + pass +import base64 +import koji +import koji.util +import commands +import errno +import glob +import logging +import logging.handlers +import md5 +import os +import pprint +import pwd +import grp +import re +import rpm +import signal +import smtplib +import socket +import sys +import time +import datetime +import traceback +import urlparse +import xmlrpclib +from ConfigParser import ConfigParser +from fnmatch import fnmatch +from optparse import OptionParser +from xmlrpclib import Fault + +class ServerExit(Exception): + """Raised to shutdown the server""" + pass + +def main(): + global session + global options + logger = logging.getLogger("koji.build") + logger.info('Starting up') + tm = TaskManager() + def shutdown(*args): + raise SystemExit + signal.signal(signal.SIGTERM,shutdown) + while 1: + try: + tm.updateBuildroots() + tm.updateTasks() + tm.getNextTask() + except (SystemExit,ServerExit,KeyboardInterrupt): + logger.warn("Exiting") + break + except koji.AuthExpired: + logger.error('Session expired') + break + except koji.RetryError: + raise + except: + # XXX - this is a little extreme + # log the exception and continue + logger.error(''.join(traceback.format_exception(*sys.exc_info()))) + try: + time.sleep(options.sleeptime) + except (SystemExit,KeyboardInterrupt): + logger.warn("Exiting") + break + logger.warn("Shutting down, please wait...") + tm.shutdown() + session.logout() + sys.exit(0) + +def log_output(path, args, outfile, uploadpath, cwd=None, logerror=0, append=0, chroot=None): + """Run command with output redirected. 
If chroot is not None, chroot to the directory specified + before running the command.""" + pid = os.fork() + if not pid: + session._forget() + try: + if chroot: + os.chroot(chroot) + if cwd: + os.chdir(cwd) + flags = os.O_CREAT | os.O_WRONLY + if append: + flags |= os.O_APPEND + fd = os.open(outfile, flags) + os.dup2(fd, 1) + if logerror: + os.dup2(fd, 2) + os.execvp(path, args) + except: + msg = ''.join(traceback.format_exception(*sys.exc_info())) + if fd: + try: + os.write(fd, msg) + os.close(fd) + except: + pass + print msg + os._exit(1) + else: + if chroot: + outfile = os.path.normpath(chroot + outfile) + outfd = None + remotename = os.path.basename(outfile) + while True: + status = os.waitpid(pid, os.WNOHANG) + time.sleep(1) + + if not outfd: + try: + outfd = file(outfile, 'r') + except IOError: + # will happen if the forked process has not created the logfile yet + continue + except: + print 'Error reading log file: %s' % outfile + print ''.join(traceback.format_exception(*sys.exc_info())) + + incrementalUpload(remotename, outfd, uploadpath) + + if status[0] != 0: + if outfd: + outfd.close() + return status[1] + +def safe_rmtree(path, unmount=False, strict=True): + logger = logging.getLogger("koji.build") + #safe remove: with -xdev the find cmd will not cross filesystems + # (though it will cross bind mounts from the same filesystem) + if not os.path.exists(path): + logger.debug("No such path: %s" % path) + return + if unmount: + umount_all(path) + #first rm -f non-directories + logger.debug('Scrubbing files in %s' % path) + rv = os.system("find '%s' -xdev \\! 
-type d -print0 |xargs -0 rm -f" % path) + msg = 'file removal failed (code %r) for %s' % (rv,path) + if rv != 0: + logger.warn(msg) + if strict: + raise koji.GenericError, msg + else: + return rv + #them rmdir directories + #with -depth, we start at the bottom and work up + logger.debug('Scrubbing directories in %s' % path) + rv = os.system("find '%s' -xdev -depth -type d -print0 |xargs -0 rmdir" % path) + msg = 'dir removal failed (code %r) for %s' % (rv,path) + if rv != 0: + logger.warn(msg) + if strict: + raise koji.GenericError, msg + return rv + +def umount_all(topdir): + "Unmount every mount under topdir" + logger = logging.getLogger("koji.build") + for path in scan_mounts(topdir): + logger.debug('Unmounting %s' % path) + cmd = ['umount', '-l', path] + rv = os.spawnvp(os.P_WAIT,cmd[0],cmd) + if rv != 0: + raise koji.GenericError, 'umount failed (exit code %r) for %s' % (rv,path) + #check mounts again + remain = scan_mounts(topdir) + if remain: + raise koji.GenericError, "Unmounting incomplete: %r" % remain + +def scan_mounts(topdir): + """Search path for mountpoints""" + mplist = [] + topdir = os.path.normpath(topdir) + fo = file('/proc/mounts','r') + for line in fo.readlines(): + path = line.split()[1] + if path.startswith(topdir): + mplist.append(path) + fo.close() + #reverse sort so deeper dirs come first + mplist.sort() + mplist.reverse() + return mplist + +def incrementalUpload(fname, fd, path, retries=5, logger=None): + if not fd: + return + + offset = fd.tell() + contents = fd.read() + size = len(contents) + if size == 0: + return + + data = base64.encodestring(contents) + digest = md5.new(contents).hexdigest() + del contents + + tries = 0 + while True: + if session.uploadFile(path, fname, size, digest, offset, data): + return + if tries <= retries: + tries += 1 + time.sleep(10) + continue + else: + if logger: + logger.error("Error uploading file %s to %s at offset %d" % (fname, path, offset)) + else: + sys.stderr.write("Error uploading file %s to %s 
at offset %d" % (fname, path, offset)) + sys.stderr.write("\n") + return + +def _parseStatus(rv, prefix): + if isinstance(prefix, list) or isinstance(prefix, tuple): + prefix = ' '.join(prefix) + if os.WIFSIGNALED(rv): + return '%s was killed by signal %i' % (prefix, os.WTERMSIG(rv)) + elif os.WIFEXITED(rv): + return '%s exited with status %i' % (prefix, os.WEXITSTATUS(rv)) + else: + return '%s terminated for unknown reasons' % prefix + +def _isSuccess(rv): + """Return True if rv indicates successful completion + (exited with status 0), False otherwise.""" + if os.WIFEXITED(rv) and os.WEXITSTATUS(rv) == 0: + return True + else: + return False + +class BuildRoot(object): + + def __init__(self,*args,**kwargs): + self.logger = logging.getLogger("koji.build.buildroot") + if len(args) + len(kwargs) == 1: + # manage an existing mock buildroot + self._load(*args,**kwargs) + else: + self._new(*args,**kwargs) + + def _load(self, data): + #manage an existing buildroot + if isinstance(data, dict): + #assume data already pulled from db + self.id = data['id'] + else: + self.id = data + data = session.getBuildroot(self.id) + self.task_id = data['task_id'] + self.tag_id = data['tag_id'] + self.tag_name = data['tag_name'] + self.repoid = data['repo_id'] + self.br_arch = data['arch'] + self.name = "%(tag_name)s-%(id)s-%(repoid)s" % vars(self) + self.config = session.getBuildConfig(self.tag_id) + + def _new(self, tag, arch, task_id, distribution=None): + """Create a brand new repo""" + self.task_id = task_id + self.distribution = distribution + if not self.distribution: + # Provide a default if the srpm did not contain a 'distribution' header, + # e.g. 
if it was built by hand + self.distribution = 'Unknown' + self.config = session.getBuildConfig(tag) + if not self.config: + raise koji.BuildrootError("Could not get config info for tag: %s" % tag) + self.tag_id = self.config['id'] + self.tag_name = self.config['name'] + while 1: + repo_info = session.getRepo(self.tag_id) + if repo_info and repo_info['state'] == koji.REPO_READY: + break + self.logger.debug("Waiting for repo to be created %s" % self.tag_name) + time.sleep(5) + self.repoid = repo_info['id'] + self.br_arch = koji.canonArch(arch) + self.logger.debug("New buildroot: %(tag_name)s/%(br_arch)s/%(repoid)s" % vars(self)) + id = session.host.newBuildRoot(self.repoid, self.br_arch, task_id=task_id) + if id is None: + raise koji.BuildrootError, "failed to get a buildroot id" + self.id = id + self.name = "%(tag_name)s-%(id)s-%(repoid)s" % vars(self) + self._writeMockConfig() + + def _writeMockConfig(self): + global options + # mock config + configdir = '/etc/mock/koji' + configfile = "%s/%s.cfg" % (configdir,self.name) + self.mockcfg = "koji/%s" % self.name + + opts = {} + for k in ('repoid', 'tag_name', 'distribution'): + if hasattr(self, k): + opts[k] = getattr(self, k) + for k in ('mockdir', 'topdir'): + if hasattr(options, k): + opts[k] = getattr(options, k) + opts['buildroot_id'] = self.id + output = koji.genMockConfig(self.name, self.br_arch, managed=True, **opts) + + #write config + fo = file(configfile,'w') + fo.write(output) + fo.close() + + def mock(self, args, skip_setarch=False): + """Run mock""" + global options + mockpath = getattr(options,"mockpath","mock") + cmd = [mockpath, "-r", self.mockcfg] + if not skip_setarch: + cmd[:0] = ['setarch', self.br_arch] + if options.debug_mock: + cmd.append('--debug') + cmd.extend(args) + self.logger.info(' '.join(cmd)) + pid = os.fork() + if pid: + path = self.getUploadPath() + logs = {} + + while True: + status = os.waitpid(pid,os.WNOHANG) + time.sleep(1) + + try: + results = os.listdir(self.resultdir()) + 
except OSError: + # will happen when mock hasn't created the resultdir yet + results = [] + + for fname in results: + if fname.endswith('.log') and not logs.has_key(fname): + logs[fname] = None + + for (fname, fd) in logs.items(): + if not fd: + try: + fd = file(os.path.join(self.resultdir(), fname),'r') + logs[fname] = fd + except: + self.logger.error("Error reading mock log: %s", fname) + self.logger.error(''.join(traceback.format_exception(*sys.exc_info()))) + continue + + incrementalUpload(fname, fd, path, self.logger) + + if status[0] != 0: + for (fname, fd) in logs.items(): + if fd: + fd.close() + return status[1] + + else: + #in no case should exceptions propagate past here + try: + session._forget() + if os.getuid() == 0 and hasattr(options,"mockuser"): + self.logger.info('Running mock as %s' % options.mockuser) + try: + uid,gid = pwd.getpwnam(options.mockuser)[2:4] + except KeyError: + #perhaps a uid was specified + try: + uid,gid = pwd.getpwuid(int(options.mockuser))[2:4] + except (TypeError,ValueError,KeyError): + uid = None + os.setgroups([grp.getgrnam('mock')[2]]) + if uid is not None: + os.setregid(gid,gid) + os.setreuid(uid,uid) + os.execvp(cmd[0],cmd) + except: + #diediedie + print "Failed to exec mock" + print ''.join(traceback.format_exception(*sys.exc_info())) + os._exit(1) + + def getUploadPath(self): + """Get the path that should be used when uploading files to + the hub.""" + return 'tasks/%i' % self.task_id + + def uploadDir(self, dirpath, suffix=None): + """Upload the contents of the given directory to the + task output directory on the hub. If suffix is provided, + append '.' 
+ suffix to the filenames, so that successive uploads + of the same directory won't overwrite each other, if the files have + the same name but different contents.""" + if not os.path.isdir(dirpath): + return + uploadpath = self.getUploadPath() + for filename in os.listdir(dirpath): + filepath = os.path.join(dirpath, filename) + if os.stat(filepath).st_size > 0: + if suffix: + filename = '%s.%s' % (filename, suffix) + session.uploadWrapper(filepath, uploadpath, filename) + + def init(self): + rv = self.mock(['init']) + + if rv: + self.expire() + raise koji.BuildrootError, "could not init mock buildroot, %s" % self._mockResult(rv) + session.host.setBuildRootList(self.id,self.getPackageList()) + + def _mockResult(self, rv): + return _parseStatus(rv, 'mock') + + def build(self,srpm,arch=None): + # run build + session.host.setBuildRootState(self.id,'BUILDING') + args = ['--no-clean'] + if arch: + args.extend(['--arch', arch]) + args.extend(['rebuild', srpm]) + rv = self.mock(args) + + session.host.updateBuildRootList(self.id,self.getPackageList()) + if rv: + self.expire() + raise koji.BuildrootError, "error building package (arch %s), %s" % (arch, self._mockResult(rv)) + + def getPackageList(self): + """Return a list of packages from the buildroot + + Each member of the list is a dictionary containing the following fields: + - name + - version + - release + - epoch + """ + fields = ('name','version','release','epoch','arch') + rpm.addMacro("_dbpath", "%s/var/lib/rpm" % self.rootdir()) + ret = [] + try: + ts = rpm.TransactionSet() + for h in ts.dbMatch(): + pkg = koji.get_header_fields(h,fields) + #skip our fake packages + if pkg['name'] == 'buildsys-build': + #XXX config + continue + ret.append(pkg) + finally: + rpm.delMacro("_dbpath") + return ret + + def scrub(self): + "Non-mock implementation of clean" + rootdir = self.rootdir() + umount_all(rootdir) + safe_rmtree(rootdir) + + def clean(self): + """Remove the buildroot and associated mock config""" + 
#self.mock(['clean']) + raise koji.GenericError, "dangerous and deprecated. use scrub()" + + def resultdir(self): + global options + return "%s/%s/result" % (options.mockdir, self.name) + + def rootdir(self): + global options + return "%s/%s/root" % (options.mockdir, self.name) + + def expire(self): + session.host.setBuildRootState(self.id,'EXPIRED') + + +class TaskManager(object): + + def __init__(self): + self.tasks = {} + self.pids = {} + self.subsessions = {} + self.findHandlers() + self.status = '' + self.ready = False + self.host_id = session.host.getID() + self.logger = logging.getLogger("koji.build.TaskManager") + + def findHandlers(self): + """Find and index task handlers""" + handlers = {} + for v in globals().values(): + if type(v) == type(BaseTaskHandler) and issubclass(v,BaseTaskHandler): + for method in v.Methods: + handlers[method] = v + self.handlers = handlers + + def shutdown(self): + """Attempt to shut down cleanly""" + for task_id in self.pids.keys(): + self.cleanupTask(task_id) + session.host.freeTasks(self.tasks.keys()) + session.host.updateHost(task_load=0.0,ready=False) + + def updateBuildroots(self): + """Handle buildroot cleanup/maintenance + + - examine current buildroots on system + - compare with db + - clean up as needed + - /var/lib/mock + - /etc/mock/koji + """ + local_br = self._scanLocalBuildroots() + #query buildroots in db that are not expired + states = [ koji.BR_STATES[x] for x in ('INIT','WAITING','BUILDING') ] + db_br = session.listBuildroots(hostID=self.host_id,state=tuple(states)) + # index by id + db_br = dict([(row['id'],row) for row in db_br]) + st_expired = koji.BR_STATES['EXPIRED'] + for id, br in db_br.items(): + task_id = br['task_id'] + if task_id is None: + # not associated with a task + # this makes no sense now, but may in the future + self.logger.warn("Expiring taskless buildroot: %(id)i/%(tag_name)s/%(arch)s" % br) + session.host.setBuildRootState(id,st_expired) + elif not self.tasks.has_key(task_id): + #task 
not running - expire the buildroot + #TODO - consider recycling hooks here (with strong sanity checks) + self.logger.info("Expiring buildroot: %(id)i/%(tag_name)s/%(arch)s" % br) + self.logger.debug("Buildroot task: %r, Current tasks: %r" % (task_id,self.tasks.keys())) + session.host.setBuildRootState(id,st_expired) + continue + # get info on local_only buildroots (most likely expired) + local_only = [id for id in local_br.iterkeys() if not db_br.has_key(id)] + if local_only: + missed_br = session.listBuildroots(buildrootID=tuple(local_only)) + #get all the task info in one call + tasks = [] + for br in missed_br: + task_id = br['task_id'] + if task_id: + tasks.append(task_id) + #index + missed_br = dict([(row['id'],row) for row in missed_br]) + tasks = dict([(row['id'],row) for row in session.getTaskInfo(tasks)]) + for id in local_only: + # Cleaning options + # - wait til later + # - "soft" clean (leaving empty root/ dir) + # - full removal + data = local_br[id] + br = missed_br.get(id) + if not br: + self.logger.warn("%(name)s: not in db" % data) + continue + desc = "%(id)i/%(tag_name)s/%(arch)s" % br + if not br['retire_ts']: + self.logger.warn("%s: no retire timestamp" % desc) + continue + age = time.time() - br['retire_ts'] + self.logger.debug("Expired/stray buildroot: %s" % desc) + if br and br['task_id']: + task = tasks.get(br['task_id']) + if not task: + self.logger.warn("%s: invalid task %s" % (desc, br['task_id'])) + continue + if (task['state'] == koji.TASK_STATES['FAILED'] and age < 3600 * 4): + #XXX - this could be smarter + # keep buildroots for failed tasks around for a little while + self.logger.debug("Keeping failed buildroot: %s" % desc) + continue + topdir = data['dir'] + rootdir = None + if topdir: + rootdir = "%s/root" % topdir + try: + st = os.lstat(rootdir) + except OSError, e: + if e.errno == errno.ENOENT: + rootdir = None + else: + self.logger.warn("%s: %s" % (desc, e)) + continue + age = min(age, time.time() - st.st_mtime) + #note: 
https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=192153) + #If rpmlib is installing in this chroot, removing it entirely + #can lead to a world of hurt. + #We remove the rootdir contents but leave the rootdir unless it + #is really old + if age > 3600*24: + #dir untouched for a day + self.logger.info("Removing buildroot: %s" % desc) + if topdir and safe_rmtree(topdir, unmount=True, strict=False) != 0: + continue + #also remove the config + try: + os.unlink(data['cfg']) + except OSError, e: + self.logger.warn("%s: can't remove config: %s" % (desc, e)) + elif age > 120: + if rootdir: + try: + flist = os.listdir(rootdir) + except OSError, e: + self.logger.warn("%s: can't list rootdir: %s" % (desc, e)) + continue + if flist: + self.logger.info("%s: clearing rootdir" % desc) + for fn in flist: + safe_rmtree("%s/%s" % (rootdir,fn), unmount=True, strict=False) + else: + self.logger.debug("Recent buildroot: %s: %i seconds" % (desc,age)) + self.logger.debug("Local buildroots: %d" % len(local_br)) + self.logger.debug("Active buildroots: %d" % len(db_br)) + self.logger.debug("Expired/stray buildroots: %d" % len(local_only)) + + def _scanLocalBuildroots(self): + #XXX + configdir = '/etc/mock/koji' + buildroots = {} + for f in os.listdir(configdir): + if not f.endswith('.cfg'): + continue + fn = "%s/%s" % (configdir,f) + if not os.path.isfile(fn): + continue + fo = file(fn,'r') + id = None + name = None + for n in xrange(10): + # data should be in first few lines + line = fo.readline() + if line.startswith('# Koji buildroot id:'): + try: + id = int(line.split(':')[1]) + except ValueError,IndexError: + continue + if line.startswith('# Koji buildroot name:'): + try: + name = line.split(':')[1].strip() + except ValueError,IndexError: + continue + if id is None or name is None: + continue + # see if there's a dir for the buildroot + vardir = "/var/lib/mock/%s" % name + #XXX + buildroots[id] = {} + buildroots[id]['name'] = name + buildroots[id]['cfg'] = fn + 
buildroots[id]['dir'] = None + if os.path.isdir(vardir): + buildroots[id]['dir'] = vardir + return buildroots + + def updateTasks(self): + """Read and process task statuses from server + + The processing we do is: + 1) clean up after tasks that are not longer active: + * kill off processes + * retire buildroots + * remove buildroots + - with some possible exceptions + 2) wake waiting tasks if appropriate + """ + tasks = {} + stale = [] + task_load = 0.0 + if self.pids: + self.logger.info("pids: %r" % self.pids) + for task in session.host.getHostTasks(): + self.logger.info("open task: %r" % task) + # the tasks returned are those that are open and locked + # by this host. + id = task['id'] + if not self.pids.has_key(id): + #We don't have a process for this + #Expected to happen after a restart, otherwise this is an error + stale.append(id) + continue + tasks[id] = task + if task.get('alert',False): + #wake up the process + self.logger.info("Waking up task: %r" % task) + os.kill(self.pids[id],signal.SIGUSR2) + if not task['waiting']: + task_load += task['weight'] + self.logger.debug("Task Load: %s" % task_load) + self.task_load = task_load + self.tasks = tasks + self.logger.debug("Current tasks: %r" % self.tasks) + if len(stale) > 0: + #A stale task is one which is opened to us, but we know nothing + #about). This will happen after a daemon restart, for example. + self.logger.info("freeing stale tasks: %r" % stale) + session.host.freeTasks(stale) + w_opts = os.WNOHANG + for id, pid in self.pids.items(): + if self._waitTask(id, pid): + # the subprocess handles most everything, we just need to clear things out + if self.cleanupTask(id): + del self.pids[id] + if self.tasks.has_key(id): + del self.tasks[id] + for id, pid in self.pids.items(): + if not tasks.has_key(id): + # expected to happen when: + # - we are in the narrow gap between the time the task + # records its result and the time the process actually + # exits. 
+ # - task is canceled + # - task is forcibly reassigned/unassigned + tinfo = session.getTaskInfo(id) + if tinfo is None: + raise koji.GenericError, "Invalid task %r (pid %r)" % (id,pid) + elif tinfo['state'] == koji.TASK_STATES['CANCELED']: + self.logger.info("Killing canceled task %r (pid %r)" % (id,pid)) + if self.cleanupTask(id): + del self.pids[id] + elif tinfo['host_id'] != self.host_id: + self.logger.info("Killing reassigned task %r (pid %r)" % (id,pid)) + if self.cleanupTask(id): + del self.pids[id] + else: + self.logger.info("Lingering task %r (pid %r)" % (id,pid)) + + def getNextTask(self): + self.ready = self.readyForTask() + session.host.updateHost(self.task_load,self.ready) + if not self.ready: + self.logger.info("Not ready for task") + return + hosts, tasks = session.host.getLoadData() + self.logger.debug("Load Data:") + self.logger.debug(" hosts: %r" % hosts) + self.logger.debug(" tasks: %r" % tasks) + #now we organize this data into channel-arch bins + bin_hosts = {} #hosts indexed by bin + bins = {} #bins for this host + our_avail = None + for host in hosts: + host['bins'] = [] + if host['id'] == self.host_id: + #note: task_load reported by server might differ from what we + #sent due to precision variation + our_avail = host['capacity'] - host['task_load'] + for chan in host['channels']: + for arch in host['arches'].split() + ['noarch']: + bin = "%s:%s" % (chan,arch) + bin_hosts.setdefault(bin,[]).append(host) + if host['id'] == self.host_id: + bins[bin] = 1 + self.logger.debug("bins: %r" % bins) + if our_avail is None: + self.logger.info("Server did not report this host. Are we disabled?") + return + elif not bins: + self.logger.info("No bins for this host. 
Missing channel/arch config?") + return + #sort available capacities for each of our bins + avail = {} + for bin in bins.iterkeys(): + avail[bin] = [host['capacity'] - host['task_load'] for host in bin_hosts[bin]] + avail[bin].sort() + avail[bin].reverse() + for task in tasks: + # note: tasks are in priority order + self.logger.debug("task: %r" % task) + if task['state'] == koji.TASK_STATES['ASSIGNED']: + self.logger.debug("task is assigned") + if self.host_id == task['host_id']: + #assigned to us, we can take it regardless + if self.takeTask(task[id]): + return + elif task['state'] == koji.TASK_STATES['FREE']: + bin = "%(channel_id)s:%(arch)s" % task + self.logger.debug("task is free, bin=%r" % bin) + if not bins.has_key(bin): + continue + #see where our available capacity is compared to other hosts for this bin + #(note: the hosts in this bin are exactly those that could + #accept this task) + bin_avail = avail.get(bin, [0]) + self.logger.debug("available capacities for bin: %r" % bin_avail) + median = bin_avail[(len(bin_avail)-1)/2] + self.logger.debug("ours: %.2f, median: %.2f" % (our_avail, median)) + if our_avail < median: + self.logger.debug("Skipping - available capacity in lower half") + #decline for now and give the upper half a chance + return + #otherwise, we attempt to open the task + if self.takeTask(task['id']): + return + else: + #should not happen + raise Exception, "Invalid task state reported by server" + return + + def _waitTask(self, task_id, pid=None): + """Wait (nohang) on the task, return true if finished""" + if pid is None: + pid = self.pids.get(task_id) + if not pid: + raise koji.GenericError, "No pid for task %i" % task_id + prefix = "Task %i (pid %i)" % (task_id, pid) + try: + (childpid, status) = os.waitpid(pid, os.WNOHANG) + except OSError, e: + #check errno + if e.errno != errno.ECHILD: + #should not happen + raise + #otherwise assume the process is gone + self.logger.info("%s: %s" % (prefix, e)) + return False + if childpid != 0: + 
self.logger.info(_parseStatus(status, prefix)) + return True + return False + + def _killGroup(self, task_id, sig=signal.SIGTERM, timeout=5): + """Kill process group with signal, keep trying within timeout + + Returns True if successful, False if not + """ + pid = self.pids.get(task_id) + if not pid: + raise koji.GenericError, "No pid for task %i" % task_id + pgrps = self._childPGIDs(pid) + success = True + for pgrp in pgrps[::-1]: + # iterate in reverse order so processes whose children are killed might have + # a chance to cleanup before they're killed + success &= self._doKillGroup(task_id, pgrp, sig, timeout) + return success + + def _doKillGroup(self, task_id, pgrp, sig=signal.SIGTERM, timeout=5): + """Kill the process group with the given process group ID. Return True if the + group is successfully killed in the given timeout, False otherwise.""" + incr = 1.0 + t = 0.0 + while t < timeout: + try: + pid, rv = os.waitpid(-pgrp, os.WNOHANG) + while pid != 0: + self.logger.info(_parseStatus(rv, 'process %i' % pid)) + pid, rv = os.waitpid(-pgrp, os.WNOHANG) + except OSError, e: + # means there are no processes in that process group + self.logger.debug("Task %i (pgrp %i): %s" % (task_id, pgrp, e)) + if t == 0.0: + self.logger.info("Task %i (pgrp %i) exited" % (task_id, pgrp)) + else: + self.logger.info("Killed task %i (pgrp %i)" % (task_id, pgrp)) + return True + else: + self.logger.info("Task %i (pgrp %i) exists" % (task_id, pgrp)) + + try: + os.killpg(pgrp, sig) + except OSError, e: + # shouldn't happen + self.logger.warn("Task %i (pgrp %i): %s" % (task_id, pgrp, e)) + continue + else: + self.logger.info("Sent signal %i to task %i (pgrp %i)" % (sig, task_id, pgrp)) + + time.sleep(incr) + t += incr + self.logger.warn("Failed to kill task %i (pgrp %i)" % (task_id, pgrp)) + return False + + def _childPGIDs(self, pid): + """Recursively get the children of the process with the given ID. 
+ Return a list containing the process group IDs of the children + in depth-first order, without duplicates.""" + statsByPPID = {} + pgids = [] + for procdir in os.listdir('/proc'): + if not procdir.isdigit(): + continue + try: + procfile = file('/proc/%s/stat' % procdir) + procstats = [not field.isdigit() and field or int(field) for field in procfile.read().split()] + procfile.close() + if not statsByPPID.has_key(procstats[3]): + statsByPPID[procstats[3]] = [] + statsByPPID[procstats[3]].append(procstats) + if procstats[0] == pid: + # put the pgid of the top-level process into the list + pgids.append(procstats[4]) + except: + # We expect IOErrors, because files in /proc may disappear between the listdir() and read(). + # Nothing we can do about it, just move on. + continue + + if not pgids: + # assume the pid and pgid of the forked task are the same + pgids.append(pid) + pids = [pid] + while pids: + for ppid in pids[:]: + for procstats in statsByPPID.get(ppid, []): + # get the /proc entries with ppid as their parent, and append their pgid to the list, + # then recheck for their children + # pid is the 0th field, ppid is the 3rd field, pgid is the 4th field + if procstats[4] not in pgids: + pgids.append(procstats[4]) + pids.append(procstats[0]) + pids.remove(ppid) + + return pgids + + def cleanupTask(self, task_id): + """Clean up after task + + - kill children + - expire session + """ + # clean up stray children of tasks + ch_killed = self._killGroup(task_id) + if not ch_killed: + ch_killed = self._killGroup(task_id, signal.SIGKILL, timeout=2) + #expire the task's subsession + session_id = self.subsessions.get(task_id) + if session_id: + self.logger.info("Expiring subsession %i (task %i)" % (session_id, task_id)) + try: + session.logoutChild(session_id) + del self.subsessions[task_id] + except: + #not much we can do about it + pass + return ch_killed + + def checkSpace(self): + """See if we have enough space to accept another job""" + global options + br_path = 
options.mockdir + if not os.path.exists(br_path): + self.logger.error("No such directory: %s" % br_path) + raise IOError, "No such directory: %s" % br_path + cmd = "df -P %s" % br_path + fd = os.popen(cmd) + output = fd.readlines() + fd.close() + df_fields = ['filesystem','total','used','available','capacity','mountpoint'] + data = dict(zip(df_fields,output[1].split())) + availableMB = int(data['available']) / 1024 + self.logger.debug("disk space available in '%s': %i MB", br_path, availableMB) + if availableMB < options.minspace: + self.status = "Insufficient disk space: %i MB, %i MB required" % (availableMB, options.minspace) + self.logger.warn(self.status) + return False + return True + + def readyForTask(self): + """Determine if the system is ready to accept a new task. + + This function measures the system load and tries to determine + if there is room to accept a new task.""" + # key resources to track: + # disk_space + # df -P path + # df -iP path ? + # memory (meminfo/vmstat) + # vmstat fields 3-6 (also 7-8 for swap) + # http://www.redhat.com/advice/tips/meminfo.html + # cpu cycles (vmstat?) + # vmstat fields 13-16 (and others?) + # others?: + # io (iostat/vmstat) + # network (netstat?) 
+ global options + hostdata = session.host.getHost() + self.logger.debug('hostdata: %r' % hostdata) + if not hostdata['enabled']: + self.status = "Host is disabled" + self.logger.info(self.status) + return False + if self.task_load > hostdata['capacity']: + self.status = "Over capacity" + self.logger.info("Task load (%.1f) exceeds capacity (%.1f)" % (self.task_load, hostdata['capacity'])) + return False + if len(self.tasks) >= options.maxjobs: + # This serves as a backup to the capacity check and prevents + # a tremendous number of low weight jobs from piling up + self.status = "Full queue" + self.logger.info(self.status) + return False + if not self.checkSpace(): + # checkSpace() does its own logging + return False + #XXX - add more checks + return True + + def takeTask(self,task_id): + """Attempt to open the specified task + + Returns True if successful, False otherwise + """ + self.logger.info("Attempting to take task %s" %task_id) + data = session.host.openTask(task_id) + if data is None: + self.logger.warn("Could not open") + return False + if not data.has_key('request') or data['request'] is None: + self.logger.warn("Task '%s' has no request" % task_id) + return False + id = data['id'] + request = data['request'] + self.tasks[id] = data + params, method = xmlrpclib.loads(base64.decodestring(request)) + if self.handlers.has_key(method): + handlerClass = self.handlers[method] + elif self.handlers.has_key('default'): + handlerClass = self.handlers['default'] + else: + raise koji.GenericError, "No handler found for method '%s'" % method + handler = handlerClass(id,method,params) + # set weight + session.host.setTaskWeight(task_id,handler.weight()) + if handler.Foreground: + self.logger.info("running task in foreground") + handler.setManager(self) + self.runTask(handler) + else: + pid, session_id = self.forkTask(handler) + self.pids[id] = pid + self.subsessions[id] = session_id + return True + + def forkTask(self,handler): + global session + #get the subsession 
before we fork + newhub = session.subsession() + session_id = newhub.sinfo['session-id'] + pid = os.fork() + if pid: + newhub._forget() + return pid, session_id + #in no circumstance should we return after the fork + #nor should any exceptions propagate past here + try: + session._forget() + #set process group + os.setpgrp() + #use the subsession + session = newhub + #set a do-nothing handler for sigusr2 + signal.signal(signal.SIGUSR2,lambda *args: None) + self.runTask(handler) + finally: + #diediedie + try: + session.logout() + finally: + os._exit(0) + + def runTask(self,handler): + fail = False + try: + response = (handler.run(),) + # note that we wrap response in a singleton tuple + response = xmlrpclib.dumps(response, methodresponse=1, allow_none=1) + self.logger.info("RESPONSE: %r" % response) + except Fault, fault: + fail = True + response = xmlrpclib.dumps(fault) + tb = ''.join(traceback.format_exception(*sys.exc_info())).replace(r"\n", "\n") + self.logger.warn("FAULT:\n%s" % tb) + except (SystemExit,ServerExit,KeyboardInterrupt): + #we do not trap these + raise + except: + fail = True + # report exception back to server + e_class, e = sys.exc_info()[:2] + faultCode = getattr(e_class,'faultCode',1) + if issubclass(e_class, koji.GenericError): + #just pass it through + tb = str(e) + self.logger.warn(tb) + else: + tb = ''.join(traceback.format_exception(*sys.exc_info())) + self.logger.warn("TRACEBACK: %s" % tb) + response = xmlrpclib.dumps(xmlrpclib.Fault(faultCode, tb)) + data_out = base64.encodestring(response) + if fail: + session.host.failTask(handler.id, data_out) + else: + session.host.closeTask(handler.id, data_out) + + +class BaseTaskHandler(object): + """The base class for task handlers + + Each task handler is a class, a new instance of which is created + to handle each task. 
+ """ + + # list of methods the class can handle + Methods = [] + + # Options: + Foreground = False + + def __init__(self, id, method, params, workdir=None): + global options + self.id = id #task id + if method not in self.Methods: + raise koji.GenericError, 'method "%s" is not supported' % method + self.method = method + # handle named parameters + self.params,self.opts = koji.decode_args(*params) + if workdir is None: + workdir = "%s/tasks/%s" % (options.workdir, id) + self.workdir = workdir + self.logger = logging.getLogger("koji.build.BaseTaskHandler") + + def setManager(self,manager): + """Set the manager attribute + + This is only used for foreground tasks to give them access + to their task manager. + """ + if not self.Foreground: + return + self.manager = manager + + def handler(self): + """(abstract) the handler for the task.""" + raise NotImplementedError + + def run(self): + """Execute the task""" + self.createWorkdir() + try: + return self.handler(*self.params,**self.opts) + finally: + self.removeWorkdir() + + _taskWeight = 1.0 + + def weight(self): + """Return the weight of the task. + + This is run by the taskmanager before the task is run to determine + the weight of the task. The weight is an abstract measure of the + total load the task places on the system while running. + + A task may set _taskWeight for a constant weight different from 1, or + override this function for more complicated situations. + + Note that task weight is partially ignored while the task is sleeping. + """ + return getattr(self,'_taskWeight',1.0) + + def createWorkdir(self): + if self.workdir is None: + return + self.removeWorkdir() + os.makedirs(self.workdir) + + def removeWorkdir(self): + if self.workdir is None: + return + safe_rmtree(self.workdir, unmount=False, strict=True) + #os.spawnvp(os.P_WAIT, 'rm', ['rm', '-rf', self.workdir]) + + def wait(self, subtasks=None, all=False, failany=False): + """Wait on subtasks + + subtasks is a list of integers (or an integer). 
If more than one subtask + is specified, then the default behavior is to return when any of those + tasks complete. However, if all is set to True, then it waits for all of + them to complete. If all and failany are both set to True, then each + finished task will be checked for failure, and a failure will cause all + of the unfinished tasks to be cancelled. + + special values: + subtasks = None specify all subtasks + + Implementation notes: + The build daemon forks all tasks as separate processes. This function + uses signal.pause to sleep. The main process watches subtasks in + the database and will send the subprocess corresponding to the + subtask a SIGUSR2 to wake it up when subtasks complete. + """ + if isinstance(subtasks,int): + # allow single integer w/o enclosing list + subtasks = [subtasks] + session.host.taskSetWait(self.id,subtasks) + self.logger.debug("Waiting on %r" % subtasks) + while True: + finished, unfinished = session.host.taskWait(self.id) + if len(unfinished) == 0: + #all done + break + elif len(finished) > 0: + if all: + if failany: + failed = False + for task in finished: + try: + result = session.getTaskResult(task) + except (koji.GenericError, Fault): + self.logger.info("task %s failed or was canceled" % task) + failed = True + break + if failed: + self.logger.info("at least one task failed or was canceled, cancelling unfinished tasks") + session.cancelTaskChildren(self.id) + # reraise the original error now, rather than waiting for + # an error in taskWaitResults() + raise + else: + # at least one done + break + # signal handler set by TaskManager.forkTask + self.logger.debug("Pausing...") + signal.pause() + # main process will wake us up with SIGUSR2 + self.logger.debug("...waking up") + self.logger.debug("Finished waiting") + return dict(session.host.taskWaitResults(self.id,subtasks)) + + def getUploadDir(self): + return 'tasks/%i' % self.id + + def uploadFile(self, filename, remoteName=None): + """Upload the file with the given name 
to the task output directory + on the hub.""" + # Only upload files with content + if os.path.isfile(filename) and os.stat(filename).st_size > 0: + session.uploadWrapper(filename, self.getUploadDir(), remoteName) + +class FakeTask(BaseTaskHandler): + Methods = ['someMethod'] + Foreground = True + def handler(self, *args): + self.logger.info("This is a fake task. Args: " + str(args)) + return 42 + + +class SleepTask(BaseTaskHandler): + Methods = ['sleep'] + _taskWeight = 0.25 + def handler(self, n): + self.logger.info("Sleeping for %s seconds" % n) + time.sleep(n) + self.logger.info("Finished sleeping") + +class ForkTask(BaseTaskHandler): + Methods = ['fork'] + def handler(self, n=5, m=37): + for i in xrange(n): + os.spawnvp(os.P_NOWAIT, 'sleep', ['sleep',str(m)]) + +class WaitTestTask(BaseTaskHandler): + Methods = ['waittest'] + _taskWeight = 0.1 + def handler(self,count,seconds=10): + tasks = [] + for i in xrange(count): + task_id = session.host.subtask(method='sleep', + arglist=[seconds], + label=str(i), + parent=self.id) + tasks.append(task_id) + results = self.wait(all=True) + self.logger.info(pprint.pformat(results)) + + +class SubtaskTask(BaseTaskHandler): + Methods = ['subtask'] + _taskWeight = 0.1 + def handler(self,n=4): + if n > 0: + task_id = session.host.subtask(method='subtask', + arglist=[n-1], + label='foo', + parent=self.id) + self.wait(task_id) + else: + task_id = session.host.subtask(method='sleep', + arglist=[15], + label='bar', + parent=self.id) + self.wait(task_id) + + +class DefaultTask(BaseTaskHandler): + """Used when no matching method is found""" + Methods = ['default'] + _taskWeight = 0.1 + def __init__(self, id, method, params, workdir=None): + self.id = id #task id + self.method = method + self.params = params + self.workdir = None + self.opts = {} + def handler(self,*args,**opts): + raise koji.GenericError, "Invalid method: %s" % self.method + + +class ShutdownTask(BaseTaskHandler): + Methods = ['shutdown'] + _taskWeight = 0.0 + 
Foreground = True + def handler(self): + #note: this is a foreground task + raise ServerExit + + +class DependantTask(BaseTaskHandler): + + Methods = ['dependantTask'] + #mostly just waiting on other tasks + _taskWeight = 0.2 + + def handler(self, wait_list, task_list): + for task in wait_list: + if not isinstance(task, int) or not session.getTaskInfo(task): + self.logger.debug("invalid task id %s, removing from wait_list" % task) + wait_list.remove(task) + + # note, tasks in wait_list are not children of this task so we can't + # just use self.wait() + while wait_list: + for task in wait_list[:]: + if session.taskFinished(task): + info = session.getTaskInfo(task) + if info and koji.TASK_STATES[info['state']] in ['CANCELED','FAILED']: + raise koji.GenericError, "Dependency %s failed to complete." % info['id'] + wait_list.remove(task) + # let the system rest before polling again + time.sleep(1) + + subtasks = [] + for task in task_list: + # **((len(task)>2 and task[2]) or {}) expands task[2] into opts if it exists, allows for things like 'priority=15' + task_id = session.host.subtask(method=task[0], arglist=task[1], parent=self.id, **((len(task)>2 and task[2]) or {})) + if task_id: + subtasks.append(task_id) + if subtasks: + self.wait(subtasks, all=True) + +class ChainBuildTask(BaseTaskHandler): + + Methods = ['chainbuild'] + #mostly just waiting on other tasks + _taskWeight = 0.1 + + def handler(self, srcs, target, opts=None): + if opts.get('scratch'): + raise koji.BuildError, "--scratch is not allowed with chain-builds" + for build_level in srcs: + subtasks = [] + for src in build_level: + task_id = session.host.subtask(method='build', + arglist=[src,target,opts], + parent=self.id) + subtasks.append(task_id) + if not subtasks: + continue + self.wait(subtasks, all=True, failany=True) + if srcs[-1] == build_level: + continue + nvrs = [] + for subtask in subtasks: + builds = session.listBuilds(taskID=subtask) + if builds: + nvrs.append(builds[0]['nvr']) + task_id = 
session.host.subtask(method='waitrepo', + arglist=[target, None, nvrs], + parent=self.id) + self.wait(task_id, all=True, failany=True) + +class BuildTask(BaseTaskHandler): + + Methods = ['build'] + #we mostly just wait on other tasks + _taskWeight = 0.2 + + def handler(self, src, target, opts=None): + """Handler for the master build task""" + if opts is None: + opts = {} + self.opts = opts + if opts.get('arch_override') and not opts.get('scratch'): + raise koji.BuildError, "arch_override is only allowed for scratch builds" + target_info = session.getBuildTarget(target) + if not target_info: + raise koji.GenericError, 'unknown build target: %s' % target + dest_tag = target_info['dest_tag'] + build_tag = target_info['build_tag'] + srpm = self.getSRPM(src) + h = self.readSRPMHeader(srpm) + data = koji.get_header_fields(h,['name','version','release','epoch']) + data['task_id'] = self.id + extra_arches = None + self.logger.info("Reading package config for %(name)s" % data) + pkg_cfg = session.getPackageConfig(dest_tag,data['name']) + self.logger.debug("%r" % pkg_cfg) + if pkg_cfg is not None: + extra_arches = pkg_cfg.get('extra_arches') + if not self.opts.get('skip_tag') and not self.opts.get('scratch'): + dest_cfg = session.getPackageConfig(dest_tag,data['name']) + # Make sure package is on the list for this tag + if dest_cfg is None: + raise koji.BuildError, "package %s not in list for tag %s" \ + % (data['name'], target_info['dest_tag_name']) + elif dest_cfg['blocked']: + raise koji.BuildError, "package %s is blocked for tag %s" \ + % (data['name'], target_info['dest_tag_name']) + # TODO - more pre tests + archlist = self.getArchList(build_tag, h, extra=extra_arches) + #let the system know about the build we're attempting + if not self.opts.get('scratch'): + #scratch builds do not get imported + build_id = session.host.initBuild(data) + session.host.importChangelog(build_id, srpm) + #(initBuild raises an exception if there is a conflict) + try: + srpm,rpms,brmap,logs 
= self.runBuilds(srpm,build_tag,archlist) + if opts.get('scratch'): + #scratch builds do not get imported + session.host.moveBuildToScratch(self.id,srpm,rpms,logs=logs) + else: + session.host.completeBuild(self.id,build_id,srpm,rpms,brmap,logs=logs) + except (SystemExit,ServerExit,KeyboardInterrupt): + #we do not trap these + raise + except: + if not self.opts.get('scratch'): + #scratch builds do not get imported + session.host.failBuild(self.id, build_id) + # reraise the exception + raise + if not self.opts.get('skip_tag') and not self.opts.get('scratch'): + self.tagBuild(build_id,dest_tag) + + def getSRPM(self, src): + """Get srpm from src""" + if isinstance(src,str): + if src.startswith('cvs://'): + return self.getSRPMFromCVS(src) + else: + #assume this is a path under uploads + return src + else: + raise koji.BuildError, 'Invalid source specification: %s' % src + #XXX - other methods? + + def getSRPMFromCVS(self, url): + #TODO - allow different ways to get the srpm + task_id = session.host.subtask(method='buildSRPMFromCVS', + arglist=[url], + label='srpm', + parent=self.id) + # wait for subtask to finish + result = self.wait(task_id)[task_id] + srpm = result['srpm'] + return srpm + + def readSRPMHeader(self, srpm): + #srpm arg should be a path relative to /work + global options + self.logger.debug("Reading SRPM") + fn = "%s/work/%s" % (options.topdir, srpm) + if not os.path.exists(fn): + raise koji.BuildError, "file missing: %s" % fn + h = koji.get_rpm_header(fn) + if h[rpm.RPMTAG_SOURCEPACKAGE] != 1: + raise koji.BuildError, "%s is not a source package" % fn + return h + + def getArchList(self, build_tag, h, extra=None): + # get list of arches to build for + buildconfig = session.getBuildConfig(build_tag) + arches = buildconfig['arches'] + tag_archlist = arches.split() + self.logger.debug('arches: %s' % arches) + if arches is None: + #XXX - need to handle this better + raise koji.BuildError, "No arches for tag %(name)s [%(id)s]" % buildconfig + if extra: + 
self.logger.debug('Got extra arches: %s' % extra) + arches = "%s %s" % (arches,extra) + override = self.opts.get('arch_override') + if self.opts.get('scratch') and override: + #only honor override for scratch builds + self.logger.debug('arch override: %s' % override) + arches = override + archlist = arches.split() + self.logger.debug('base archlist: %r' % archlist) + # - adjust arch list based on srpm macros + buildarchs = h[rpm.RPMTAG_BUILDARCHS] + exclusivearch = h[rpm.RPMTAG_EXCLUSIVEARCH] + excludearch = h[rpm.RPMTAG_EXCLUDEARCH] + if buildarchs: + archlist = [ a for a in archlist if a in buildarchs ] + self.logger.debug('archlist after buildarchs: %r' % archlist) + if exclusivearch: + archlist = [ a for a in archlist if a in exclusivearch ] + self.logger.debug('archlist after exclusivearch: %r' % archlist) + if excludearch: + archlist = [ a for a in archlist if a not in excludearch ] + self.logger.debug('archlist after excludearch: %r' % archlist) + #noarch is funny + if 'noarch' not in excludearch and \ + ( 'noarch' in buildarchs or 'noarch' in exclusivearch ): + archlist.append('noarch') + archdict = {} + for a in archlist: + if a == 'noarch' or koji.canonArch(a) in tag_archlist: + archdict[a] = 1 + if not archdict: + raise koji.BuildError, "No matching arches were found" + return archdict.keys() + + def runBuilds(self, srpm, build_tag, archlist): + self.logger.debug("Spawning jobs for arches: %r" % (archlist)) + subtasks = {} + keep_srpm = True + for arch in archlist: + subtasks[arch] = session.host.subtask(method='buildArch', + arglist=[srpm,build_tag,arch,keep_srpm], + label=arch, + parent=self.id, + arch=koji.canonArch(arch)) + keep_srpm = False + + self.logger.debug("Got subtasks: %r" % (subtasks)) + self.logger.debug("Waiting on subtasks...") + + # wait for subtasks to finish + results = self.wait(subtasks.values(), all=True, failany=True) + + # finalize import + # merge data into needed args for completeBuild call + rpms = [] + brmap = {} + logs = {} 
+ built_srpm = None + for (arch, task_id) in subtasks.iteritems(): + result = results[task_id] + self.logger.debug("DEBUG: %r : %r " % (arch,result,)) + brootid = result['brootid'] + for fn in result['rpms']: + rpms.append(fn) + brmap[fn] = brootid + for fn in result['logs']: + logs.setdefault(arch,[]).append(fn) + if len(result['srpms']) == 1: + if built_srpm: + raise koji.BuildError, "multiple builds returned a srpm. task %i" % self.id + else: + built_srpm = result['srpms'][0] + brmap[result['srpms'][0]] = brootid + if built_srpm: + srpm = built_srpm + else: + raise koji.BuildError("could not find a built srpm") + + return srpm,rpms,brmap,logs + + def tagBuild(self,build_id,dest_tag): + #XXX - need options to skip tagging and to force tagging + #create the tagBuild subtask + #this will handle the "post tests" + task_id = session.host.subtask(method='tagBuild', + arglist=[dest_tag,build_id,False,None,True], + label='tag', + parent=self.id, + arch='noarch') + self.wait(task_id) + +class BuildArchTask(BaseTaskHandler): + + Methods = ['buildArch'] + + def weight(self): + # XXX - this one needs to figure the weight from the package to be + # built + return 1.5 + + def srpm_sanity_checks(self, filename): + header = koji.get_rpm_header(filename) + + if not header[rpm.RPMTAG_PACKAGER]: + raise koji.BuildError, "The build system failed to set the packager tag" + if not header[rpm.RPMTAG_VENDOR]: + raise koji.BuildError, "The build system failed to set the vendor tag" + if not header[rpm.RPMTAG_DISTRIBUTION]: + raise koji.BuildError, "The build system failed to set the distribution tag" + + def handler(self, pkg, root, arch, keep_srpm, opts={}): + """Build a package in a buildroot for one arch""" + global options + + ret = {} + + #noarch is funny + if arch == "noarch": + #use any arch this host can handle + host = session.host.getHost() + if host['arches'] is None: + raise koji.BuildError, "No arch list for this host" + br_arch = host['arches'].split()[0] + else: + br_arch 
= arch + + # starting srpm should already have been uploaded by parent + self.logger.debug("Reading SRPM") + fn = "%s/work/%s" % (options.topdir, pkg) + if not os.path.exists(fn): + raise koji.BuildError, "SRPM file missing: %s" % fn + # peel E:N-V-R from package + h = koji.get_rpm_header(fn) + name = h[rpm.RPMTAG_NAME] + ver = h[rpm.RPMTAG_VERSION] + rel = h[rpm.RPMTAG_RELEASE] + epoch = h[rpm.RPMTAG_EPOCH] + if h[rpm.RPMTAG_SOURCEPACKAGE] != 1: + raise koji.BuildError, "not a source package" + # Disable checking for distribution in the initial SRPM because it + # might have been built outside of the build system + # if not h[rpm.RPMTAG_DISTRIBUTION]: + # raise koji.BuildError, "the distribution tag is not set in the original srpm" + + broot = BuildRoot(root, br_arch, self.id, distribution=h[rpm.RPMTAG_DISTRIBUTION]) + + self.logger.debug("Initializing buildroot") + broot.init() + + # run build + self.logger.debug("Running build") + broot.build(fn,arch) + + # extract results + resultdir = broot.resultdir() + rpm_files = [] + srpm_files = [] + log_files = [] + unexpected = [] + for f in os.listdir(resultdir): + # files here should have one of two extensions: .log and .rpm + if f[-4:] == ".log": + log_files.append(f) + elif f[-8:] == ".src.rpm": + srpm_files.append(f) + elif f[-4:] == ".rpm": + rpm_files.append(f) + else: + unexpected.append(f) + self.logger.debug("rpms: %r" % rpm_files) + self.logger.debug("srpms: %r" % srpm_files) + self.logger.debug("logs: %r" % log_files) + self.logger.debug("unexpected: %r" % unexpected) + + # upload files to storage server + uploadpath = broot.getUploadPath() + for f in rpm_files: + self.uploadFile("%s/%s" % (resultdir,f)) + self.logger.debug("keep srpm %i %s %s" % (self.id, keep_srpm, opts)) + if keep_srpm: + if len(srpm_files) == 0: + raise koji.BuildError, "no srpm files found for task %i" % self.id + if len(srpm_files) > 1: + raise koji.BuildError, "mulitple srpm files found for task %i: %s" % (self.id, srpm_files) + + # 
Run sanity checks. Any failures will throw a BuildError + self.srpm_sanity_checks("%s/%s" % (resultdir,srpm_files[0])) + + self.logger.debug("uploading %s/%s to %s" % (resultdir,srpm_files[0], uploadpath)) + self.uploadFile("%s/%s" % (resultdir,srpm_files[0])) + ret['rpms'] = [ "%s/%s" % (uploadpath,f) for f in rpm_files ] + if keep_srpm: + ret['srpms'] = [ "%s/%s" % (uploadpath,f) for f in srpm_files ] + else: + ret['srpms'] = [] + ret['logs'] = [ "%s/%s" % (uploadpath,f) for f in log_files ] + + ret['brootid'] = broot.id + + broot.expire() + #Let TaskManager clean up + #broot.scrub() + + return ret + + +class TagBuildTask(BaseTaskHandler): + + Methods = ['tagBuild'] + #XXX - set weight? + + def handler(self, tag_id, build_id, force=False, fromtag=None, ignore_success=False): + task = session.getTaskInfo(self.id) + user_id = task['owner'] + try: + build = session.getBuild(build_id, strict=True) + tag = session.getTag(tag_id, strict=True) + + #several basic sanity checks have already been run (and will be run + #again when we make the final call). Our job is to perform the more + #computationally expensive 'post' tests. 
+ + #XXX - add more post tests + session.host.tagBuild(self.id,tag_id,build_id,force=force,fromtag=fromtag) + session.host.tagNotification(True, tag_id, fromtag, build_id, user_id, ignore_success) + except Exception, e: + exctype, value = sys.exc_info()[:2] + session.host.tagNotification(False, tag_id, fromtag, build_id, user_id, ignore_success, "%s: %s" % (exctype, value)) + raise e + +class BuildSRPMFromCVSTask(BaseTaskHandler): + + Methods = ['buildSRPMFromCVS'] + _taskWeight = 0.75 + + def spec_sanity_checks(self, filename): + spec = open(filename).read() + for tag in ("Packager", "Distribution", "Vendor"): + if re.match("%s:" % tag, spec, re.M): + raise koji.BuildError, "%s is not allowed to be set in spec file" % tag + for tag in ("packager", "distribution", "vendor"): + if re.match("%%define\s+%s\s+" % tag, spec, re.M): + raise koji.BuildError, "%s is not allowed to be defined in spec file" % tag + + def handler(self,url): + if not url.startswith('cvs://'): + raise koji.BuildError("invalid cvs URL: %s" % url) + + # Hack it because it refuses to parse it properly otherwise + scheme, netloc, path, params, query, fragment = urlparse.urlparse('http'+url[3:]) + if not (netloc and path and fragment and query): + raise koji.BuildError("invalid cvs URL: %s" % url) + + # Steps: + # 1. CVS checkout into tempdir + # 2. create sources hardlinks + # 3. 
Run 'make srpm' + + cvsdir = self.workdir + '/cvs' + self.logger.debug(cvsdir) + koji.ensuredir(cvsdir) + logfile = self.workdir + "/srpm.log" + uploadpath = self.getUploadDir() + + #perform checkouts + cmd = ['cvs', '-d', ':pserver:anonymous@%s:%s' % (netloc, path), + 'checkout', '-r', fragment, query] + if log_output(cmd[0], cmd, logfile, uploadpath, cwd=cvsdir, logerror=1): + output = "(none)" + try: + output = open(logfile).read() + except IOError: + pass + raise koji.BuildError, "Error with checkout ':pserver:anonymous@%s:%s': %s" % (netloc, path, output) + cmd = ['cvs', '-d', ':pserver:anonymous@%s:%s' % (netloc, path), + 'checkout', 'common'] + if log_output(cmd[0], cmd, logfile, uploadpath, cwd=cvsdir, logerror=1, append=1): + raise koji.BuildError, "Error with checkout :pserver:anonymous@%s:%s" % (netloc, path) + os.symlink('%s/common' % cvsdir, '%s/%s/../common' % (cvsdir, query)) + + spec_files = glob.glob("%s/%s/*.spec" % (cvsdir, query)) + if len(spec_files) == 0: + raise koji.BuildError("No spec file found") + elif len(spec_files) > 1: + raise koji.BuildError("Multiple spec files found: %s" % spec_files) + spec_file = spec_files[0] + + # Run spec file sanity checks. 
Any failures will throw a BuildError + self.spec_sanity_checks(spec_file) + + #build srpm + cmd = ['make', "RPM_DEFINES=--define '__beehive_build 1'", + 'BEEHIVE_SRPM_BUILD=1', + '-C', '%s/%s' % (cvsdir, query), 'beehive-srpm'] + if log_output(cmd[0], cmd, logfile, uploadpath, cwd=cvsdir, logerror=1, append=1): + raise koji.BuildError, "Error building SRPM" + srpm = '%s/%s/src.rpm' % (cvsdir, query) + + #give srpm a proper name + h = koji.get_rpm_header(srpm) + name = h[rpm.RPMTAG_NAME] + version = h[rpm.RPMTAG_VERSION] + release = h[rpm.RPMTAG_RELEASE] + basename = "%(name)s-%(version)s-%(release)s.src.rpm" % locals() + + #upload srpm and return + self.uploadFile(srpm, remoteName=basename) + return { + 'srpm' : "%s/%s" % (uploadpath, basename), + 'log' : "%s/srpm.log" % uploadpath, + } + +class TagNotificationTask(BaseTaskHandler): + Methods = ['tagNotification'] + + _taskWeight = 0.1 + + message_templ = \ +"""From: %(from_addr)s\r +Subject: %(nvr)s %(result)s %(operation)s by %(user_name)s\r +To: %(to_addrs)s\r +X-Koji-Package: %(pkg_name)s\r +X-Koji-NVR: %(nvr)s\r +X-Koji-User: %(user_name)s\r +X-Koji-Status: %(status)s\r +%(tag_headers)s\r +\r +Package: %(pkg_name)s\r +NVR: %(nvr)s\r +User: %(user_name)s\r +Status: %(status)s\r +%(operation_details)s\r +%(nvr)s %(result)s %(operation)s by %(user_name)s\r +%(failure_info)s\r +""" + + def handler(self, recipients, is_successful, tag_info, from_info, build_info, user_info, ignore_success=None, failure_msg=''): + if len(recipients) == 0: + self.logger.debug('task %i: no recipients, not sending notifications', self.id) + return + + if ignore_success and is_successful: + self.logger.debug('task %i: tag operation successful and ignore success is true, not sending notifications', self.id) + return + + build = session.getBuild(build_info) + user = session.getUser(user_info) + pkg_name = build['package_name'] + nvr = '%(package_name)s-%(version)s-%(release)s' % build + user_name = user['name'] + + from_addr = 
options.from_addr + to_addrs = ', '.join(recipients) + + operation = '%(action)s' + operation_details = 'Tag Operation: %(action)s\r\n' + tag_headers = '' + if from_info: + from_tag = session.getTag(from_info) + from_tag_name = from_tag['name'] + operation += ' from %s' % from_tag_name + operation_details += 'From Tag: %s\r\n' % from_tag_name + tag_headers += 'X-Koji-Tag: %s\r\n' % from_tag_name + action = 'untagged' + if tag_info: + tag = session.getTag(tag_info) + tag_name = tag['name'] + operation += ' into %s' % tag_name + operation_details += 'Into Tag: %s\r\n' % tag_name + tag_headers += 'X-Koji-Tag: %s\r\n' % tag_name + action = 'tagged' + if tag_info and from_info: + action = 'moved' + operation = operation % locals() + operation_details = operation_details % locals() + + if is_successful: + result = 'successfully' + status = 'complete' + failure_info = '' + else: + result = 'unsuccessfully' + status = 'failed' + failure_info = "Operation failed with the error:\r\n %s\r\n" % failure_msg + + message = self.message_templ % locals() + + server = smtplib.SMTP(options.smtphost) + #server.set_debuglevel(True) + server.sendmail(from_addr, recipients, message) + server.quit() + + return 'sent notification of tag operation %i to: %s' % (self.id, to_addrs) + +class BuildNotificationTask(BaseTaskHandler): + Methods = ['buildNotification'] + + _taskWeight = 0.1 + + # XXX externalize these templates somewhere + subject_templ = """Package: %(build_nevr)s Tag: %(dest_tag)s Status: %(status)s Built by: %(build_owner)s""" + message_templ = \ +"""From: %(from_addr)s\r +Subject: %(subject)s\r +To: %(to_addrs)s\r +X-Koji-Tag: %(dest_tag)s\r +X-Koji-Package: %(build_pkg_name)s\r +X-Koji-Builder: %(build_owner)s\r +X-Koji-Status: %(status)s\r +\r +Package: %(build_nevr)s\r +Tag: %(dest_tag)s\r +Status: %(status)s%(cancel_info)s\r +Built by: %(build_owner)s\r +ID: %(build_id)i\r +Started: %(creation_time)s\r +Finished: %(completion_time)s\r +%(changelog)s\r +%(failure)s\r 
+%(output)s\r +Task Info: %(weburl)s/taskinfo?taskID=%(task_id)i\r +Build Info: %(weburl)s/buildinfo?buildID=%(build_id)i\r +""" + + def _getTaskData(self, task_id, data={}): + taskinfo = session.getTaskInfo(task_id) + + if not taskinfo: + # invalid task_id + return data + + if taskinfo['host_id']: + hostinfo = session.getHost(taskinfo['host_id']) + else: + hostinfo = None + + result = None + try: + result = session.getTaskResult(task_id) + except: + excClass, result = sys.exc_info()[:2] + if hasattr(result, 'faultString'): + result = result.faultString + else: + result = '%s: %s' % (excClass.__name__, result) + result = result.strip() + # clear the exception, since we're just using + # it for display purposes + sys.exc_clear() + if not result: + result = 'Unknown' + + files = session.listTaskOutput(task_id) + logs = [filename for filename in files if filename.endswith('.log')] + rpms = [filename for filename in files if filename.endswith('.rpm') and not filename.endswith('.src.rpm')] + srpms = [filename for filename in files if filename.endswith('.src.rpm')] + misc = [filename for filename in files if filename not in logs + rpms + srpms] + + logs.sort() + rpms.sort() + misc.sort() + + data[task_id] = {} + data[task_id]['id'] = taskinfo['id'] + data[task_id]['method'] = taskinfo['method'] + data[task_id]['arch'] = taskinfo['arch'] + data[task_id]['host'] = hostinfo and hostinfo['name'] or None + data[task_id]['state'] = koji.TASK_STATES[taskinfo['state']].lower() + data[task_id]['result'] = result + data[task_id]['request'] = session.getTaskRequest(task_id) + data[task_id]['logs'] = logs + data[task_id]['rpms'] = rpms + data[task_id]['srpms'] = srpms + data[task_id]['misc'] = misc + + children = session.getTaskChildren(task_id) + for child in children: + data = self._getTaskData(child['id'], data) + return data + + def handler(self, recipients, build, target, weburl): + if len(recipients) == 0: + self.logger.debug('task %i: no recipients, not sending 
notifications', self.id) + return + + build_pkg_name = build['package_name'] + build_pkg_evr = '%s%s-%s' % ((build['epoch'] and str(build['epoch']) + ':' or ''), build['version'], build['release']) + build_nevr = '%s-%s' % (build_pkg_name, build_pkg_evr) + build_id = build['id'] + build_owner = build['owner_name'] + # target comes from session.py:_get_build_target() + dest_tag = target['dest_tag_name'] + status = koji.BUILD_STATES[build['state']].lower() + creation_time = koji.formatTimeLong(build['creation_time']) + completion_time = koji.formatTimeLong(build['completion_time']) + task_id = build['task_id'] + + task_data = self._getTaskData(task_id) + + cancel_info = '' + failure_info = '' + if build['state'] == koji.BUILD_STATES['CANCELED']: + # The owner of the buildNotification task is the one + # who canceled the task, it turns out. + this_task = session.getTaskInfo(self.id) + if this_task['owner']: + canceler = session.getUser(this_task['owner']) + cancel_info = "\r\nCanceled by: %s" % canceler['name'] + elif build['state'] == koji.BUILD_STATES['FAILED']: + failure_data = task_data[task_id]['result'] + failed_hosts = ['%s (%s)' % (task['host'], task['arch']) for task in task_data.values() if task['host'] and task['state'] == 'failed'] + failure_info = "\r\n%s (%d) failed on %s:\r\n %s" % (build_nevr, build_id, + ', '.join(failed_hosts), + failure_data) + + failure = failure_info or cancel_info or '' + + tasks = {'failed' : [task for task in task_data.values() if task['state'] == 'failed'], + 'canceled' : [task for task in task_data.values() if task['state'] == 'canceled'], + 'closed' : [task for task in task_data.values() if task['state'] == 'closed']} + + srpms = [] + for taskinfo in task_data.values(): + for srpmfile in taskinfo['srpms']: + srpms.append(srpmfile) + srpms = self.uniq(srpms) + srpms.sort() + + if srpms: + output = "SRPMS:\r\n" + for srpm in srpms: + output += " %s" % srpm + output += "\r\n\r\n" + else: + output = '' + + # list states here to 
make them go in the correct order + for task_state in ['failed', 'canceled', 'closed']: + if tasks[task_state]: + output += "%s tasks:\r\n" % task_state.capitalize() + output += "%s-------\r\n\r\n" % ("-" * len(task_state)) + for task in tasks[task_state]: + output += "Task %s" % task['id'] + if task['host']: + output += " on %s\r\n" % task['host'] + else: + output += "\r\n" + output += "Task Type: %s\r\n" % koji.taskLabel(task) + for filetype in ['logs', 'rpms', 'misc']: + if task[filetype]: + output += "%s:\r\n" % filetype + for file in task[filetype]: + output += " %s/getfile?taskID=%s&name=%s\r\n" % (weburl, task['id'], file) + output += "\r\n" + output += "\r\n" + + changelog = koji.util.formatChangelog(session.getChangelogEntries(build_id, queryOpts={'limit': 3})).replace("\n","\r\n") + if changelog: + changelog = "Changelog:\r\n%s" % changelog + + from_addr = self.from_addr + to_addrs = ', '.join(recipients) + subject = self.subject_templ % locals() + message = self.message_templ % locals() + + server = smtplib.SMTP(options.smtphost) + # server.set_debuglevel(True) + server.sendmail(from_addr, recipients, message) + server.quit() + + return 'sent notification of build %i to: %s' % (build_id, to_addrs) + + def uniq(self, items): + """Remove duplicates from the list of items, and sort the list.""" + m = dict(zip(items, [1] * len(items))) + l = m.keys() + l.sort() + return l + + +class NewRepoTask(BaseTaskHandler): + Methods = ['newRepo'] + _taskWeight = 0.1 + + def handler(self, tag): + self.uploadpath = self.getUploadDir() + tinfo = session.getTag(tag, strict=True) + preptask = session.host.subtask(method='prepRepo', + arglist=[tinfo], + label='prep', + parent=self.id, + arch='noarch') + repo_id, event_id = self.wait(preptask)[preptask] + path = koji.pathinfo.repo(repo_id, tinfo['name']) + if not os.path.isdir(path): + raise koji.GenericError, "Repo directory missing: %s" % repodir + arches = [] + for fn in os.listdir(path): + if fn != 'groups' and 
os.path.isdir("%s/%s/RPMS" % (path, fn)): + arches.append(fn) + #see if we can find a previous repo to update from + oldrepo = session.getRepo(tinfo['id'], state=koji.REPO_READY) + subtasks = {} + for arch in arches: + subtasks[arch] = session.host.subtask(method='createrepo', + arglist=[repo_id, arch, oldrepo], + label=arch, + parent=self.id, + arch='noarch') + # wait for subtasks to finish + results = self.wait(subtasks.values(), all=True, failany=True) + data = {} + for (arch, task_id) in subtasks.iteritems(): + data[arch] = results[task_id] + self.logger.debug("DEBUG: %r : %r " % (arch,data[arch],)) + session.host.repoDone(repo_id, data) + return repo_id, event_id + + +class PrepRepoTask(BaseTaskHandler): + Methods = ['prepRepo'] + _taskWeight = 0.2 + + def handler(self, tinfo): + repo_id, event_id = session.host.repoInit(tinfo['id']) + path = koji.pathinfo.repo(repo_id, tinfo['name']) + if not os.path.isdir(path): + raise koji.GenericError, "Repo directory missing: %s" % repodir + #create and upload meta rpm + spec = "%s/groups/groups.spec" % path + #the repoInit call should have created groups.spec + if not os.path.exists(spec): + raise koji.GenericError, "groups.spec missing" + #build command + cmd = ['rpmbuild'] + for macro in ('_sourcedir', '_builddir', '_srcrpmdir', '_rpmdir'): + cmd.extend(['--define', "%s %s" % (macro,self.workdir)]) + cmd.extend(['-bb', spec]) + logfile = "%s/groups_rpm.log" % self.workdir + uploadpath = self.getUploadDir() + status = log_output(cmd[0], cmd, logfile, uploadpath, logerror=True) + if not _isSuccess(status): + raise koji.GenericError, "failed to build groups rpm: %s" \ + % _parseStatus(status, pkgcmd) + #upload file and return path + fn = 'buildsys-build-1-1.noarch.rpm' + pkg = "%s/noarch/%s" % (self.workdir, fn) + session.uploadWrapper(pkg, uploadpath, fn) + self.logger.debug("Adding %s to repo %s" % (fn, repo_id)) + session.host.repoAddRPM(repo_id, "%s/%s" % (uploadpath, fn)) + return repo_id, event_id + + +class 
CreaterepoTask(BaseTaskHandler): + + Methods = ['createrepo'] + #XXX - set weight? + _taskWeight = 0.5 + + def handler(self, repo_id, arch, oldrepo): + #arch is the arch of the repo, not the task + rinfo = session.repoInfo(repo_id) + if rinfo['state'] != koji.REPO_INIT: + raise koji.GenericError, "Repo %(id)s not in INIT state (got %(state)s)" % rinfo + pathinfo = koji.PathInfo(options.topdir) + repodir = pathinfo.repo(repo_id, rinfo['tag_name']) + repodir = "%s/%s" % (repodir, arch) + if not os.path.isdir(repodir): + raise koji.GenericError, "Repo directory missing: %s" % repodir + #set up our output dir + outdir = "%s/repo" % self.workdir + datadir = "%s/repodata" % outdir + koji.ensuredir(outdir) + cmd = ['/usr/bin/createrepo', '-vp', '--outputdir', outdir] + if os.path.exists("%s/comps.xml" % repodir): + cmd.extend(['-g', 'comps.xml']) + #attempt to recycle repodata from last repo + if oldrepo: + oldpath = pathinfo.repo(oldrepo['id'], rinfo['tag_name']) + olddatadir = "%s/%s/repodata" % (oldpath, arch) + if not os.path.isdir(olddatadir): + self.logger.warn("old repodata is missing: %s" % olddatadir) + else: + koji.ensuredir(datadir) + os.system('cp -a %s/* %s' % (olddatadir, datadir)) + cmd.append('--update') + # note: we can't easily use a cachedir because we do not have write + # permission. The good news is that with --update we won't need to + # be scanning many rpms. 
+ cmd.append(repodir) + + logfile = "%s/createrepo.log" % self.workdir + uploadpath = self.getUploadDir() + #log_output(path, args, outfile, uploadpath, cwd=None, logerror=0, append=0, chroot=None): + status = log_output(cmd[0], cmd, logfile, uploadpath, logerror=True) + if not _isSuccess(status): + raise koji.GenericError, "failed to create repo: %s" \ + % _parseStatus(status, pkgcmd) + + files = [] + for f in os.listdir(datadir): + if f.endswith('.xml') or f.endswith('.xml.gz'): + files.append(f) + session.uploadWrapper("%s/%s" % (datadir, f), uploadpath, f) + + return [uploadpath, files] + +class WaitrepoTask(BaseTaskHandler): + + Methods = ['waitrepo'] + #mostly just waiting + _taskWeight = 0.2 + + PAUSE = 60 + # time in seconds before we fail this task + TIMEOUT = 3600 + + def checkForNVR(self, tag, nvrs, repo_creation_event): + if not isinstance(nvrs, list): + nvrs = [nvrs] + nvr_list = [build['nvr'] for build in session.listTagged(tag, event=repo_creation_event, inherit=True)] + if not nvr_list: + return False + for nvr in nvrs: + if not nvr in nvr_list: + return False + return True + + def handler(self, build_target_info, newer_then=None, nvr=None): + start = time.time() + build_target = session.getBuildTarget(build_target_info) + repo = None + last_repo = None + if not newer_then and not nvr: + newer_then = datetime.datetime.now().isoformat(' ') + if not build_target: + raise koji.GenericError("Error: Invalid BuildTarget: %s" % build_target_info) + + while True: + repo = session.getRepo(build_target['build_tag_name']) + if repo and repo != last_repo: + if (nvr or newer_then) and (not nvr or self.checkForNVR(build_target['dest_tag'], nvr, repo['create_event'])) and (not newer_then or repo['creation_time'] > newer_then): + break + if self.TIMEOUT and ((time.time() - start) > self.TIMEOUT): + raise koji.GenericError("Error: Waited %d seconds and still no repo meeting conditions, timing out" % self.TIMEOUT) + last_repo = repo + time.sleep(self.PAUSE) + return 
"Successfully waited %s seconds for a '%s' repo (%s)" % ((time.time() - start), build_target['build_tag_name'], repo['id']) + +def get_options(): + """process options from command line and config file""" + global options + # parse command line args + parser = OptionParser() + parser.add_option("-c", "--config", dest="configFile", + help="use alternate configuration file", metavar="FILE", + default="/etc/kojid.conf") + parser.add_option("--user", help="specify user") + parser.add_option("--password", help="specify password") + parser.add_option("-f", "--fg", dest="daemon", + action="store_false", default=True, + help="run in foreground") + parser.add_option("--force-lock", action="store_true", default=False, + help="force lock for exclusive session") + parser.add_option("-v", "--verbose", action="store_true", default=False, + help="show verbose output") + parser.add_option("-d", "--debug", action="store_true", default=False, + help="show debug output") + parser.add_option("--debug-task", action="store_true", default=False, + help="enable debug output for tasks") + parser.add_option("--debug-xmlrpc", action="store_true", default=False, + help="show xmlrpc debug output") + parser.add_option("--debug-mock", action="store_true", default=False, + help="show mock debug output") + parser.add_option("--skip-main", action="store_true", default=False, + help="don't actually run main") + parser.add_option("--maxjobs", type='int', help="Specify maxjobs") + parser.add_option("--minspace", type='int', help="Specify minspace") + parser.add_option("--sleeptime", type='int', help="Specify the polling interval") + parser.add_option("--admin-emails", help="Address(es) to send error notices to") + parser.add_option("--topdir", help="Specify topdir") + parser.add_option("--workdir", help="Specify workdir") + parser.add_option("--mockdir", help="Specify mockdir") + parser.add_option("--mockuser", help="User to run mock as") + parser.add_option("-s", "--server", help="url of XMLRPC 
server") + (options, args) = parser.parse_args() + + if args: + parser.error("incorrect number of arguments") + #not reached + assert False + + # load local config + config = ConfigParser() + config.read(options.configFile) + for x in config.sections(): + if x != 'kojid': + quit('invalid section found in config file: %s' % x) + defaults = {'sleeptime': 15, + 'maxjobs': 5, + 'minspace': 8192, + 'admin_emails': None, + 'topdir': '/mnt/koji', + 'workdir': '/tmp/koji', + 'mockdir': '/var/lib/mock', + 'mockuser': 'kojibuilder', + 'smtphost': 'mail@example.com', + 'from_addr': 'Koji Build System ', + 'krb_principal': None, + 'host_principal_format': 'compile/%s@EXAMPLE.COM', + 'keytab': '/etc/kojid.keytab', + 'server': None, + 'user': None, + 'password': None} + if config.has_section('kojid'): + for name, value in config.items('kojid'): + if name in ['sleeptime', 'maxjobs', 'minspace']: + try: + defaults[name] = int(value) + except ValueError: + quit("value for %s option must be a valid integer" % name) + elif name in defaults.keys(): + defaults[name] = value + else: + quit("unknown config option: %s" % name) + for name, value in defaults.items(): + if getattr(options, name) is None: + setattr(options, name, value) + + #make sure workdir exists + if not os.path.exists(options.workdir): + koji.ensuredir(options.workdir) + + if not options.server: + parser.error("--server argument required") + +def quit(msg=None, code=1): + if msg: + logging.getLogger("koji.build").error(msg) + sys.stderr.write('%s\n' % msg) + sys.stderr.flush() + sys.exit(code) + +if __name__ == "__main__": + global options + + koji.add_file_logger("koji", "/var/log/kojid.log") + koji.add_sys_logger("koji") + #note we're setting logging params for all of koji* + get_options() + if options.debug: + logging.getLogger("koji").setLevel(logging.DEBUG) + elif options.verbose: + logging.getLogger("koji").setLevel(logging.INFO) + else: + logging.getLogger("koji").setLevel(logging.WARN) + if options.debug_task: + 
logging.getLogger("koji.build.BaseTaskHandler").setLevel(logging.DEBUG) + if options.admin_emails: + koji.add_mail_logger("koji", options.admin_emails) + + #build session options + session_opts = {} + for k in ('user','password','debug_xmlrpc', 'debug'): + session_opts[k] = getattr(options,k) + #start a session and login + session = koji.ClientSession(options.server, session_opts) + if options.user: + try: + #authenticate using user/password + session.login() + except koji.AuthError: + quit("Error: Unable to log in. Bad credentials?") + except xmlrpclib.ProtocolError: + quit("Error: Unable to connect to server %s" % (options.server)) + elif sys.modules.has_key('krbV'): + krb_principal = options.krb_principal + if krb_principal is None: + krb_principal = options.host_principal_format % socket.getfqdn() + try: + session.krb_login(principal=krb_principal, + keytab=options.keytab) + except krbV.Krb5Error, e: + quit("Kerberos authentication failed: '%s' (%s)" % (e.message, e.err_code)) + except socket.error, e: + quit("Could not connect to Kerberos authentication service: '%s'" % e.args[1]) + else: + quit("No username/password supplied and Kerberos missing or not configured") + #make session exclusive + try: + session.exclusiveSession(force=options.force_lock) + except koji.AuthLockError: + quit("Error: Unable to get lock. 
Trying using --force-lock") + if not session.logged_in: + quit("Error: Unknown login error") + #make sure it works + try: + ret = session.echo("OK") + except xmlrpclib.ProtocolError: + quit("Error: Unable to connect to server %s" % (options.server)) + if ret != ["OK"]: + quit("Error: incorrect server response: %r" % (ret)) + + # run main + if options.daemon: + #detach + koji.daemonize() + main() + # not reached + assert False + elif not options.skip_main: + koji.add_stderr_logger("koji") + main() diff --git a/builder/kojid.conf b/builder/kojid.conf new file mode 100644 index 00000000..d8210baf --- /dev/null +++ b/builder/kojid.conf @@ -0,0 +1,24 @@ +[kojid] +; The number of seconds to sleep between tasks +; sleeptime=15 + +; The maximum number of jobs that kojid will handle at a time +; maxjobs=10 + +; The minimum amount of free space (in MBs) required for each build root +; minspace=8192 + +; The directory root where work data can be found from the koji hub +; topdir=/mnt/koji + +; The directory root for temporary storage +; workdir=/tmp/koji + +; The directory root for mock +; mockdir=/var/lib/mock + +; The user to run as when doing builds +; mockuser=kojibuilder + +; The URL for the xmlrpc server +server=http://hub.example.com/kojihub diff --git a/builder/kojid.init b/builder/kojid.init new file mode 100755 index 00000000..0de1203b --- /dev/null +++ b/builder/kojid.init @@ -0,0 +1,85 @@ +#! /bin/sh +# +# kojid Start/Stop kojid +# +# chkconfig: 345 99 99 +# description: kojid server +# processname: kojid + +# This is an interactive program, we need the current locale + +# Source function library. +. /etc/init.d/functions + +# Check that we're a priviledged user +[ `id -u` = 0 ] || exit 0 + +[ -f /etc/sysconfig/kojid ] && . /etc/sysconfig/kojid + +prog="kojid" + +# Check that networking is up. 
+if [ "$NETWORKING" = "no" ] +then + exit 0 +fi + +[ -f /usr/sbin/kojid ] || exit 0 + +RETVAL=0 + +start() { + echo -n $"Starting $prog: " + cd / + ARGS="" + [ "$FORCE_LOCK" == "Y" ] && ARGS="$ARGS --force-lock" + [ "$KOJID_DEBUG" == "Y" ] && ARGS="$ARGS --debug" + [ "$KOJID_VERBOSE" == "Y" ] && ARGS="$ARGS --verbose" + # XXX Fix for make download-checks in kernel builds + # Remove once we're running the buildSRPMFromCVS task + # as an unpriviledged user with their own environment + export HOME="/root" + daemon /usr/sbin/kojid $ARGS + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && touch /var/lock/subsys/kojid + return $RETVAL +} + +stop() { + echo -n $"Stopping $prog: " + killproc kojid + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/kojid + return $RETVAL +} + +restart() { + stop + start +} + +# See how we were called. +case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status $prog + ;; + restart|reload) + restart + ;; + condrestart) + [ -f /var/lock/subsys/kojid ] && restart || : + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|reload|condrestart}" + exit 1 +esac + +exit $? diff --git a/builder/kojid.sysconfig b/builder/kojid.sysconfig new file mode 100644 index 00000000..393aee9a --- /dev/null +++ b/builder/kojid.sysconfig @@ -0,0 +1,3 @@ +FORCE_LOCK=Y +KOJID_DEBUG=N +KOJID_VERBOSE=Y diff --git a/cli/Makefile b/cli/Makefile new file mode 100644 index 00000000..7d0c0c9e --- /dev/null +++ b/cli/Makefile @@ -0,0 +1,18 @@ +FILES = koji + +_default: + @echo "nothing to make. 
try make install" + +clean: + rm -f *.o *.so *.pyc *~ + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/usr/bin + install -m 755 $(FILES) $(DESTDIR)/usr/bin + install -m 644 koji.conf $(DESTDIR)/etc/koji.conf diff --git a/cli/koji b/cli/koji new file mode 100755 index 00000000..d782746e --- /dev/null +++ b/cli/koji @@ -0,0 +1,3102 @@ +#!/usr/bin/python + +# command line interface for the Koji build system +# Copyright (c) 2005-2007 Red Hat +# +# Koji is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this software; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +# +# Authors: +# Dennis Gregorovic +# Mike McLean +import sys +try: + import krbV +except ImportError: + pass +import ConfigParser +import base64 +import koji +import fnmatch +import md5 +import os +import re +import pprint +import random +import socket +import string +import time +import urllib +import xmlrpclib +import optparse +#for import-comps handler +from rhpl.comps import Comps + +# fix OptionParser for python 2.3 (optparse verion 1.4.1+) +# code taken from optparse version 1.5a2 +OptionParser = optparse.OptionParser +if optparse.__version__ == "1.4.1+": + def _op_error(self, msg): + self.print_usage(sys.stderr) + msg = "%s: error: %s\n" % (self._get_prog_name(), msg) + if msg: + sys.stderr.write(msg) + sys.exit(2) + OptionParser.error = _op_error + +def _(args): + """Stub function for translation""" + return args + +def arg_filter(str): + try: + return int(str) + except ValueError: + pass + try: + return float(str) + except ValueError: + pass + #handle lists? 
    return str

def get_options():
    """process options from command line and config file

    Returns a (options, command_handler_name, command_args) tuple; the
    handler name is the global handle_*/anon_handle_* function to call.
    """

    usage = _("%prog [global-options] command [command-options-and-arguments]")
    parser = OptionParser(usage=usage)
    # stop parsing at the first positional arg so command options pass through
    parser.disable_interspersed_args()
    parser.add_option("-c", "--config", dest="configFile",
                      help=_("use alternate configuration file"), metavar="FILE",
                      default="/etc/koji.conf")
    parser.add_option("--keytab", help=_("specify a Kerberos keytab to use"))
    parser.add_option("--principal", help=_("specify a Kerberos principal to use"))
    parser.add_option("--runas", help=_("run as the specified user (requires special privileges)"))
    parser.add_option("--user", help=_("specify user"))
    parser.add_option("--password", help=_("specify password"))
    parser.add_option("--noauth", action="store_true", default=False,
                      help=_("do not authenticate"))
    parser.add_option("--force-auth", action="store_true", default=False,
                      help=_("authenticate even for read-only operations"))
    parser.add_option("-d", "--debug", action="store_true", default=False,
                      help=_("show debug output"))
    parser.add_option("--debug-xmlrpc", action="store_true", default=False,
                      help=_("show xmlrpc debug output"))
    parser.add_option("--skip-main", action="store_true", default=False,
                      help=_("don't actually run main"))
    parser.add_option("-s", "--server", help=_("url of XMLRPC server"))
    parser.add_option("--topdir", help=_("specify topdir"))
    parser.add_option("--web-url", help=_("url of the Koji web interface"))
    parser.add_option("--help-commands", action="store_true", default=False, help=_("list commands"))
    (options, args) = parser.parse_args()

    if options.help_commands:
        list_commands()
        sys.exit(0)
    if not args:
        # no command given: show the command list and exit
        list_commands()
        sys.exit(0)

    aliases = {
        'cancel-task' : 'cancel',
        'cxl' : 'cancel',
        'list-commands' : 'help',
    }
    cmd = args[0]
    cmd = aliases.get(cmd, cmd)
    cmd = cmd.replace('-', '_')
    # anon_handle_* commands skip auth unless --force-auth was given
    if globals().has_key('anon_handle_' + cmd):
        if not options.force_auth:
            options.noauth = True
        cmd = 'anon_handle_' + cmd
    elif globals().has_key('handle_' + cmd):
        cmd = 'handle_' + cmd
    else:
        list_commands()
        # parser.error exits; the assert documents the unreachable path
        parser.error('Unknown command: %s' % cmd)
        assert False
    # load local config
    defaults = {
        'server' : 'http://localhost/kojihub',
        'web_url' : 'http://localhost/koji',
        'topdir' : '/mnt/koji',
    }
    if os.access(options.configFile, os.F_OK):
        f = open(options.configFile)
        config = ConfigParser.ConfigParser()
        config.readfp(f)
        f.close()
        #XXX - really need a more robust config file setup, but this will have
        # to do for now
        if config.has_section('koji'):
            for name, value in config.items('koji'):
                #note the defaults dictionary also serves to indicate which
                #options *can* be set via the config file. Such options should
                #not have a default value set in the option parser.
                if defaults.has_key(name):
                    defaults[name] = value
    # command line takes precedence over config file over built-in defaults
    for name, value in defaults.iteritems():
        if getattr(options, name) is None:
            setattr(options, name, value)

    return options, cmd, args[1:]

def ensure_connection(session):
    """Verify the hub is reachable and warn on an API version mismatch."""
    try:
        ret = session.getAPIVersion()
    except xmlrpclib.ProtocolError:
        error(_("Error: Unable to connect to server"))
    if ret != koji.API_VERSION:
        warn(_("WARNING: The server is at API version %d and the client is at %d" % (ret, koji.API_VERSION)))

def print_task_headers():
    """Print the column headers"""
    print "ID Pri Owner State Arch Name"

def print_task(task,depth=0):
    """Print a task"""
    # work on a copy; we replace the numeric state with its symbolic name
    task = task.copy()
    task['state'] = koji.TASK_STATES.get(task['state'],'BADSTATE')
    fmt1 = "%(id)-5s %(priority)-4s %(owner)-12s %(state)-8s %(arch)-10s "
    fmt2 = "%(method)s"
    if depth:
        # indent child tasks under their parent
        indent = " "*(depth-1) + " +"
    else:
        indent = ''
    if task.get('host'):
        fmt3 = ' [%(host)s]'
    else:
        fmt3 = ''
    if task.get('build_id'):
        fmt4 = ' %(build_name)s-%(build_version)s-%(build_release)s'
    else:
        fmt4 = ''
    print ''.join([fmt1 % task, indent, fmt2 % task, fmt3 % task,
fmt4 % task])

def print_task_recurse(task,depth=0):
    """Print a task and its children"""
    print_task(task,depth)
    for child in task.get('children',()):
        print_task_recurse(child,depth+1)


class TaskWatcher(object):
    # Tracks the state of a single task while `koji watch-task` polls it;
    # level controls the indentation used when displaying child tasks.

    def __init__(self,task_id,session,level=0):
        self.id = task_id
        self.session = session
        # cached task info dict; None until the first update() call
        self.info = None
        self.level = level

    #XXX - a bunch of this stuff needs to adapt to different tasks

    def str(self):
        # one-line label: indented id plus a human-readable task label
        if self.info:
            label = koji.taskLabel(self.info)
            return "%s%d %s" % (' ' * self.level, self.id, label)
        else:
            return "%s%d" % (' ' * self.level, self.id)

    def __str__(self):
        return self.str()

    def get_failure(self):
        """Return a description of this task's failure, or '' if it did not fail."""
        if self.info['state'] != koji.TASK_STATES['FAILED']:
            return ''
        error = None
        try:
            # getTaskResult re-raises the task's failure as a local exception
            result = self.session.getTaskResult(self.id)
        except (xmlrpclib.Fault,koji.GenericError),e:
            error = e
        if error is None:
            # print "%s: complete" % self.str()
            # We already reported this task as complete in update()
            return ''
        else:
            return '%s: %s' % (error.__class__.__name__, str(error).strip())

    def update(self):
        """Update info and log if needed.
Returns True on state change."""
        if self.is_done():
            # Already done, nothing else to report
            return False
        last = self.info
        self.info = self.session.getTaskInfo(self.id, request=True)
        if self.info is None:
            print "No such task id: %i" % self.id
            sys.exit(1)
        state = self.info['state']
        if last:
            #compare and note status changes
            laststate = last['state']
            if laststate != state:
                print "%s: %s -> %s" % (self.str(), self.display_state(last), self.display_state(self.info))
                return True
            return False
        else:
            # First time we're seeing this task, so just show the current state
            print "%s: %s" % (self.str(), self.display_state(self.info))
            return False

    def is_done(self):
        # done means any terminal state: success, canceled, or failed
        if self.info is None:
            return False
        state = koji.TASK_STATES[self.info['state']]
        return (state in ['CLOSED','CANCELED','FAILED'])

    def is_success(self):
        # only CLOSED counts as success
        if self.info is None:
            return False
        state = koji.TASK_STATES[self.info['state']]
        return (state == 'CLOSED')

    def display_state(self, info):
        # human-readable state; OPEN also shows the host working the task,
        # FAILED includes the failure description
        if info['state'] == koji.TASK_STATES['OPEN']:
            if info['host_id']:
                host = self.session.getHost(info['host_id'])
                return 'open (%s)' % host['name']
            else:
                return 'open'
        elif info['state'] == koji.TASK_STATES['FAILED']:
            return 'FAILED: %s' % self.get_failure()
        else:
            return koji.TASK_STATES[info['state']].lower()

def display_tasklist_status(tasks):
    """Print a one-line summary of how many watched tasks are free/open/done/failed."""
    free = 0
    open = 0
    failed = 0
    done = 0
    for task_id in tasks.keys():
        status = tasks[task_id].info['state']
        if status == koji.TASK_STATES['FAILED']:
            failed += 1
        elif status == koji.TASK_STATES['CLOSED'] or status == koji.TASK_STATES['CANCELED']:
            done += 1
        elif status == koji.TASK_STATES['OPEN'] or status == koji.TASK_STATES['ASSIGNED']:
            open += 1
        elif status == koji.TASK_STATES['FREE']:
            free += 1
    print " %d free %d open %d done %d failed" % (free, open, done, failed)

def watch_tasks(session,tasklist):
    """Poll the given task ids (and children discovered along the way)
    until all reach a terminal state. Returns 0 when everything
    succeeded, 1 otherwise (including on KeyboardInterrupt)."""
    if not tasklist:
        return
    print "Watching tasks (this may be safely interrupted)..."
    rv = 0
    try:
        tasks = {}
        for task_id in tasklist:
            tasks[task_id] = TaskWatcher(task_id,session)
        while True:
            all_done = True
            for task_id,task in tasks.items():
                changed = task.update()
                if not task.is_done():
                    all_done = False
                elif changed:
                    # task is done and state just changed
                    display_tasklist_status(tasks)
                    # NOTE(review): indentation reconstructed from a mangled
                    # patch; rv appears to be set only when the terminal state
                    # was just observed -- confirm against upstream
                    if not task.is_success():
                        rv = 1
                for child in session.getTaskChildren(task_id):
                    child_id = child['id']
                    if not child_id in tasks.keys():
                        tasks[child_id] = TaskWatcher(child_id, session, task.level + 1)
                        tasks[child_id].update()
                        # If we found new children, go through the list again,
                        # in case they have children also
                        all_done = False
            if all_done:
                break

            time.sleep(1)
    except (KeyboardInterrupt):
        if tasks:
            print \
"""Tasks still running. You can continue to watch with the 'koji watch-task' command.
Running Tasks:
%s""" % '\n'.join(['%s: %s' % (t.str(), t.display_state(t.info)) for t in tasks.values() if not t.is_done()])
        rv = 1
    return rv

def watch_logs(session, tasklist, options):
    """Tail the .log outputs of the given tasks until they all finish."""
    print "Watching logs (this may be safely interrupted)..."
    def _isDone(session, taskId):
        # local helper: has this task reached a terminal state?
        info = session.getTaskInfo(taskId)
        if info is None:
            print "No such task id: %i" % taskId
            sys.exit(1)
        state = koji.TASK_STATES[info['state']]
        return (state in ['CLOSED','CANCELED','FAILED'])

    try:
        # per-task, per-log byte offsets of what we have already printed
        offsets = {}
        for task_id in tasklist:
            offsets[task_id] = {}

        lastlog = None
        while True:
            for task_id in tasklist[:]:
                if _isDone(session, task_id):
                    # stop watching this task after one final pass below
                    tasklist.remove(task_id)

                output = session.listTaskOutput(task_id)

                if options.log:
                    # only the specifically requested log file
                    logs = [filename for filename in output if filename == options.log]
                else:
                    logs = [filename for filename in output if filename.endswith('.log')]

                taskoffsets = offsets[task_id]
                for log in logs:
                    contents = 'placeholder'
                    while contents:
                        if not taskoffsets.has_key(log):
                            taskoffsets[log] = 0

                        # pull up to 16K of new content past our last offset
                        contents = session.downloadTaskOutput(task_id, log, taskoffsets[log], 16384)
                        taskoffsets[log] += len(contents)
                        if contents:
                            # print a tail(1)-style header when switching logs
                            currlog = "%d:%s:" % (task_id, log)
                            if currlog != lastlog:
                                if lastlog:
                                    sys.stdout.write("\n")
                                sys.stdout.write("==> %s <==\n" % currlog)
                                lastlog = currlog
                            sys.stdout.write(contents)

            if not tasklist:
                break

            time.sleep(1)
    except (KeyboardInterrupt):
        pass

def handle_add_host(options, session, args):
    "[admin] Add a host"
    usage = _("usage: %prog add-host [options] hostname arch [arch2 ...]")
    usage += _("\n(Specify the --help global option for a list of other help options)")
    parser = OptionParser(usage=usage)
    (options, args) = parser.parse_args(args)
    if len(args) < 2:
        # parser.error exits; the assert documents the unreachable path
        parser.error(_("Please specify a hostname and at least one arch"))
        assert False
    host = args[0]
    activate_session(session)
    id = session.getHost(host)
    if id:
        print "%s is already in the database, skipping" % host
    else:
        id = session.addHost(host, args[1:])
        if id:
            print "%s added: id %d" % (host, id)

def handle_add_host_to_channel(options, session, args):
    "[admin] Add a host to a channel"
    usage = _("usage: %prog add-host-to-channel [options] hostname channel")
    usage += _("\n(Specify the --help global option for a list of other help options)")
    parser = OptionParser(usage=usage)
    (options, args) = parser.parse_args(args)
    if len(args) != 2:
        parser.error(_("Please specify a hostname and a channel"))
        assert False
    host = args[0]
    activate_session(session)
    id = session.getHost(host)
    if not id:
        print "%s is not a host" % host
        return 1
    session.addHostToChannel(host, args[1])

def handle_remove_host_from_channel(options, session, args):
    "[admin] Remove a host from a channel"
    usage = _("usage: %prog remove-host-from-channel [options] hostname channel")
    usage += _("\n(Specify the --help global option for a list of other help options)")
    parser = OptionParser(usage=usage)
    (options, args) = parser.parse_args(args)
    if len(args) != 2:
        parser.error(_("Please specify a hostname and a channel"))
        assert False
    host = args[0]
    activate_session(session)
    id = session.getHost(host)
    if not id:
        print "%s is not a host" % host
        return 1
    session.removeHostFromChannel(host, args[1])

def handle_add_pkg(options, session, args):
    "[admin] Add a package to the listing for tag"
    usage = _("usage: %prog add-pkg [options] tag package [package2 ...]")
    usage += _("\n(Specify the --help global option for a list of other help options)")
    parser = OptionParser(usage=usage)
    parser.add_option("--force", action='store_true', help=_("Override blocks if necessary"))
    parser.add_option("--owner", help=_("Specify owner"))
    parser.add_option("--extra-arches", help=_("Specify extra arches"))
    (options, args) = parser.parse_args(args)
    if len(args) < 2:
        parser.error(_("Please specify a tag and at least one package"))
        assert False
    activate_session(session)
    tag = args[0]
    opts = {}
    opts['force'] = options.force
    opts['block'] = False
    if options.extra_arches:
        # normalize a comma- and/or space-separated list to space-separated
        opts['extra_arches'] = ' '.join(options.extra_arches.replace(',',' ').split())
    for package in args[1:]:
        #really should implement multicall...
        session.packageListAdd(tag,package,options.owner,**opts)

def handle_block_pkg(options, session, args):
    "[admin] Block a package in the listing for tag"
    usage = _("usage: %prog block-pkg [options] tag package [package2 ...]")
    usage += _("\n(Specify the --help global option for a list of other help options)")
    parser = OptionParser(usage=usage)
    (options, args) = parser.parse_args(args)
    if len(args) < 2:
        parser.error(_("Please specify a tag and at least one package"))
        assert False
    activate_session(session)
    tag = args[0]
    for package in args[1:]:
        #really should implement multicall...
        session.packageListBlock(tag,package)

def _unique_path(prefix):
    """Create a unique path fragment by appending a path component
    to prefix. The path component will consist of a string of letters and numbers
    that is unlikely to be a duplicate, but is not guaranteed to be unique."""
    # Use time() in the dirname to provide a little more information when
    # browsing the filesystem.
+ # For some reason repr(time.time()) includes 4 or 5 + # more digits of precision than str(time.time()) + return '%s/%r.%s' % (prefix, time.time(), + ''.join([random.choice(string.ascii_letters) for i in range(8)])) + +def _format_size(size): + if (size / 1073741824 >= 1): + return "%0.2f Gb" % (size / 1073741824.0) + if (size / 1048576 >= 1): + return "%0.2f Mb" % (size / 1048576.0) + if (size / 1024 >=1): + return "%0.2f Kb" % (size / 1024.0) + return "%0.2f B" % (size) + +def _format_secs(t): + h = t / 3600 + t = t % 3600 + m = t / 60 + s = t % 60 + return "%02d:%02d:%02d" % (h, m, s) + +def _progress_callback(uploaded, total, piece, time, total_time): + percent_done = float(uploaded)/float(total) + percent_done_str = "%02d%%" % (percent_done * 100) + data_done = _format_size(uploaded) + elapsed = _format_secs(total_time) + + speed = "- B/sec" + if (time): + if (uploaded != total): + speed = _format_size(float(piece)/float(time)) + "/sec" + else: + speed = _format_size(float(total)/float(total_time)) + "/sec" + + # write formated string and flush + sys.stdout.write("[% -36s] % 4s % 8s % 10s % 14s\r" % ('='*(int(percent_done*36)), percent_done_str, elapsed, data_done, speed)) + sys.stdout.flush() + +def _running_in_bg(): + if (not os.isatty(0)) or (os.getpgrp() != os.tcgetpgrp(0)): + return True + return False + +def handle_build(options, session, args): + "Build a package from source" + usage = _("usage: %prog build [options] target URL") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--skip-tag", action="store_true", + help=_("Do not attempt to tag package")) + parser.add_option("--scratch", action="store_true", + help=_("Perform a scratch build")) + parser.add_option("--nowait", action="store_true", + help=_("Don't wait on build")) + parser.add_option("--arch-override", help=_("Override build arches")) + parser.add_option("--noprogress", action="store_true", + 
help=_("Do not display progress of the upload")) + parser.add_option("--background", action="store_true", + help=_("Run the build at a lower priority")) + (options, args) = parser.parse_args(args) + if len(args) != 2: + parser.error(_("Exactly two arguments (a build target and a CVS URL or srpm file) are required")) + assert False + if options.arch_override and not options.scratch: + parser.error(_("--arch_override is only allowed for --scratch builds")) + activate_session(session) + target = args[0] + build_target = session.getBuildTarget(target) + if not build_target: + parser.error(_("Unknown build target: %s" % target)) + dest_tag = session.getTag(build_target['dest_tag']) + if not dest_tag: + parser.error(_("Unknown destination tag: %s" % build_target['dest_tag_name'])) + if dest_tag['locked'] and not options.scratch: + parser.error(_("Destination tag %s is locked" % dest_tag['name'])) + source = args[1] + opts = {} + if options.arch_override: + opts['arch_override'] = ' '.join(options.arch_override.replace(',',' ').split()) + for key in ('skip_tag','scratch'): + opts[key] = getattr(options,key) + priority = None + if options.background: + #relative to koji.PRIO_DEFAULT + priority = 5 + if not source.startswith('cvs://'): + #treat source as an srpm and upload it + print "Uploading srpm: %s" % source + serverdir = _unique_path('cli-build') + if _running_in_bg() or options.noprogress: + callback = None + else: + callback = _progress_callback + session.uploadWrapper(source, serverdir, callback=callback) + print + source = "%s/%s" % (serverdir, os.path.basename(source)) + task_id = session.build(source, target, opts, priority=priority) + print "Created task:", task_id + print "Task info: %s/taskinfo?taskID=%s" % (options.web_url, task_id) + if _running_in_bg() or options.nowait: + return + else: + return watch_tasks(session,[task_id]) + +def handle_chain_build(options, session, args): + # XXX - replace handle_build with this, once chain-building has gotten testing 
+ "Build one or more packages from source" + usage = _("usage: %prog chain-build [options] target URL [URL2 [:] URL3 [:] URL4 ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--skip-tag", action="store_true", + help=_("Do not attempt to tag package")) + parser.add_option("--scratch", action="store_true", + help=_("Perform a scratch build")) + parser.add_option("--nowait", action="store_true", + help=_("Don't wait on build")) + parser.add_option("--arch-override", help=_("Override build arches")) + parser.add_option("--noprogress", action="store_true", + help=_("Do not display progress of the upload")) + parser.add_option("--background", action="store_true", + help=_("Run the build at a lower priority")) + (options, args) = parser.parse_args(args) + if len(args) < 2: + parser.error(_("At least two arguments (a build target and a CVS URL or srpm file) are required")) + assert False + if options.arch_override and not options.scratch: + parser.error(_("--arch_override is only allowed for --scratch builds")) + activate_session(session) + target = args[0] + build_target = session.getBuildTarget(target) + if not build_target: + parser.error(_("Unknown build target: %s" % target)) + dest_tag = session.getTag(build_target['dest_tag']) + if not dest_tag: + parser.error(_("Unknown destination tag: %s" % build_target['dest_tag_name'])) + if dest_tag['locked']: + parser.error(_("Destination tag %s is locked" % dest_tag['name'])) + + sources = args[1:] + if options.scratch and ':' in sources: + parser.error(_("chain-builds do not support --scratch")) + srpms = {} + src_list = [] + build_level = [] + #src_lists is a list of lists of sources to build. + # each list is block of builds ("build level") which must all be completed + # before the next block begins. 
Blocks are separated on the command line with ':' + for src in sources: + if src == ':': + if build_level: + src_list.append(build_level) + build_level = [] + elif not src.startswith('cvs://'): + serverpath = "%s/%s" % (_unique_path('cli-build'), os.path.basename(src)) + srpms[src] = serverpath + build_level.append(serverpath) + else: + build_level.append(src) + if build_level: + src_list.append(build_level) + + opts = {} + if options.arch_override: + opts['arch_override'] = ' '.join(options.arch_override.replace(',',' ').split()) + for key in ('skip_tag','scratch'): + opts[key] = getattr(options,key) + priority = None + if options.background: + #relative to koji.PRIO_DEFAULT + priority = 5 + + if srpms: + print "Uploading SRPMs:" + if _running_in_bg() or options.noprogress: + callback = None + else: + callback = _progress_callback + for source, dest in srpms.items(): + print os.path.basename(source) + #uploadWrapper wants the destination dir + dest = os.path.dirname(dest) + session.uploadWrapper(source, dest, callback=callback) + print + + task_ids = [] + if len(src_list) == 1: + # single or multi-package build + for src in src_list[0]: + task_ids.append(session.build(src, target, opts, priority=priority)) + else: + # chain build + task_ids = [session.chainBuild(src_list, target, opts, priority=priority)] + + print "Created tasks: %s" % ' '.join([str(d) for d in task_ids]) + print "Task info:" + for task_id in task_ids: + print " %s/taskinfo?taskID=%s" % (options.web_url, task_id) + if _running_in_bg() or options.nowait: + return + else: + watch_tasks(session,task_ids) + +def handle_resubmit(options, session, args): + """Retry a canceled or failed task, using the same parameter as the original task.""" + usage = _("usage: %prog resubmit [options] taskID") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--nowait", action="store_true", help=_("Don't wait on task")) + 
parser.add_option("--nowatch", action="store_true", dest="nowait", + help=_("An alias for --nowait")) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("Please specify a task ID")) + assert False + activate_session(session) + taskID = args[0] + newID = session.resubmitTask(int(taskID)) + print "Resubmitted task %s as new task %s" % (taskID, newID) + if _running_in_bg() or options.nowait: + return + else: + watch_tasks(session,[newID]) + +def handle_call(options, session, args): + "[admin] Execute an arbitrary XML-RPC call" + usage = _("usage: %prog call [options] name [arg...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("Please specify the name of the XML-RPC method")) + assert False + activate_session(session) + name = args[0] + non_kw = [] + kw = {} + for arg in args[1:]: + if arg.isdigit(): + non_kw.append(int(arg)) + elif arg.find('=') != -1: + key, value = arg.split('=', 1) + kw[key] = value + else: + non_kw.append(arg) + pprint.pprint(getattr(session, name).__call__(*non_kw, **kw)) + +def anon_handle_mock_config(options, session, args): + "Create a mock config" + usage = _("usage: %prog mock-config [options] name") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--arch", help=_("Specify the arch")) + parser.add_option("--tag", help=_("Create a mock config for a tag")) + parser.add_option("--task", help=_("Duplicate the mock config of a previous task")) + parser.add_option("--buildroot", help=_("Duplicate the mock config for the specified buildroot id")) + parser.add_option("--mockdir", default="/var/lib/mock", metavar="DIR", + help=_("Specify mockdir")) + parser.add_option("--topdir", metavar="DIR", + help=_("Specify topdir")) + parser.add_option("--distribution", 
default="Koji Testing", + help=_("Change the distribution macro")) + parser.add_option("-o", metavar="FILE", dest="ofile", help=_("Output to a file")) + (options, args) = parser.parse_args(args) + if len(args) != 1: + parser.error(_("Please specify and name for your buildroot")) + assert False + activate_session(session) + name = args[0] + arch = None + opts = {} + for k in ('topdir', 'distribution', 'mockdir'): + if hasattr(options, k): + opts[k] = getattr(options, k) + if options.buildroot: + try: + br_id = int(options.buildroot) + except ValueError: + parser.error(_("Buildroot id must be an integer")) + brootinfo = session.getBuildroot(br_id) + opts['repoid'] = brootinfo['repo_id'] + opts['tag_name'] = brootinfo['tag_name'] + arch = brootinfo['arch'] + elif options.task: + try: + task_id = int(options.task) + except ValueError: + parser.error(_("Task id must be an integer")) + broots = session.listBuildroots(taskID=task_id) + if not broots: + print _("No buildroots for task %s (or no such task)") % options.task + sys.exit(1) + if len(broots) > 1: + print _("Multiple buildroots found: %s" % [br['id'] for br in broots]) + brootinfo = broots[0] + opts['repoid'] = brootinfo['repo_id'] + opts['tag_name'] = brootinfo['tag_name'] + arch = brootinfo['arch'] + elif options.tag: + if not options.arch: + print _("Please specify an arch") + sys.exit(1) + tag = session.getTag(options.tag) + if not tag: + parser.error(_("Invalid tag: %s" % options.tag)) + arch = options.arch + config = session.getBuildConfig(tag['id']) + if not config: + print _("Could not get config info for tag: %(name)s") % tag + sys.exit(1) + opts['tag_name'] = tag['name'] + repo = session.getRepo(config['id']) + if not repo: + print _("Could not get a repo for tag: %(name)s") % tag + sys.exit(1) + opts['repoid'] = repo['id'] + else: + parser.error(_("Please specify one of: --tag, --task, --buildroot")) + assert False + output = koji.genMockConfig(name, arch, **opts) + if options.ofile: + fo = 
file(options.ofile, 'w') + fo.write(output) + fo.close() + else: + print output + +def handle_disable_host(options, session, args): + "[admin] Mark a host as disabled" + usage = _("usage: %prog disable-host [options] hostname") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) != 1: + parser.error(_("Exactly one argument (a hostname) is required")) + assert False + activate_session(session) + try: + session.disableHost(args[0]) + except koji.GenericError, e: + print "Could not enable host", e + +def handle_enable_host(options, session, args): + "[admin] Mark a host as enabled" + usage = _("usage: %prog enable-host [options] hostname") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) != 1: + parser.error(_("Exactly one argument (a hostname) is required")) + assert False + activate_session(session) + try: + session.enableHost(args[0]) + except koji.GenericError, e: + print "Could not enable host", e + +def handle_import(options, session, args): + "[admin] Import local RPMs to the database" + usage = _("usage: %prog import [options] package [package...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--link", action="store_true", help=_("Attempt to hardlink the rpm")) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("At least one package must be specified")) + assert False + activate_session(session) + for path in args: + data = koji.get_header_fields(path, ('name','version','release','arch','sigmd5','sourcepackage','sourcerpm')) + rinfo = dict([(k,data[k]) for k in ('name','version','release','arch')]) + if data['sourcepackage']: + rinfo['arch'] = 'src' + prev = 
session.getRPM(rinfo) + if prev: + if prev['payloadhash'] == koji.hex_string(data['sigmd5']): + print _("RPM already imported: %s") % path + else: + print _("WARNING: md5sum mismatch for %s") % path + print _("Skipping import") + continue + serverdir = _unique_path('cli-import') + if options.link: + old_umask = os.umask(002) + dst = "%s/%s/%s" % (koji.pathinfo.work(), serverdir, os.path.basename(path)) + koji.ensuredir(os.path.dirname(dst)) + os.chown(os.path.dirname(dst), 48, 48) #XXX - hack + print "Linking rpm to: %s" % dst + os.link(path, dst) + os.umask(old_umask) + else: + print _("uploading %s...") % path, + sys.stdout.flush() + session.uploadWrapper(path, serverdir) + print _("done") + sys.stdout.flush() + print _("importing %s...") % path, + sys.stdout.flush() + try: + session.importRPM(serverdir, os.path.basename(path)) + except koji.GenericError, e: + print _("\nError importing: %s" % str(e).splitlines()[-1]) + sys.stdout.flush() + else: + print _("done") + sys.stdout.flush() + +def handle_import_comps(options, session, args): + "Import group/package information from a comps file" + usage = _("usage: %prog import-comps [options] ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--force", action="store_true", help=_("force import")) + (local_options, args) = parser.parse_args(args) + if len(args) != 2: + parser.error(_("Incorrect number of arguments")) + assert False + comps = Comps(args[0]) + tag = args[1] + force = local_options.force + #add all the groups first (so that group reqs do not break) + for name,group in comps.groups.items(): + print "Group: %s (%s)" % (group.id,name) + session.groupListAdd(tag,group.id,force=force,display_name=name, + is_default=bool(group.default), + uservisible=bool(group.user_visible), + description=group.description, + langonly=group.langonly, + biarchonly=bool(group.biarchonly)) + #for k in 
('id','biarchonly','langonly','user_visible','default','description'): + # print " %s: %s" %(k,getattr(group,k)) + for name,group in comps.groups.items(): + print "Group: %s (%s)" % (group.id,name) + for pkg in group.pkgs.values(): + pkg = pkg.copy() + pkg_name = pkg['package'] + if group.pkgConditionals.has_key(pkg_name): + pkg['requires'] = group.pkgConditionals[pkg_name] + pkg['basearchonly'] = bool(pkg['baseonly']) + del pkg['package'] + del pkg['baseonly'] + print " Package: %s: %r" % (pkg_name, pkg) + session.groupPackageListAdd(tag,group.id,pkg_name,force=force, **pkg) + for type,req in group.groups.values(): + print " Req: %s (%s)" % (req,type) + session.groupReqListAdd(tag,group.id,req,force=force,type=type) + for type,req in group.metapkgs.values(): + print " Metapkg: %s (%s)" %(req,type) + session.groupReqListAdd(tag,group.id,req,force=force,type=type,is_metapkg=True) + +def handle_import_sig(options, session, args): + "[admin] Import signatures into the database" + usage = _("usage: %prog import-sig [options] package [package...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--with-unsigned", action="store_true", + help=_("Also import unsigned sig headers")) + parser.add_option("--test", action="store_true", + help=_("Test mode -- don't actually import")) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("At least one package must be specified")) + assert False + for path in args: + if not os.path.exists(path): + parser.error(_("No such file: %s") % path) + activate_session(session) + for path in args: + data = koji.get_header_fields(path, ('name','version','release','arch','siggpg','sourcepackage')) + if data['sourcepackage']: + data['arch'] = 'src' + sigkey = data['siggpg'] + if not sigkey: + sigkey = "" + if not options.with_unsigned: + print _("Skipping unsigned package: %s" % path) + continue + else: + sigkey = 
koji.hex_string(sigkey[13:17]) + del data['siggpg'] + rinfo = session.getRPM(data) + if not rinfo: + print "No such rpm in system: %(name)s-%(version)s-%(release)s.%(arch)s" % data + continue + sighdr = koji.rip_rpm_sighdr(path) + previous = session.queryRPMSigs(rpm_id=rinfo['id'], sigkey=sigkey) + assert len(previous) <= 1 + if previous: + sighash = md5.new(sighdr).hexdigest() + if previous[0]['sighash'] == sighash: + print _("Signature already imported: %s") % path + continue + else: + print _("Warning: signature mismatch: %s") % path + continue + print _("Importing signature [key %s] from %s...") % (sigkey, path) + if not options.test: + session.addRPMSig(rinfo['id'], base64.encodestring(sighdr)) + +def handle_write_signed_rpm(options, session, args): + "[admin] Write signed RPMs to disk" + usage = _("usage: %prog write-signed-rpm [options] n-v-r [n-v-r...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--all", action="store_true", help=_("Write out all RPMs signed with this key")) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("A signature key must be specified")) + assert False + if len(args) < 2 and not options.all: + parser.error(_("At least one RPM must be specified")) + assert False + key = args.pop(0) + activate_session(session) + if options.all: + rpms = session.queryRPMSigs(sigkey=key) + count = 1 + for rpm in rpms: + print "%d/%d" % (count, len(rpms)) + count += 1 + session.writeSignedRPM(rpm['rpm_id'], key) + else: + for nvr in args: + build = session.getBuild(nvr) + rpms = session.listRPMs(buildID=build['id']) + for rpm in rpms: + session.writeSignedRPM(rpm['id'], key) + +def handle_list_permissions(options, session, args): + "[admin] List user permissions" + usage = _("usage: %prog list-permissions [options]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = 
OptionParser(usage=usage) + parser.add_option("--user", help=_("Only list permissions for this user")) + (options, args) = parser.parse_args(args) + if len(args) > 0: + parser.error(_("This command takes no arguments")) + assert False + if not options.user: + parser.error(_("A user must be specified")) + assert False + activate_session(session) + user = session.getUser(options.user) + if not user: + raise koji.GenericError("%s can not be found" % options.user) + perms = session.getUserPerms(user['id']) + print perms + +def handle_list_signed(options, session, args): + "[admin] List signed copies of rpms" + usage = _("usage: %prog list-signed [options]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--debug", action="store_true") + parser.add_option("--key", help=_("Only list RPMs signed with this key")) + parser.add_option("--build", help=_("Only list RPMs from this build")) + parser.add_option("--rpm", help=_("Only list signed copies for this RPM")) + parser.add_option("--tag", help=_("Only list RPMs within this tag")) + (options, args) = parser.parse_args(args) + activate_session(session) + qopts = {} + build_idx = {} + rpm_idx = {} + if options.key: + qopts['sigkey'] = options.key + if options.rpm: + rinfo = session.getRPM(options.rpm) + rpm_idx[rinfo['id']] = rinfo + if rinfo is None: + parser.error(_("No such RPM: %s") % options.rpm) + qopts['rpm_id'] = rinfo['id'] + if options.build: + binfo = session.getBuild(options.build) + build_idx[binfo['id']] = binfo + if binfo is None: + parser.error(_("No such build: %s") % options.rpm) + sigs = [] + rpms = session.listRPMs(buildID=binfo['id']) + for rinfo in rpms: + rpm_idx[rinfo['id']] = rinfo + sigs += session.queryRPMSigs(rpm_id=rinfo['id'], **qopts) + else: + sigs = session.queryRPMSigs(**qopts) + if options.tag: + print "getting tag listing" + rpms, builds = session.listTaggedRPMS(options.tag, inherit=False, 
latest=False) + print "got tag listing" + tagged = {} + for binfo in builds: + build_idx.setdefault(binfo['id'], binfo) + for rinfo in rpms: + rpm_idx.setdefault(rinfo['id'], rinfo) + tagged[rinfo['id']] = 1 + #Now figure out which sig entries actually have live copies + for sig in sigs: + rpm_id = sig['rpm_id'] + sigkey = sig['sigkey'] + if options.tag: + if tagged.get(rpm_id) is None: + continue + rinfo = rpm_idx.get(rpm_id) + if not rinfo: + rinfo = session.getRPM(rpm_id) + rpm_idx[rinfo['id']] = rinfo + binfo = build_idx.get(rinfo['build_id']) + if not binfo: + binfo = session.getBuild(rinfo['build_id']) + build_idx[binfo['id']] = binfo + binfo['name'] = binfo['package_name'] + builddir = koji.pathinfo.build(binfo) + signedpath = "%s/%s" % (builddir, koji.pathinfo.signed(rinfo, sigkey)) + if not os.path.exists(signedpath): + if options.debug: + print "No copy: %s" % signedpath + continue + print signedpath + +def handle_import_in_place(options, session, args): + "[admin] Import RPMs that are already in place" + usage = _("usage: %prog import-in-place [options] package [package...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("At least one package must be specified")) + assert False + activate_session(session) + for nvr in args: + data = koji.parse_NVR(nvr) + print _("importing %s...") % nvr, + try: + session.importBuildInPlace(data) + except koji.GenericError, e: + print _("\nError importing: %s" % str(e).splitlines()[-1]) + sys.stdout.flush() + else: + print _("done") + sys.stdout.flush() + +def handle_grant_permission(options, session, args): + "[admin] Grant a permission to a user" + usage = _("usage: %prog grant-permission [ ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--list", 
action="store_true", help=_("List possible permissions")) + (options, args) = parser.parse_args(args) + if not options.list and len(args) < 2: + parser.error(_("Please specify a permission and at least one user")) + assert False + activate_session(session) + perms = dict([(p['name'], p['id']) for p in session.getAllPerms()]) + if options.list: + for p in perms.keys(): + print p + return + perm_id = perms.get(args[0], None) + if perm_id is None: + print "No such permission: %s" % args[0] + sys.exit(1) + names = args[1:] + users = [] + for n in names: + user = session.getUser(n) + if user is None: + print "No such user: %s" % n + sys.exit(1) + users.append(user) + for user in users: + session.grantPermission(user['id'], perm_id) + +def anon_handle_latest_pkg(options, session, args): + "Print the latest packages for a tag" + usage = _("usage: %prog latest-pkg [options] tag package [package...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--arch", help=_("List all of the latest packages for this arch")) + parser.add_option("--all", action="store_true", help=_("List all of the latest packages for this tag")) + parser.add_option("--quiet", action="store_true", help=_("Do not print the header information")) + parser.add_option("--paths", action="store_true", help=_("Show the file paths")) + (options, args) = parser.parse_args(args) + if len(args) == 0: + parser.error(_("A tag name must be specified")) + assert False + activate_session(session) + if options.all: + if len(args) > 1: + parser.error(_("A package name may not be combined with --all")) + assert False + # Set None as the package argument + args.append(None) + else: + if len(args) < 2: + parser.error(_("A tag name and package name must be specified")) + assert False + pathinfo = koji.PathInfo() + + for pkg in args[1:]: + if options.arch: + rpms, builds = session.getLatestRPMS(args[0], package=pkg, 
arch=options.arch) + builds_hash = dict([(x['build_id'], x) for x in builds]) + data = rpms + if options.paths: + for x in data: + z = x.copy() + x['name'] = builds_hash[x['build_id']]['package_name'] + x['path'] = os.path.join(pathinfo.build(x), pathinfo.rpm(z)) + fmt = "%(path)s" + else: + fmt = "%(name)s-%(version)s-%(release)s.%(arch)s" + else: + data = session.getLatestBuilds(args[0], package=pkg) + if options.paths: + for x in data: + x['name'] = x['package_name'] + x['path'] = pathinfo.build(x) + fmt = "%(path)-40s %(tag_name)-20s %(owner_name)s" + else: + fmt = "%(nvr)-40s %(tag_name)-20s %(owner_name)s" + if not options.quiet: + print "%-40s %-20s %s" % ("Build","Tag","Built by") + print "%s %s %s" % ("-"*40, "-"*20, "-"*16) + options.quiet = True + + output = [ fmt % x for x in data] + output.sort() + for line in output: + print line + +def anon_handle_list_api(options, session, args): + "Print the list of XML-RPC APIs" + usage = _("usage: %prog list-api [options]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) != 0: + parser.error(_("This command takes no arguments")) + assert False + activate_session(session) + tmplist = [(x['name'], x) for x in session._listapi()] + tmplist.sort() + funcs = [x[1] for x in tmplist] + for x in funcs: + if x['args']: + expanded = [] + for arg in x['args']: + if type(arg) is str: + expanded.append(arg) + else: + expanded.append('%s=%s' % (arg[0], arg[1])) + args = ", ".join(expanded) + else: + args = "" + print '%s(%s)' % (x['name'], args) + if x['doc']: + print " description: %s" % x['doc'] + +def anon_handle_list_tagged(options, session, args): + "List the builds or rpms in a tag" + usage = _("usage: %prog list-tagged [options] tag [package]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + 
parser.add_option("--arch", help=_("List rpms for this arch")) + parser.add_option("--rpms", action="store_true", help=_("Show rpms instead of builds")) + parser.add_option("--inherit", action="store_true", help=_("Follow inheritance")) + parser.add_option("--latest", action="store_true", help=_("Only show the latest builds/rpms")) + parser.add_option("--quiet", action="store_true", help=_("Do not print the header information")) + parser.add_option("--paths", action="store_true", help=_("Show the file paths")) + parser.add_option("--sigs", action="store_true", help=_("Show signatures")) + (options, args) = parser.parse_args(args) + if len(args) == 0: + parser.error(_("A tag name must be specified")) + assert False + elif len(args) > 2: + parser.error(_("Only one package name may be specified")) + assert False + activate_session(session) + pathinfo = koji.PathInfo() + package = None + if len(args) > 1: + package = args[1] + tag = args[0] + opts = {} + for key in ('latest','inherit'): + opts[key] = getattr(options, key) + if package: + opts['package'] = package + if options.arch: + options.rpms = True + opts['arch'] = options.arch + if options.sigs: + opts['rpmsigs'] = True + options.rpms = True + + if options.rpms: + rpms, builds = session.listTaggedRPMS(tag, **opts) + data = rpms + if options.paths: + build_idx = dict([(b['id'],b) for b in builds]) + for rinfo in data: + build = build_idx[rinfo['build_id']] + build['name'] = build['package_name'] + builddir = pathinfo.build(build) + if options.sigs: + sigkey = rinfo['sigkey'] + signedpath = os.path.join(builddir, pathinfo.signed(rinfo, sigkey)) + if os.path.exists(signedpath): + rinfo['path'] = signedpath + else: + rinfo['path'] = os.path.join(builddir, pathinfo.rpm(rinfo)) + fmt = "%(path)s" + data = [x for x in data if x.has_key('path')] + else: + fmt = "%(name)s-%(version)s-%(release)s.%(arch)s" + if options.sigs: + fmt = "%(sigkey)s " + fmt + else: + data = session.listTagged(tag, **opts) + if options.paths: + 
for x in data: + x['name'] = x['package_name'] + x['path'] = pathinfo.build(x) + fmt = "%(path)-40s %(tag_name)-20s %(owner_name)s" + else: + fmt = "%(nvr)-40s %(tag_name)-20s %(owner_name)s" + if not options.quiet: + print "%-40s %-20s %s" % ("Build","Tag","Built by") + print "%s %s %s" % ("-"*40, "-"*20, "-"*16) + + output = [ fmt % x for x in data] + output.sort() + for line in output: + print line + +def anon_handle_list_buildroot(options, session, args): + "List the rpms used in or built in a buildroot" + usage = _("usage: %prog list-buildroot [options] buildroot-id") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--paths", action="store_true", help=_("Show the file paths")) + parser.add_option("--built", action="store_true", help=_("Show the built rpms")) + (options, args) = parser.parse_args(args) + if len(args) != 1: + parser.error(_("Incorrect number of arguments")) + assert False + activate_session(session) + package = None + buildrootID = int(args[0]) + opts = {} + if options.built: + opts['buildrootID'] = buildrootID + else: + opts['componentBuildrootID'] = buildrootID + data = session.listRPMs(**opts) + + fmt = "%(nvr)s" + output = [ fmt % x for x in data] + output.sort() + for line in output: + print line + +def anon_handle_list_untagged(options, session, args): + "List untagged builds" + usage = _("usage: %prog list-tagged [options] [package]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--paths", action="store_true", help=_("Show the file paths")) + parser.add_option("--show-references", action="store_true", help=_("Show build references")) + (options, args) = parser.parse_args(args) + if len(args) > 1: + parser.error(_("Only one package name may be specified")) + assert False + activate_session(session) + package = None + if len(args) > 0: + package = 
args[0] + opts = {} + if package: + opts['name'] = package + pathinfo = koji.PathInfo() + + data = session.untaggedBuilds(**opts) + if options.show_references: + print "(Showing build references)" + refs = {} + refs2 = {} #reverse map + for x in session.buildMap(): + refs.setdefault(x['used'], {}).setdefault(x['built'], 1) + refs2.setdefault(x['built'], {}).setdefault(x['used'], 1) + has_ref = {} + #XXX - need to ignore refs to unreferenced builds + for x in data: + builds = refs.get(x['id']) + if builds: + x['refs'] = "%s" % builds + else: + x['refs'] = '' + #data = [x for x in data if not refs.has_key(x['id'])] + if options.paths: + for x in data: + x['path'] = pathinfo.build(x) + fmt = "%(path)s" + else: + fmt = "%(name)s-%(version)s-%(release)s" + if options.show_references: + fmt = fmt + " %(refs)s" + + output = [ fmt % x for x in data] + output.sort() + for line in output: + print line + +def print_group_list_req_group(group): + print " @%(name)s [%(tag_name)s]" % group + +def print_group_list_req_package(pkg): + print " %(package)s: %(basearchonly)s, %(type)s [%(tag_name)s]" % pkg + +def anon_handle_list_groups(options, session, args): + "Print the group listings" + usage = _("usage: %prog list-groups [options] [group]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) < 1 or len(args) > 2: + parser.error(_("Incorrect number of arguments")) + assert False + opts = {} + activate_session(session) + tags = dict([(x['id'], x['name']) for x in session.listTags()]) + tmp_list = [(x['name'], x) for x in session.getTagGroups(args[0], **opts)] + tmp_list.sort() + groups = [x[1] for x in tmp_list] + for group in groups: + if len(args) > 1 and group['name'] != args[1]: + continue + print "%s [%s]" % (group['name'], tags.get(group['tag_id'], group['tag_id'])) + groups = [(x['name'], x) for x in group['grouplist']] + groups.sort() + for x 
in [x[1] for x in groups]: + x['tag_name'] = tags.get(x['tag_id'], x['tag_id']) + print_group_list_req_group(x) + pkgs = [(x['package'], x) for x in group['packagelist']] + pkgs.sort() + for x in [x[1] for x in pkgs]: + x['tag_name'] = tags.get(x['tag_id'], x['tag_id']) + print_group_list_req_package(x) + #print "%(name)-28s %(enabled)-7s %(ready)-5s %(task_load)-4s %(capacity)-8s %(arches)s" % host + +def handle_add_group_pkg(options, session, args): + "[admin] Add a package to a group's package listing" + usage = _("usage: %prog add-group-pkg [options] ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) != 3: + parser.error(_("This command takes three arguments")) + assert False + tag = args[0] + group = args[1] + pkg = args[2] + activate_session(session) + session.groupPackageListAdd(tag, group, pkg) + +def handle_block_group_pkg(options, session, args): + "[admin] Block a package from a group's package listing" + usage = _("usage: %prog block-group-pkg [options] [...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) < 3: + parser.error(_("This command takes at least three arguments")) + assert False + tag = args[0] + group = args[1] + activate_session(session) + for pkg in args[2:]: + session.groupPackageListBlock(tag, group, pkg) + +def handle_unblock_group_pkg(options, session, args): + "[admin] Unblock a package from a group's package listing" + usage = _("usage: %prog unblock-group-pkg [options] ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) != 3: + parser.error(_("This command takes three arguments")) + assert False + tag = args[0] + group = args[1] + pkg = 
args[2] + activate_session(session) + session.groupPackageListUnblock(tag, group, pkg) + +def handle_add_group_req(options, session, args): + "[admin] Add a group to a group's required list" + usage = _("usage: %prog add-group-req [options] ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) != 3: + parser.error(_("This command takes three arguments")) + assert False + tag = args[0] + group = args[1] + req = args[2] + activate_session(session) + session.groupReqListAdd(tag, group, req) + +def handle_block_group_req(options, session, args): + "[admin] Block a group's requirement listing" + usage = _("usage: %prog block-group-req [options] ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) != 3: + parser.error(_("This command takes three arguments")) + assert False + tag = args[0] + group = args[1] + req = args[2] + activate_session(session) + session.groupReqListBlock(tag, group, req) + +def handle_unblock_group_req(options, session, args): + "[admin] Unblock a group's requirement listing" + usage = _("usage: %prog unblock-group-req [options] ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) != 3: + parser.error(_("This command takes three arguments")) + assert False + tag = args[0] + group = args[1] + req = args[2] + activate_session(session) + session.groupReqListUnblock(tag, group, req) + +def anon_handle_list_hosts(options, session, args): + "Print the host listing" + usage = _("usage: %prog list-hosts [options]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--arch", 
action="append", default=[], help=_("Specify an architecture")) + parser.add_option("--channel", help=_("Specify a channel")) + parser.add_option("--ready", action="store_true", help=_("Limit to ready hosts")) + parser.add_option("--not-ready", action="store_false", dest="ready", help=_("Limit to not ready hosts")) + parser.add_option("--enabled", action="store_true", help=_("Limit to enabled hosts")) + parser.add_option("--not-enabled", action="store_false", dest="enabled", help=_("Limit to not enabled hosts")) + parser.add_option("--quiet", action="store_true", help=_("Do not print header information")) + (options, args) = parser.parse_args(args) + opts = {} + activate_session(session) + if options.arch: + opts['arches'] = options.arch + if options.channel: + channel = session.getChannel(options.channel) + if not channel: + parser.error(_('Unknown channel: %s' % options.channel)) + assert False + opts['channelID'] = channel['id'] + if options.ready is not None: + opts['ready'] = options.ready + if options.enabled is not None: + opts['enabled'] = options.enabled + tmp_list = [(x['name'], x) for x in session.listHosts(**opts)] + tmp_list.sort() + hosts = [x[1] for x in tmp_list] + + def yesno(x): + if x: return 'Y' + else: return 'N' + + # pull in the last update using multicall to speed it up a bit + session.multicall = True + for host in hosts: + session.getLastHostUpdate(host['id']) + updateList = session.multiCall() + + for host, [update] in zip(hosts, updateList): + if update is None: + host['update'] = '-' + else: + host['update'] = update.split('.')[0] + host['enabled'] = yesno(host['enabled']) + host['ready'] = yesno(host['ready']) + host['arches'] = ','.join(host['arches'].split()) + + if not options.quiet: + print "Hostname Enb Rdy Load/Cap Arches Last Update" + for host in hosts: + print "%(name)-28s %(enabled)-3s %(ready)-3s %(task_load)4.1f/%(capacity)-3.1f %(arches)-16s %(update)s" % host + +def anon_handle_list_pkgs(options, session, args): + "Print 
the package listing for tag or for owner" + usage = _("usage: %prog list-pkgs [options]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--owner", help=_("Specify owner")) + parser.add_option("--tag", help=_("Specify tag")) + parser.add_option("--package", help=_("Specify package")) + parser.add_option("--quiet", action="store_true", help=_("Do not print header information")) + parser.add_option("--noinherit", action="store_true", help=_("Don't follow inheritance")) + parser.add_option("--show-blocked", action="store_true", help=_("Show blocked packages")) + (options, args) = parser.parse_args(args) + if len(args) != 0: + parser.error(_("This command takes no arguments")) + assert False + activate_session(session) + opts = {} + if options.owner: + user = session.getUser(options.owner) + if user is None: + parser.error(_("Invalid user")) + assert False + opts['userID'] = user['id'] + if options.tag: + tag = session.getTag(options.tag) + if tag is None: + parser.error(_("Invalid tag")) + assert False + opts['tagID'] = tag['id'] + if options.package: + opts['pkgID'] = options.package + opts['inherited'] = not options.noinherit + opts['with_dups'] = True + data = session.listPackages(**opts) + if not data: + print "(no matching packages)" + return 1 + if not options.quiet: + if data[0][0].has_key('tag_id'): + print "%-23s %-23s %-16s %-16s" % ('Package','Tag','Extra Arches','Owner') + print "%s %s %s %s" % ('-'*23,'-'*23,'-'*16,'-'*16) + else: + print "Package" + print '-'*23 + for pkg in data: + if options.tag: + pkg = [pkg[0]] + for tagged_pkg in pkg: + if not options.show_blocked and tagged_pkg.get('blocked',False): + continue + if tagged_pkg.has_key('tag_id'): + if tagged_pkg['extra_arches'] is None: + tagged_pkg['extra_arches'] = "" + fmt = "%(package_name)-23s %(tag_name)-23s %(extra_arches)-16s %(owner_name)-16s" + if tagged_pkg.get('blocked',False): + fmt += " 
[BLOCKED]" + else: + fmt = "%(package_name)s" + print fmt % tagged_pkg + +def anon_handle_rpminfo(options, session, args): + "Print basic information about an RPM" + usage = _("usage: %prog rpminfo [options] [ ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("Please specify an RPM")) + assert False + activate_session(session) + for rpm in args: + info = session.getRPM(rpm) + if info is None: + print "No such rpm: %s\n" % rpm + continue + if info['epoch'] is None: + info['epoch'] = "" + else: + info['epoch'] = str(info['epoch']) + ":" + buildinfo = session.getBuild(info['build_id']) + buildinfo['name'] = buildinfo['package_name'] + buildinfo['arch'] = 'src' + if buildinfo['epoch'] is None: + buildinfo['epoch'] = "" + else: + buildinfo['epoch'] = str(buildinfo['epoch']) + ":" + print "RPM: %(epoch)s%(name)s-%(version)s-%(release)s.%(arch)s [%(id)d]" %info + print "RPM Path: %s" % os.path.join(koji.pathinfo.build(buildinfo), koji.pathinfo.rpm(info)) + print "SRPM: %(epoch)s%(name)s-%(version)s-%(release)s [%(id)d]" % buildinfo + print "SRPM Path: %s" % os.path.join(koji.pathinfo.build(buildinfo), koji.pathinfo.rpm(buildinfo)) + print "Built: %s" % time.strftime('%a, %d %b %Y %H:%M:%S %Z', time.localtime(info['buildtime'])) + print "Payload: %(payloadhash)s" %info + print "Size: %(size)s" %info + print "Build ID: %(build_id)s" %info + +def anon_handle_buildinfo(options, session, args): + "Print basic information about a build" + usage = _("usage: %prog buildinfo [options] [ ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("Please specify a build")) + assert False + activate_session(session) + for build in args: + if build.isdigit(): + build = 
int(build) + info = session.getBuild(build) + if info is None: + print "No such build: %s\n" % build + continue + if info['epoch'] is None: + info['epoch'] = "" + else: + info['epoch'] = str(info['epoch']) + ":" + info['name'] = info['package_name'] + info['arch'] = 'src' + info['state'] = koji.BUILD_STATES[info['state']] + rpms = session.listRPMs(buildID=info['id']) + print "BUILD: %(name)s-%(version)s-%(release)s [%(id)d]" % info + print "State: %(state)s" % info + print "Built by: %(owner_name)s" % info + print "Task: %(task_id)s" % info + print "Finished: %s" % koji.formatTimeLong(info['completion_time']) + print "RPMs:" + for rpm in rpms: + print os.path.join(koji.pathinfo.build(info), koji.pathinfo.rpm(rpm)) + print "Changelog:" + for entry in session.getChangelogEntries(info['id']): + print "* %s %s" % (time.strftime('%a %b %d %Y', time.strptime(entry['date'], '%Y-%m-%d %H:%M:%S')), entry['author']) + print entry['text'] + +def handle_add_target(options, session, args): + "[admin] Create a new build target" + usage = _("usage: %prog add-target name build-tag ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) < 2: + parser.error(_("Please specify a target name, a build tag, and destination tag")) + assert False + elif len(args) > 3: + parser.error(_("Incorrect number of arguments")) + assert False + name = args[0] + build_tag = args[1] + if len(args) > 2: + dest_tag = args[3] + else: + #most targets have the same name as their destination + dest_tag = name + activate_session(session) + if not session.hasPerm('admin'): + print "This action requires admin privileges" + return + session.createBuildTarget(name, build_tag, dest_tag) + +def handle_edit_target(options, session, args): + "[admin] Set the name, build_tag, and/or dest_tag of an existing build target to new values" + usage = _("usage: %prog edit-target [options] name") + 
usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--rename", help=_("Specify new name for target")) + parser.add_option("--build-tag", help=_("Specify a different build tag")) + parser.add_option("--dest-tag", help=_("Specify a different destination tag")) + + (options, args) = parser.parse_args(args) + + if len(args) != 1: + parser.error(_("Please specify a build target")) + assert False + activate_session(session) + + if not session.hasPerm('admin'): + print "This action requires admin privileges" + return + + targetInfo = session.getBuildTarget(args[0]) + if targetInfo == None: + raise koji.GenericError("No build target with the name or id '%s'" % args[0]) + + targetInfo['orig_name'] = targetInfo['name'] + + if options.rename: + targetInfo['name'] = options.rename + if options.build_tag: + targetInfo['build_tag_name'] = options.build_tag + if options.dest_tag: + targetInfo['dest_tag_name'] = options.dest_tag + + session.editBuildTarget(targetInfo['orig_name'], targetInfo['name'], targetInfo['build_tag_name'], targetInfo['dest_tag_name']) + +def handle_remove_target(options, session, args): + "[admin] Remove a build target" + usage = _("usage: %prog remove-target [options] name") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + + if len(args) != 1: + parser.error(_("Please specify a build target to remove")) + assert False + activate_session(session) + + if not session.hasPerm('admin'): + print "This action requires admin privileges" + return + + session.deleteBuildTarget(args[0]) + +def anon_handle_list_targets(options, session, args): + "List the build targets" + usage = _("usage: %prog list-targets [options]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + 
parser.add_option("--name", help=_("Specify the build target name")) + parser.add_option("--quiet", action="store_true", help=_("Do not print the header information")) + (options, args) = parser.parse_args(args) + if len(args) != 0: + parser.error(_("This command takes no arguments")) + assert False + activate_session(session) + + fmt = "%(name)-30s %(build_tag_name)-30s %(dest_tag_name)-30s" + if not options.quiet: + print "%-30s %-30s %-30s" % ('Name','Buildroot','Destination') + print "-" * 93 + tmp_list = [(x['name'], x) for x in session.getBuildTargets(options.name)] + tmp_list.sort() + targets = [x[1] for x in tmp_list] + for target in targets: + print fmt % target + #pprint.pprint(session.getBuildTargets()) + +def _printInheritance(tags, sibdepths=None, reverse=False): + if len(tags) == 0: + return + if sibdepths == None: + sibdepths = [] + currtag = tags[0] + tags = tags[1:] + if reverse: + siblings = len([tag for tag in tags if tag['parent_id'] == currtag['parent_id']]) + else: + siblings = len([tag for tag in tags if tag['child_id'] == currtag['child_id']]) + + outdepth = 0 + for depth in sibdepths: + if depth < currtag['currdepth']: + outspacing = depth - outdepth + sys.stdout.write(' ' * (outspacing * 3 - 1)) + sys.stdout.write(u'\u2502'.encode('UTF-8')) + outdepth = depth + + sys.stdout.write(' ' * ((currtag['currdepth'] - outdepth) * 3 - 1)) + if siblings: + sys.stdout.write(u'\u251c'.encode('UTF-8')) + else: + sys.stdout.write(u'\u2514'.encode('UTF-8')) + sys.stdout.write(u'\u2500'.encode('UTF-8')) + if reverse: + sys.stdout.write('%(name)s (%(tag_id)i)\n' % currtag) + else: + sys.stdout.write('%(name)s (%(parent_id)i)\n' % currtag) + + if siblings: + if len(sibdepths) == 0 or sibdepths[-1] != currtag['currdepth']: + sibdepths.append(currtag['currdepth']) + else: + if len(sibdepths) > 0 and sibdepths[-1] == currtag['currdepth']: + sibdepths.pop() + + _printInheritance(tags, sibdepths, reverse) + +def anon_handle_list_tag_inheritance(options, session, 
args): + "Print the inheritance information for a tag" + usage = _("usage: %prog list-tag-inheritance [options] ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--reverse", action="store_true", help=_("Process tag's children instead of its parents")) + parser.add_option("--stop", help=_("Stop processing inheritance at this tag")) + parser.add_option("--jump", help=_("Jump from one tag to another when processing inheritance")) + (options, args) = parser.parse_args(args) + if len(args) != 1: + parser.error(_("This command takes exctly one argument: a tag name or ID")) + assert False + activate_session(session) + tag = session.getTag(args[0]) + if not tag: + parser.error(_("Unknown tag: %s" % args[0])) + + opts = {} + opts['reverse'] = options.reverse or False + opts['stop'] = {} + opts['jump'] = {} + + if options.jump: + match = re.match(r'^(.*)/(.*)$', options.jump) + if match: + tag1 = session.getTagID(match.group(1)) + if not tag1: + parser.error(_("Unknown tag: %s" % match.group(1))) + tag2 = session.getTagID(match.group(2)) + if not tag2: + parser.error(_("Unknown tag: %s" % match.group(2))) + opts['jump'][str(tag1)] = tag2 + + if options.stop: + tag1 = session.getTagID(options.stop) + if not tag1: + parser.error(_("Unknown tag: %s" % options.stop)) + opts['stop'] = {str(tag1): 1} + + sys.stdout.write('%s (%i)\n' % (tag['name'], tag['id'])) + _printInheritance(session.getFullInheritance(tag['id'], None, opts['reverse'], opts['stop'], opts['jump']), None, opts['reverse']) + +def anon_handle_list_tags(options, session, args): + "Print the list of tags" + usage = _("usage: %prog list-tags [options] [pattern]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--show-id", action="store_true", help=_("Show tag ids")) + parser.add_option("--verbose", action="store_true", 
help=_("Show more information")) + parser.add_option("--unlocked", action="store_true", help=_("Only show unlocked tags")) + parser.add_option("--build", help=_("Show tags associated with a build")) + parser.add_option("--package", help=_("Show tags associated with a package")) + (options, args) = parser.parse_args(args) + #if len(args) != 0: + # parser.error(_("This command takes no arguments")) + # assert False + activate_session(session) + + pkginfo = {} + buildinfo = {} + + if options.package: + pkginfo = session.getPackage(options.package) + if not pkginfo: + parser.error(_("Invalid package %s" % options.package)) + assert False + + if options.build: + buildinfo = session.getBuild(options.build) + if not buildinfo: + parser.error(_("Invalid build %s" % options.build)) + assert False + + tags = session.listTags(buildinfo.get('id',None), pkginfo.get('id',None)) + tags.sort(lambda a,b: cmp(a['name'],b['name'])) + #if options.verbose: + # fmt = "%(name)s [%(id)i] %(perm)s %(locked)s %(arches)s" + if options.show_id: + fmt = "%(name)s [%(id)i]" + else: + fmt = "%(name)s" + for tag in tags: + if args: + for pattern in args: + if fnmatch.fnmatch(tag['name'], pattern): + break + else: + continue + if options.unlocked: + if tag['locked'] or tag['perm']: + continue + if not options.verbose: + print fmt % tag + else: + print fmt % tag, + if tag['locked']: + print ' [LOCKED]', + if tag['perm']: + print ' [%(perm)s perm required]' % tag, + print '' + +def anon_handle_list_tag_history(options, session, args): + "Print a history of tag operations" + usage = _("usage: %prog list-tag-history [options] [pattern]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--debug", action="store_true") + parser.add_option("--build", help=_("Only show data for a specific build")) + parser.add_option("--package", help=_("Only show data for a specific package")) + parser.add_option("--tag", 
help=_("Only show data for a specific tag")) + parser.add_option("--all", action="store_true", help=_("Allows listing the entire global history")) + (options, args) = parser.parse_args(args) + if len(args) != 0: + parser.error(_("This command takes no arguments")) + assert False + kwargs = {} + limited = False + if options.package: + kwargs['package'] = options.package + limited = True + if options.tag: + kwargs['tag'] = options.tag + limited = True + if options.build: + kwargs['build'] = options.build + limited = True + if not limited and not options.all: + parser.error(_("Please specify an option to limit the query")) + + activate_session(session) + + hist = session.tagHistory(**kwargs) + timeline = [] + for x in hist: + event_id = x['revoke_event'] + if event_id is not None: + timeline.append((event_id, x)) + event_id = x['create_event'] + timeline.append((event_id, x)) + timeline.sort() + def _histline(event_id, x): + if event_id == x['revoke_event']: + ts = x['revoke_ts'] + fmt = "Untagged %(name)s-%(version)s-%(release)s from %(tag_name)s" + elif event_id == x['create_event']: + ts = x['create_ts'] + fmt = "Tagged %(name)s-%(version)s-%(release)s with %(tag_name)s" + if x['active']: + fmt += " [still active]" + else: + raise koji.GenericError, "unknown event: (%r, %r)" % (event_id, x) + time_str = time.asctime(time.localtime(ts)) + return "%s: %s" % (time_str, fmt % x) + for event_id, x in timeline: + if options.debug: + print "%r" % x + print _histline(event_id, x) + +def _parseTaskParams(session, method, task_id): + """Parse the return of getTaskRequest()""" + params = session.getTaskRequest(task_id) + + lines = [] + + if method == 'buildFromCVS': + lines.append("CVS URL: %s" % params[0]) + lines.append("Build Target: %s" % params[1]) + elif method == 'buildSRPMFromCVS': + lines.append("CVS URL: %s" % params[0]) + elif method == 'multiArchBuild': + lines.append("SRPM: %s/work/%s" % (options.topdir, params[0])) + lines.append("Build Target: %s" % params[1]) 
+ lines.append("Options:") + for key in params[2].keys(): + if not key == '__starstar': + lines.append(" %s: %s" % (key, params[2][key])) + elif method == 'buildArch': + lines.append("SRPM: %s/work/%s" % (options.topdir, params[0])) + lines.append("Build Tag: %s" % session.getTag(params[1])['name']) + lines.append("Build Arch: %s" % params[2]) + lines.append("SRPM Kept: %r" % params[3]) + if len(params) > 4: + for key in params[4].keys(): + if not key == '__starstar': + lines.append("%s: %s" % (key, params[4][key])) + elif method == 'tagBuild': + build = session.getBuild(params[1]) + lines.append("Destination Tag: %s" % session.getTag(params[0])['name']) + lines.append("Build: %s" % koji.buildLabel(build)) + elif method == 'buildNotification': + build = params[1] + buildTarget = params[2] + lines.append("Recipients: %s" % (", ".join(params[0]))) + lines.append("Build: %s" % koji.buildLabel(build)) + lines.append("Build Target: %s" % buildTarget['name']) + lines.append("Web URL: %s" % params[3]) + elif method == 'build': + lines.append("Source: %s" % params[0]) + lines.append("Build Target: %s" % params[1]) + for key in params[2].keys(): + if not key == '__starstar': + lines.append("%s: %s" % (key, params[2][key])) + elif method == 'runroot': + lines.append("Tag: %s" % params[0]) + lines.append("Arch: %s" % params[1]) + lines.append("Command: %s" % (' '.join(params[2]))) + if len(params) > 3: + for key in params[3].keys(): + if not key == '__starstar': + lines.append("%s: %s" % (key, params[3][key])) + elif method == 'newRepo': + tag = session.getTag(params[0]) + lines.append("Tag: %s" % tag['name']) + elif method == 'prepRepo': + lines.append("Tag: %s" % params[0]['name']) + elif method == 'createrepo': + lines.append("Repo ID: %i" % params[0]) + lines.append("Arch: %s" % params[1]) + oldrepo = params[2] + if oldrepo: + lines.append("Old Repo ID: %i" % oldrepo['id']) + lines.append("Old Repo Creation: %s" % koji.formatTimeLong(oldrepo['creation_time'])) + elif 
method == 'tagNotification': + destTag = session.getTag(params[2]) + srcTag = None + if params[3]: + srcTag = session.getTag(params[3]) + build = session.getBuild(params[4]) + user = session.getUser(params[5]) + + lines.append("Recipients: %s" % ", ".join(params[0])) + lines.append("Successful?: %s" % (params[1] and 'yes' or 'no')) + lines.append("Tagged Into: %s" % destTag['name']) + if srcTag: + lines.append("Moved From: %s" % srcTag['name']) + lines.append("Build: %s" % koji.buildLabel(build)) + lines.append("Tagged By: %s" % user['name']) + lines.append("Ignore Success?: %s" % (params[6] and 'yes' or 'no')) + if params[7]: + lines.append("Failure Message: %s" % params[7]) + elif method == 'dependantTask': + lines.append("Dependant Tasks: %s" % ", ".join([str(depID) for depID in params[0]])) + lines.append("Subtasks:") + for subtask in params[1]: + lines.append(" Method: %s" % subtask[0]) + lines.append(" Parameters: %s" % ", ".join([str(subparam) for subparam in subtask[1]])) + if len(subtask) > 2 and subtask[2]: + lines.append(" Options:") + subopts = subtask[2] + for key in subopts: + if not key == '__starstar': + lines.append(" %s: %s" % (key, subopts[key])) + lines.append("") + + return lines + +def _printTaskInfo(session, task_id, level=0, recurse=True, verbose=True): + """Recursive function to print information about a task + and its children.""" + + BUILDDIR = '/var/lib/mock' + indent = " "*2*level + + info = session.getTaskInfo(task_id) + if info['host_id']: + host_info = session.getHost(info['host_id']) + else: + host_info = None + buildroot_infos = session.listBuildroots(taskID=task_id) + build_info = session.listBuilds(taskID=task_id) + + files = session.listTaskOutput(task_id) + logs = [filename for filename in files if filename.endswith('.log')] + output = [filename for filename in files if not filename.endswith('.log')] + files_dir = '%s/tasks/%i' % (koji.pathinfo.work(), task_id) + + owner = session.getUser(info['owner'])['name'] + + print 
"%sTask: %d" % (indent, task_id) + print "%sType: %s" % (indent, info['method']) + if verbose: + print "%sRequest Parameters:" % indent + for line in _parseTaskParams(session, info['method'], task_id): + print "%s %s" % (indent, line) + print "%sOwner: %s" % (indent, owner) + print "%sState: %s" % (indent, koji.TASK_STATES[info['state']].lower()) + if host_info: + print "%sHost: %s" % (indent, host_info['name']) + if build_info: + print "%sBuild: %s (%d)" % (indent, build_info[0]['nvr'], build_info[0]['build_id']) + if buildroot_infos: + print "%sBuildroots:" % indent + for root in buildroot_infos: + print "%s %s/%s-%d-%d/" % (indent, BUILDDIR, root['tag_name'], root['id'], root['repo_id']) + if logs: + print "%sLog Files:" % indent + for log in logs: + print "%s %s/%s" % (indent, files_dir, log) + if output: + print "%sOutput:" % indent + for filename in output: + print "%s %s/%s" % (indent, files_dir, filename) + + # white space + sys.stdout.write("\n") + + if recurse: + level += 1 + children = session.getTaskChildren(task_id) + for child in children: + _printTaskInfo(session, child['id'], level, verbose=verbose) + +def anon_handle_taskinfo(options, session, args): + """Show information about a task""" + usage = _("usage: %prog taskinfo [options] task_id") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--recurse", action="store_true", help=_("Show children of this task as well")) + parser.add_option("-v", "--verbose", action="store_true", help=_("Be verbose")) + (options, args) = parser.parse_args(args) + if len(args) != 1: + parser.error(_("This command takes exctly one argument: a task ID")) + assert False + + activate_session(session) + + task_id = int(args[0]) + + _printTaskInfo(session, task_id, 0, options.recurse, options.verbose) + +def anon_handle_taginfo(options, session, args): + "Print basic information about a tag" + usage = _("usage: %prog taginfo 
[options] [ ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("Please specify a tag")) + assert False + activate_session(session) + perms = dict([(p['id'], p['name']) for p in session.getAllPerms()]) + for tag in args: + info = session.getTag(tag) + if info is None: + print "No such tag: %s\n" % tag + continue + print "Tag: %(name)s [%(id)d]" %info + print "Arches: %(arches)s" %info + if info.get('locked'): + print 'LOCKED' + if info.get('perm_id') is not None: + perm_id = info['perm_id'] + print "Required permission: %r" % perms.get(perm_id, perm_id) + dest_targets = session.getBuildTargets(destTagID=info['id']) + build_targets = session.getBuildTargets(buildTagID=info['id']) + repos = {} + for target in dest_targets + build_targets: + if not repos.has_key(target['build_tag']): + repo = session.getRepo(target['build_tag']) + if repo is None: + repos[target['build_tag']] = "no active repo" + else: + repos[target['build_tag']] = "repo#%(id)i: %(creation_time)s" % repo + if dest_targets: + print "Targets that build into this tag:" + for target in dest_targets: + print " %s (%s, %s)" % (target['name'], target['build_tag_name'], repos[target['build_tag']]) + if build_targets: + print "This tag is a buildroot for one or more targets" + print "Current repo: %s" % repos[target['build_tag']] + print "Targets that build from this tag:" + for target in build_targets: + print " %s" % target['name'] + print "Inheritance:" + for parent in session.getInheritanceData(tag): + flags = '' + for code,expr in ( + ('M',parent['maxdepth'] is not None), + ('F',parent['pkg_filter']), + ('I',parent['intransitive']), + ('N',parent['noconfig']),): + if expr: + flags += code + else: + flags += '.' 
+ parent['flags'] = flags + print " %(priority)-4d %(flags)s %(name)s [%(parent_id)s]" % parent + if parent['maxdepth']: + print " maxdepth: %(maxdepth)s" % parent + if parent['pkg_filter']: + print " packge filter: %(filter)s" % parent + print + +def handle_add_tag(options, session, args): + "[admin] Add a new tag to the database" + usage = _("usage: %prog add-tag [options] name") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--parent", help=_("Specify parent")) + parser.add_option("--arches", help=_("Specify arches")) + (options, args) = parser.parse_args(args) + if len(args) != 1: + parser.error(_("Please specify a name for the tag")) + assert False + activate_session(session) + if not session.hasPerm('admin'): + print "This action requires admin privileges" + return + opts = {} + if options.parent: + opts['parent'] = options.parent + if options.arches: + opts['arches'] = ' '.join(options.arches.replace(',',' ').split()) + session.createTag(args[0],**opts) + +def handle_edit_tag(options, session, args): + "[admin] Alter tag information" + usage = _("usage: %prog edit-tag [options] name") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--arches", help=_("Specify arches")) + parser.add_option("--perm", help=_("Specify permission requirement")) + parser.add_option("--no-perm", action="store_true", help=_("Remove permission requirement")) + parser.add_option("--lock", action="store_true", help=_("Lock the tag")) + parser.add_option("--unlock", action="store_true", help=_("Unlock the tag")) + parser.add_option("--rename", help=_("Rename the tag")) + (options, args) = parser.parse_args(args) + if len(args) != 1: + parser.error(_("Please specify a name for the tag")) + assert False + activate_session(session) + tag = args[0] + opts = {} + if options.arches: + opts['arches'] = ' 
'.join(options.arches.replace(',',' ').split()) + if options.no_perm: + opts['perm_id'] = None + elif options.perm: + opts['perm'] = options.perm + if options.unlock: + opts['locked'] = False + if options.lock: + opts['locked'] = True + if options.rename: + opts['name'] = options.rename + #XXX change callname + session.editTag2(tag,**opts) + +def handle_lock_tag(options, session, args): + "[admin] Lock a tag" + usage = _("usage: %prog lock-tag [options] [ ...] ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--perm", help=_("Specify permission requirement")) + parser.add_option("--glob", action="store_true", help=_("Treat args as glob patterns")) + parser.add_option("--master", action="store_true", help=_("Lock the master lock")) + parser.add_option("-n", "--test", action="store_true", help=_("Test mode")) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("Please specify a tag")) + assert False + activate_session(session) + pdata = session.getAllPerms() + perms = dict([(p['id'], p['name']) for p in pdata]) + perm_ids = dict([(p['name'], p['id']) for p in pdata]) + perm = options.perm + if perm is None: + perm = 'admin' + perm_id = perm_ids[perm] + if options.glob: + selected = [] + for tag in session.listTags(): + for pattern in args: + if fnmatch.fnmatch(tag['name'], pattern): + selected.append(tag) + break + if not selected: + print _("No tags matched") + else: + selected = [session.getTag(name) for name in args] + for tag in selected: + if options.master: + #set the master lock + if tag['locked']: + print _("Tag %s: master lock already set") % tag['name'] + continue + elif options.test: + print _("Would have set master lock for: %s") % tag['name'] + continue + session.editTag2(tag['id'], locked=True) + else: + if tag['perm_id'] == perm_id: + print _("Tag %s: %s permission already required") % (tag['name'], perm) + continue + elif 
options.test: + print _("Would have set permission requirement %s for tag %s") % (perm, tag['name']) + continue + session.editTag2(tag['id'], perm=perm_id) + +def handle_unlock_tag(options, session, args): + "[admin] Unlock a tag" + usage = _("usage: %prog unlock-tag [options] [ ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--glob", action="store_true", help=_("Treat args as glob patterns")) + parser.add_option("-n", "--test", action="store_true", help=_("Test mode")) + (options, args) = parser.parse_args(args) + if len(args) < 1: + parser.error(_("Please specify a tag")) + assert False + activate_session(session) + if options.glob: + selected = [] + for tag in session.listTags(): + for pattern in args: + if fnmatch.fnmatch(tag['name'], pattern): + selected.append(tag) + break + if not selected: + print _("No tags matched") + else: + selected = [] + for name in args: + tag = session.getTag(name) + if tag is None: + parser.error(_("No such tag: %s") % name) + assert False + selected.append(tag) + selected = [session.getTag(name) for name in args] + for tag in selected: + opts = {} + if tag['locked']: + opts['locked'] = False + if tag['perm_id']: + opts['perm'] = None + if not opts: + print "Tag %(name)s: not locked" % tag + continue + if options.test: + print "Tag %s: skipping changes: %r" % (tag['name'], opts) + else: + session.editTag2(tag['id'], locked=False, perm_id=None) + +def handle_add_tag_inheritance(options, session, args): + """[admin] Add to a tag's inheritance""" + usage = _("usage: %prog add-tag-inheritance [options] tag parent-tag") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--priority", help=_("Specify priority")) + parser.add_option("--maxdepth", help=_("Specify max depth")) + parser.add_option("--intransitive", action="store_true", help=_("Set 
intransitive")) + parser.add_option("--noconfig", action="store_true", help=_("Set to packages only")) + parser.add_option("--pkg-filter", help=_("Specify the package filter")) + parser.add_option("--force", help=_("Force adding a parent to a tag that already has that parent tag")) + (options, args) = parser.parse_args(args) + + if len(args) != 2: + parser.error(_("This command takes exctly two argument: a tag name or ID and that tag's new parent name or ID")) + assert False + + activate_session(session) + + tag = session.getTag(args[0]) + if not tag: + parser.error(_("Invalid tag: %s" % args[0])) + + parent = session.getTag(args[1]) + if not parent: + parser.error(_("Invalid tag: %s" % args[1])) + + inheritanceData = session.getInheritanceData(tag['id']) + priority = options.priority and int(options.priority) or 0 + sameParents = [datum for datum in inheritanceData if datum['parent_id'] == parent['id']] + samePriority = [datum for datum in inheritanceData if datum['priority'] == priority] + + if sameParents and not options.force: + print _("Error: You are attempting to add %s as %s's parent even though it already is %s's parent." + % (parent['name'], tag['name'], tag['name'])) + print _("Please use --force if this is what you really want to do.") + return + if samePriority: + print _("Error: There is already an active inheritance with that priority on %s, please specify a different priority with --priority." 
% tag['name']) + return + + new_data = {} + new_data['parent_id'] = parent['id'] + new_data['priority'] = options.priority or 0 + if options.maxdepth and options.maxdepth.isdigit(): + new_data['maxdepth'] = int(options.maxdepth) + else: + new_data['maxdepth'] = None + new_data['intransitive'] = options.intransitive or False + new_data['noconfig'] = options.noconfig or False + new_data['pkg_filter'] = options.pkg_filter or '' + + inheritanceData.append(new_data) + session.setInheritanceData(tag['id'], inheritanceData) + + +def handle_edit_tag_inheritance(options, session, args): + """[admin] Edit tag inheritance""" + usage = _("usage: %prog edit-tag-inheritance [options] tag ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--priority", help=_("Specify a new priority")) + parser.add_option("--maxdepth", help=_("Specify max depth")) + parser.add_option("--intransitive", action="store_true", help=_("Set intransitive")) + parser.add_option("--noconfig", action="store_true", help=_("Set to packages only")) + parser.add_option("--pkg-filter", help=_("Specify the package filter")) + (options, args) = parser.parse_args(args) + + if len(args) < 1: + parser.error(_("This command takes at lease one argument: a tag name or ID")) + assert False + + if len(args) > 3: + parser.error(_("This command takes at most three argument: a tag name or ID, a parent tag name or ID, and a priority")) + assert False + + activate_session(session) + + tag = session.getTag(args[0]) + if not tag: + parser.error(_("Invalid tag: %s" % args[0])) + + parent = None + priority = None + if len(args) > 1: + parent = session.getTag(args[1]) + if not parent: + parser.error(_("Invalid tag: %s" % args[1])) + if len(args) > 2: + priority = args[2] + + data = session.getInheritanceData(tag['id']) + if parent and data: + data = [datum for datum in data if datum['parent_id'] == parent['id']] + if priority and data: + 
data = [datum for datum in data if datum['priority'] == priority] + + if len(data) == 0: + print _("No inheritance link found to remove. Please check your arguments") + return + elif len(data) > 1: + print _("Multiple matches for tag.") + if not parent: + print _("Please specify a parent on the command line.") + return + if not priority: + print _("Please specify a priority on the command line.") + return + print _("Error: Key constrainsts may be broken. Exiting.") + return + + # len(data) == 1 + data = data[0] + + inheritanceData = session.getInheritanceData(tag['id']) + samePriority = [datum for datum in inheritanceData if datum['priority'] == options.priority] + if samePriority: + print _("Error: There is already an active inheritance with that priority on %s, please specify a different priority with --priority." % tag['name']) + return + + new_data = data.copy() + if options.priority is not None and options.priority.isdigit(): + new_data['priority'] = int(options.priority) + if options.maxdepth is not None and options.maxdepth.isdigit(): + new_data['maxdepth'] = int(options.maxdepth) + if options.intransitive: + new_data['intransitive'] = options.intransitive + if options.noconfig: + new_data['noconfig'] = options.noconfig + if options.pkg_filter: + new_data['pkg_filter'] = options.pkg_filter + + # find the data we want to edit and replace it + index = inheritanceData.index(data) + inheritanceData[index] = new_data + session.setInheritanceData(tag['id'], inheritanceData) + +def handle_remove_tag_inheritance(options, session, args): + """[admin] Remove a tag inheritance link""" + usage = _("usage: %prog remove-tag-inheritance tag ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + + if len(args) < 1: + parser.error(_("This command takes at lease one argument: a tag name or ID")) + assert False + + if len(args) > 3: + parser.error(_("This 
command takes at most three argument: a tag name or ID, a parent tag name or ID, and a priority")) + assert False + + activate_session(session) + + tag = session.getTag(args[0]) + if not tag: + parser.error(_("Invalid tag: %s" % args[0])) + + parent = None + priority = None + if len(args) > 1: + parent = session.getTag(args[1]) + if not parent: + parser.error(_("Invalid tag: %s" % args[1])) + if len(args) > 2: + priority = args[2] + + data = session.getInheritanceData(tag['id']) + if parent and data: + data = [datum for datum in data if datum['parent_id'] == parent['id']] + if priority and data: + data = [datum for datum in data if datum['priority'] == priority] + + if len(data) == 0: + print _("No inheritance link found to remove. Please check your arguments") + return + elif len(data) > 1: + print _("Multiple matches for tag.") + if not parent: + print _("Please specify a parent on the command line.") + return + if not priority: + print _("Please specify a priority on the command line.") + return + print _("Error: Key constrainsts may be broken. 
Exiting.") + return + + # len(data) == 1 + data = data[0] + + inheritanceData = session.getInheritanceData(tag['id']) + + new_data = data.copy() + new_data['delete link'] = True + + # find the data we want to edit and replace it + index = inheritanceData.index(data) + inheritanceData[index] = new_data + session.setInheritanceData(tag['id'], inheritanceData) + +def anon_handle_show_groups(options, session, args): + "Show groups data for a tag" + usage = _("usage: %prog show-groups [options] ") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--comps", action="store_true", help=_("Print in comps format")) + parser.add_option("--spec", action="store_true", help=_("Print build spec")) + (options, args) = parser.parse_args(args) + if len(args) != 1: + parser.error(_("Incorrect number of arguments")) + assert False + activate_session(session) + tag = args[0] + groups = session.getTagGroups(tag) + if options.comps: + print koji.generate_comps(groups) + elif options.spec: + print koji.make_groups_spec(groups,name='buildgroups',buildgroup='build') + else: + pprint.pprint(groups) + +def handle_free_task(options, session, args): + "[admin] Free a task" + usage = _("usage: %prog free-task [options] [ ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + activate_session(session) + tlist = [] + for task_id in args: + try: + tlist.append(int(task_id)) + except ValueError: + parser.error(_("task-id must be an integer")) + assert False + for task_id in tlist: + session.freeTask(task_id) + +def handle_cancel(options, session, args): + "Cancel tasks and/or builds" + usage = _("usage: %prog cancel [options] [ ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--justone", 
action="store_true", help=_("Do not cancel subtasks")) + parser.add_option("--full", action="store_true", help=_("Full cancellation (admin only)")) + parser.add_option("--force", action="store_true", help=_("Allow subtasks with --full")) + (options, args) = parser.parse_args(args) + if len(args) == 0: + parser.error(_("You must specify at least one task id or build")) + assert False + activate_session(session) + tlist = [] + blist = [] + for arg in args: + try: + tlist.append(int(arg)) + except ValueError: + try: + koji.parse_NVR(arg) + blist.append(arg) + except koji.GenericError: + parser.error(_("please specify only task ids (integer) or builds (n-v-r)")) + assert False + if tlist: + opts = {} + remote_fn = session.cancelTask + if options.justone: + opts['recurse'] = False + elif options.full: + remote_fn = session.cancelTaskFull + if options.force: + opts['strict'] = False + for task_id in tlist: + remote_fn(task_id, **opts) + for build in blist: + session.cancelBuild(build) + +def handle_list_tasks(options, session, args): + "Print the list of tasks" + usage = _("usage: %prog list-tasks [options]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--mine", action="store_true", help=_("Just print your tasks")) + parser.add_option("--quiet", action="store_true", help=_("Do not display the column headers")) + (options, args) = parser.parse_args(args) + if len(args) != 0: + parser.error(_("This command takes no arguments")) + assert False + activate_session(session) + if options.mine: + id = session.getLoggedInUser()['id'] + else: + id = None + tasklist = session.taskReport(owner=id) + #tasks are pre-sorted + tasks = dict([(x['id'], x) for x in tasklist]) + #thread the tasks + if not tasklist: + print "(no tasks)" + return + for t in tasklist: + if t['parent'] is not None: + parent = tasks.get(t['parent']) + if parent: + parent.setdefault('children',[]) + 
parent['children'].append(t) + t['sub'] = True + seen = {} + if not options.quiet: + print_task_headers() + for t in tasklist: + if t.get('sub'): + # this subtask will appear under another task + continue + print_task_recurse(t) + +def handle_set_pkg_arches(options, session, args): + "[admin] Set the list of extra arches for a package" + usage = _("usage: %prog set-pkg-arches [options] arches tag package [package2 ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--force", action='store_true', help=_("Force operation")) + (options, args) = parser.parse_args(args) + if len(args) < 3: + parser.error(_("Please specify an archlist, a tag, and at least one package")) + assert False + activate_session(session) + arches = ' '.join(args[0].replace(',',' ').split()) + tag = args[1] + for package in args[2:]: + #really should implement multicall... + session.packageListSetArches(tag,package,arches,force=options.force) + +def handle_set_pkg_owner(options, session, args): + "[admin] Set the owner for a package" + usage = _("usage: %prog set-pkg-owner [options] owner tag package [package2 ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--force", action='store_true', help=_("Force operation")) + (options, args) = parser.parse_args(args) + if len(args) < 3: + parser.error(_("Please specify an owner, a tag, and at least one package")) + assert False + activate_session(session) + owner = args[0] + tag = args[1] + for package in args[2:]: + #really should implement multicall... 
+ session.packageListSetOwner(tag,package,owner,force=options.force) + +def handle_set_pkg_owner_global(options, session, args): + "[admin] Set the owner for a package globally" + usage = _("usage: %prog set-pkg-owner-global [options] owner package [package2 ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--verbose", action='store_true', help=_("List changes")) + parser.add_option("--test", action='store_true', help=_("Test mode")) + (options, args) = parser.parse_args(args) + if len(args) < 2: + parser.error(_("Please specify an owner and at least one package")) + assert False + activate_session(session) + owner = args[0] + user = session.getUser(owner) + if not user: + print "No such user: %s" % owner + sys.exit(1) + for package in args[1:]: + entries = session.listPackages(pkgID=package, with_dups=True) + if not entries: + print "No data for package %s" % package + continue + elif len(entries) > 1: + # since we specified exactly one package, the list should + # only have one entry + raise koji.GenericError, "Unexpected return format" + entries = entries[0] + for entry in entries: + if user['id'] == entry['owner_id']: + if options.verbose: + print "Preserving owner=%s for package %s in tag %s" \ + % (user['name'], package, entry['tag_name'] ) + else: + if options.test: + print "Would have changed owner for %s in tag %s: %s -> %s" \ + % (package, entry['tag_name'], entry['owner_name'], user['name']) + continue + if options.verbose: + print "Changing owner for %s in tag %s: %s -> %s" \ + % (package, entry['tag_name'], entry['owner_name'], user['name']) + session.packageListSetOwner(entry['tag_id'], package, user['id']) + +def anon_handle_watch_task(options, session, args): + "Track progress of particular tasks" + usage = _("usage: %prog watch-task [options] [...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = 
OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + activate_session(session) + tasks = [] + for task in args: + try: + tasks.append(int(task)) + except ValueError: + parser.error(_("task id must be an integer")) + if not tasks: + parser.error(_("at least one task id must be specified")) + + watch_tasks(session,tasks) + +def anon_handle_watch_logs(options, session, args): + "Watch logs in realtime" + usage = _("usage: %prog watch-logs [options] [...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--log", help=_("Watch only a specific log")) + (options, args) = parser.parse_args(args) + activate_session(session) + + tasks = [] + for task in args: + try: + tasks.append(int(task)) + except ValueError: + parser.error(_("task id must be an integer")) + if not tasks: + parser.error(_("at least one task id must be specified")) + + watch_logs(session, tasks, options) + +def handle_runroot(options, session, args): + "[admin] Run a command in a buildroot" + usage = _("usage: %prog runroot [options] target arch command") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.disable_interspersed_args() + parser.add_option("--package", action="append", default=[], help=_("make sure this package is in the chroot")) + parser.add_option("--mount", action="append", default=[], help=_("mount this directory read-write in the chroot")) + parser.add_option("--keep", action="store_true", default=False, + help=_("Preserve the chroot after running (for debugging)")) + parser.add_option("--skip-setarch", action="store_true", default=False, + help=_("Do not use setarch")) + parser.add_option("--use-shell", action="store_true", default=False, + help=_("Run command through a shell, otherwise uses exec")) + (options, args) = parser.parse_args(args) + if len(args) < 3: + parser.error(_("Incorrect 
number of arguments")) + assert False + activate_session(session) + target = args[0] + arch = args[1] + if options.use_shell: + # everything must be correctly quoted + command = ' '.join(args[2:]) + else: + command = args[2:] + task_id = session.runroot(target, arch, command, keep=options.keep, + packages=options.package, mounts=options.mount, + skip_setarch=options.skip_setarch) + try: + while True: + #wait for the task to finish + if session.taskFinished(task_id): + break + time.sleep(2) + except KeyboardInterrupt: + # this is probably the right thing to do here + print "User interrupt: canceling runroot task" + session.cancelTask(task_id) + return + output = None + try: + output = session.downloadTaskOutput(task_id, "runroot.log") + except koji.GenericError: + pass + if output: + sys.stdout.write(output) + return + +def handle_make_task(options, session, args): + "[admin] Create an arbitrary task" + usage = _("usage: %prog make-task [options] [...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--channel", help=_("set channel")) + parser.add_option("--priority", help=_("set priority")) + parser.add_option("--watch", action="store_true", help=_("watch the task")) + parser.add_option("--arch", help=_("set arch")) + (options, args) = parser.parse_args(args) + activate_session(session) + + taskopts = {} + method = args[0] + taskargs = map(arg_filter,args[1:]) + for key in ('channel','priority','arch'): + value = getattr(options,key,None) + if value is not None: + taskopts[key] = value + task_id = session.makeTask(method=args[0], + arglist=map(arg_filter,args[1:]), + opts=taskopts) + print "Created task id %d" % task_id + if options.watch: + watch_tasks(session,[task_id]) + +def handle_tag_pkg(options, session, args): + "Apply a tag to one or more packages" + usage = _("usage: %prog tag-pkg [options] [...]") + usage += _("\n(Specify the --help global option for a list 
of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--force", action="store_true", help=_("force operation")) + parser.add_option("--nowait", action="store_true", help=_("Do not wait on task")) + (options, args) = parser.parse_args(args) + if len(args) < 2: + parser.error(_("This command takes at least two arguments: a tag name/ID and one or more package n-v-r's")) + assert False + activate_session(session) + tasks = [] + for pkg in args[1:]: + task_id = session.tagBuild(args[0], pkg, force=options.force) + #XXX - wait on task + tasks.append(task_id) + print "Created task %s" % task_id + if _running_in_bg() or options.nowait: + return + else: + watch_tasks(session,tasks) + +def handle_move_pkg(options, session, args): + "'Move' one or more packages between tags" + usage = _("usage: %prog move-pkg [options] [...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--force", action="store_true", help=_("force operation")) + parser.add_option("--nowait", action="store_true", help=_("do not wait on tasks")) + parser.add_option("--all", action="store_true", help=_("move all instances of a package, 's are package names")) + (options, args) = parser.parse_args(args) + if len(args) < 3: + if options.all: + parser.error(_("This command, with --all, takes at least three arguments: two tags and one or more package names")) + else: + parser.error(_("This command takes at least three arguments: two tags and one or more package n-v-r's")) + assert False + activate_session(session) + tasks = [] + builds = [] + + if options.all: + for arg in args[2:]: + pkg = session.getPackage(arg) + if not pkg: + print _("Invalid package name %s, skipping." 
% arg) + continue + tasklist = session.moveAllBuilds(args[0], args[1], arg, options.force) + tasks.extend(tasklist) + else: + for arg in args[2:]: + build = session.getBuild(arg) + if not build: + print _("Invalid build %s, skipping." % arg) + continue + if not build in builds: + builds.append(build) + + for build in builds: + task_id = session.moveBuild(args[0], args[1], build['id'], options.force) + tasks.append(task_id) + print "Created task %s, moving %s" % (task_id, koji.buildLabel(build)) + if _running_in_bg() or options.nowait: + return + else: + watch_tasks(session,tasks) + +def handle_untag_pkg(options, session, args): + "Remove a tag from one or more packages" + usage = _("usage: %prog untag-pkg [options] [...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--all", action="store_true", help=_("untag all versions of the package in this tag")) + parser.add_option("--force", action="store_true", help=_("force operation")) + (options, args) = parser.parse_args(args) + if len(args) < 2: + parser.error(_("This command takes at least two arguments: a tag name/ID and one or more package n-v-r's")) + assert False + activate_session(session) + if options.all: + pkgs = [] + for pkg in args[1:]: + pkgs.extend([x['nvr'] for x in session.listTagged(args[0], package=pkg)]) + else: + pkgs = args[1:] + for pkg in pkgs: + print pkg + #XXX trap errors + session.untagBuild(args[0], pkg, force=options.force) + +def handle_unblock_pkg(options, session, args): + "[admin] Unblock a package in the listing for tag" + usage = _("usage: %prog unblock-pkg [options] tag package [package2 ...]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + (options, args) = parser.parse_args(args) + if len(args) < 2: + parser.error(_("Please specify a tag and at least one package")) + assert False + activate_session(session) + 
tag = args[0] + for package in args[1:]: + #really should implement multicall... + session.packageListUnblock(tag,package) + +def handle_help(options, session, args): + "List available commands" + usage = _("usage: %prog help [options]") + usage += _("\n(Specify the --help global option for a list of other help options)") + parser = OptionParser(usage=usage) + parser.add_option("--admin", action="store_true", help=_("show admin commands")) + (options, args) = parser.parse_args(args) + list_commands(show_admin=options.admin) + + +def list_commands(show_admin=False): + handlers = [] + for name,value in globals().items(): + if name.startswith('handle_'): + alias = name.replace('handle_','') + alias = alias.replace('_','-') + handlers.append((alias,value)) + elif name.startswith('anon_handle_'): + alias = name.replace('anon_handle_','') + alias = alias.replace('_','-') + handlers.append((alias,value)) + handlers.sort() + print _("Available commands:") + for alias,handler in handlers: + desc = handler.__doc__ + if desc.startswith('[admin] '): + if not show_admin: + continue + desc = desc[8:] + print " %-20s %s" % (alias, desc) + print _('(Type "koji --help" for help about global options') + print _(' or "koji --help" for help about a particular command\'s options.)') + +def error(msg=None, code=1): + if msg: + sys.stderr.write(msg + "\n") + sys.stderr.flush() + sys.exit(code) + +def warn(msg): + sys.stderr.write(msg + "\n") + sys.stderr.flush() + +def activate_session(session): + """Test and login the session is applicable""" + global options + if options.noauth: + #skip authentication + pass + elif options.user: + #authenticate using user/password + session.login() + elif sys.modules.has_key('krbV'): + try: + if options.keytab and options.principal: + session.krb_login(principal=options.principal, keytab=options.keytab, proxyuser=options.runas) + else: + session.krb_login(proxyuser=options.runas) + except krbV.Krb5Error, e: + error(_("Kerberos authentication failed: 
'%s' (%s)") % (e.message, e.err_code)) + except socket.error, e: + warn(_("Could not connect to Kerberos authentication service: '%s'") % e.args[1]) + if not options.noauth and not session.logged_in: + error(_("Error: unable to log in")) + ensure_connection(session) + if options.debug: + print "successfully connected to hub" + +if __name__ == "__main__": + + options, command, args = get_options() + + session_opts = {} + for k in ('user', 'password', 'debug_xmlrpc', 'debug'): + session_opts[k] = getattr(options,k) + session = koji.ClientSession(options.server,session_opts) + rv = 0 + try: + rv = locals()[command].__call__(options, session, args) + if not rv: + rv = 0 + except KeyboardInterrupt: + pass + except SystemExit: + rv = 1 + except: + if options.debug: + raise + else: + exctype, value = sys.exc_info()[:2] + rv = 1 + print "%s: %s" % (exctype, value) + try: + session.logout() + except: + pass + sys.exit(rv) diff --git a/cli/koji.conf b/cli/koji.conf new file mode 100644 index 00000000..de4b4f4d --- /dev/null +++ b/cli/koji.conf @@ -0,0 +1,13 @@ +[koji] + +;configuration for koji cli tool + +;url of XMLRPC server +;server = http://hub.example.com/kojihub + +;url of web interface +;weburl = http://www.example.com/koji + +;path to the koji top directory +;topdir = /mnt/koji + diff --git a/docs/HOWTO.html b/docs/HOWTO.html new file mode 100644 index 00000000..2abb3e04 --- /dev/null +++ b/docs/HOWTO.html @@ -0,0 +1,321 @@ + + + Koji HOWTO + + +

Introduction

+ + Koji is a system for building and tracking RPMs. It was designed with the following + features in mind: + +

+ Security +

    +
  • New buildroot for each build
  • +
  • nfs is used (mostly) read-only
  • +
+ + Leverage other software +
    +
  • Uses Yum and Mock open-source components
  • +
  • XML-RPC APIs for easy integration with other tools
  • +
+ + Flexibility +
    +
  • rich data model
  • +
  • active code base
  • +
+ + Usability +
    +
  • Web interface with Kerberos authentication
  • +
  • Thin, portable client
  • +
  • Users can create local buildroots
  • +
+ + Reproducibility +
    +
  • Buildroot contents are tracked in the database
  • +
  • Versioned data
  • +
+ + +

This HOWTO document covers the basic tasks that a developer needs to be + able to accomplish with Koji. +

+ +

Getting started

+ +

The web interface

+

The primary interface for viewing Koji data is a web application. Most of the interface + is read-only, but if you are logged in (see below) and have sufficient privileges there + are some actions that can be performed through the web. For example:

    +
  • Cancel a build
  • +
  • Resubmit a failed task
  • +
+ Those with admin privileges will find additional actions, such as: +
    +
  • Create/Edit/Delete a tag
  • +
  • Create/Edit/Delete a target
  • +
  • Enable/Disable a build host
  • +
+ +

The web site utilizes Kerberos authentication. In order to log in you will + need a valid Kerberos ticket and your web browser will need to be configured to send the + Kerberos information to the server. + +

In Firefox or Mozilla, you will need to use the about:config page to set a few parameters. + Use the search term 'negotiate' to filter the list. Change + network.negotiate-auth.trusted-uris to the domain you want to authenticate against, + e.g. .example.com. You can leave network.negotiate-auth.delegation-uris blank, as it + enables Kerberos ticket passing, which is not required. If you do not see those two + config options listed, your version of Firefox or Mozilla may be too old to support + Negotiate authentication, and you should consider upgrading.

In order to obtain a Kerberos ticket, use the kinit command. + + +

Installing the Koji cli

+

There is a single point of entry for most operations. The command is + called 'koji' and is included in the main koji package. + +

Repos/webpage TBD + +

+ The koji tool authenticates to the central server using Kerberos, so you will need + to have a valid Kerberos ticket to use many features. However, many of the read-only + commands will work without authentication. + +

Building a package

+

Builds are initiated with the command line tool. + To build a package, the syntax is:

+
$ koji build <build target> <cvs URL>
+ +

For example:

+
$ koji build dist-fc7-scratch 'cvs://cvs.example.com/cvs/dist?rpms/kernel/FC-7#kernel-2_6_20-1_2925_fc7'
+

+ The koji build command creates a build task in Koji. By default + the tool will wait + and print status updates until the build completes. You can override this with + the --nowait option. To view other options to the build command use the + --help option. +

+ +
$ koji build --help
+
+ +

Build Options

+

+ There are a few options to the build command. Here are some more detailed explanations + of them: +

+ +
+
--skip-tag
+
Normally the package is tagged after the build completes. This option causes + the tagging step to be skipped. The package will be in the system, but untagged + (you can later tag it with the tag-pkg command)
+
--scratch
+
This makes the build into a scratch build. The build will not be + imported into the db, it will just be built. The rpms will land under + <topdir>/scratch. Scratch builds are not tracked and can never + be tagged, but can be convenient for testing. Scratch builds are + typically removed from the filesystem after one week. +
+
--nowait
+
As stated above, this prevents the cli from waiting on the build task.
+
--arch-override
+
This option allows you to override the base set of arches to build for. + This option is really only for testing during the beta period, but it may + be retained for scratch builds in the future.
+
+ +

Build Failures

+

If your package fails to build, you will see something like this.

+
+      420066 buildArch (kernel-2.6.18-1.2739.10.9.el5.jjf.215394.2.src.rpm,
+      ia64): open (build-1.example.com) -> FAILED: BuildrootError:
+      error building package (arch ia64), mock exited with status 10
+    
+ +

You can figure out why the build failed by looking at the log files. If + there is a build.log, start there. Otherwise, look at init.log

+ +
+      $ ls -1 <topdir>/work/tasks/420066/*
+      <topdir>/work/tasks/420066/build.log
+      <topdir>/work/tasks/420066/init.log
+      <topdir>/work/tasks/420066/mockconfig.log
+      <topdir>/work/tasks/420066/root.log
+    
+ +

Filing Bugs

+ +

bug tracking TBD + +

Koji Architecture

+ +

Terminology

+ + In Koji, it is sometimes necessary to distinguish between a package in general, + a specific build of a package, and the various rpm files created by a build. When + precision is needed, these terms should be interpreted as follows: + +
+
Package
+
The name of a source rpm. This refers to the package in general and not + any particular build or subpackage. For example: kernel, glibc, etc.
+
Build
+
A particular build of a package. This refers to the entire build: all arches + and subpackages. For example: kernel-2.6.9-34.EL, glibc-2.3.4-2.19.
+
RPM
+
A particular rpm. A specific arch and subpackage of a build. + For example: kernel-2.6.9-34.EL.x86_64, kernel-devel-2.6.9-34.EL.s390, + glibc-2.3.4-2.19.i686, glibc-common-2.3.4-2.19.ia64
+
+ + +

Koji Components

+ + Koji is comprised of several components: + +
    +
  • koji-hub is the center of all Koji operations. It is an XML-RPC server + running under mod_python in Apache. koji-hub is passive in that it only + receives XML-RPC calls and relies upon the build daemons and other + components to initiate communication. koji-hub is the only component that + has direct access to the database and is one of the two components that have + write access to the file system.
  • + +
  • kojid is the build daemon that runs on each of the build machines. Its + primary responsibility is polling for incoming build requests and handling + them accordingly. Koji also has support for tasks other than building. + Creating install images is one example. kojid is responsible for handling + these tasks as well. + +

    kojid uses mock for building. It also creates a fresh buildroot for + every build. kojid is written in Python and communicates with koji-hub via + XML-RPC.

  • + +
  • koji-web is a set of scripts that run in mod_python and use the Cheetah + templating engine to provide a web interface to Koji. koji-web exposes a + lot of information and also provides a means for certain operations, such as + cancelling builds.
  • + +
  • koji is a CLI written in Python that provides many hooks into + Koji. It allows the user to query much of the data as well as perform + actions such as build initiation.
  • + +
  • kojirepod is a daemon that keeps the build root repodata + updated.
  • + +
+ +

Package Organization

+

Tags and Targets

+

Koji organizes packages using tags. In Koji a tag is roughly analogous to + a beehive collection instance, but differs in a number of ways:

+
    +
  • Tags are tracked in the database but not on disk
  • +
  • Tags support multiple inheritance
  • +
  • Each tag has its own list of valid packages (inheritable)
  • +
  • Package ownership can be set per-tag (inheritable)
  • +
  • Tag inheritance is more configurable
  • +
  • When you build you specify a target rather than a tag
  • +
+

+ A build target specifies where a package should be built and how it + should be tagged afterwards. This allows target names to remain fixed + as tags change through releases. You can get a full list of build targets + with the following command:

+
$ koji list-targets
+
+ You can see just a single target with the --name option: +
$ koji list-targets --name dist-fc7
+Name                           Buildroot                      Destination
+---------------------------------------------------------------------------------------------
+dist-fc7                       dist-fc7-build                 dist-fc7
+
+ This tells you a build for target dist-fc7 will use a buildroot with packages + from the tag dist-fc7-build and tag the resulting packages as dist-fc7. +

+ You can get a list of tags with the following command:

+
$ koji list-tags
+
+

Package lists

+

+ As mentioned above, each tag has its own list of packages that may be placed + in the tag. To see that list for a tag, use the list-pkgs command:

+
$ koji list-pkgs --tag dist-fc7
+Package                 Tag                     Extra Arches     Owner
+----------------------- ----------------------- ---------------- ----------------
+ElectricFence           dist-fc6                                 pmachata
+GConf2                  dist-fc6                                 rstrode
+lucene                  dist-fc6                                 dbhole
+lvm2                    dist-fc6                                 lvm-team
+ImageMagick             dist-fc6                                 nmurray
+m17n-db                 dist-fc6                                 majain
+m17n-lib                dist-fc6                                 majain
+MAKEDEV                 dist-fc6                                 clumens
+...
+
+ The first column is the name of the package, the second tells you which tag + the package entry has been inherited from, the third lists any extra arches, and + the fourth tells you the owner of the package. +

Latest Builds

+

+ To see the latest builds for a tag, use the latest-pkg command:

+
$ koji latest-pkg --all dist-fc7
+Build                                     Tag                   Built by
+----------------------------------------  --------------------  ----------------
+ConsoleKit-0.1.0-5.fc7                    dist-fc7              davidz
+ElectricFence-2.2.2-20.2.2                dist-fc6              jkeating
+GConf2-2.16.0-6.fc7                       dist-fc7              mclasen
+ImageMagick-6.2.8.0-3.fc6.1               dist-fc6-updates      nmurray
+MAKEDEV-3.23-1.2                          dist-fc6              nalin
+MySQL-python-1.2.1_p2-2                   dist-fc7              katzj
+NetworkManager-0.6.5-0.3.cvs20061025.fc7  dist-fc7              caillon
+ORBit2-2.14.6-1.fc7                       dist-fc7              mclasen
+
+ The output gives you not only the latest builds, but which tag they have + been inherited from and who built them (note: for builds imported from beehive + the "built by" field may be misleading) + + +

Exploring Koji

+ +

We've tried to make Koji self-documenting wherever possible. The command + line tool will print a list of valid commands and each command supports + --help. For example:

+ +
+$ koji help
+Koji commands are:
+        build                Build a package from source
+        cancel-task          Cancel a task
+        help                 List available commands
+        latest-build         Print the latest rpms for a tag
+        latest-pkg           Print the latest builds for a tag
+...
+$ koji build --help
+usage: koji build [options] tag URL
+(Specify the --help global option for a list of other help options)
+
+options:
+  -h, --help            show this help message and exit
+  --skip-tag            Do not attempt to tag package
+  --scratch             Perform a scratch build
+  --nowait              Don't wait on build
+...
+
+ +

Getting Involved

+ + If you would like to be more involved with the Koji project... + +

Project data TBD + + + diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..43d8d68c --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,4 @@ +install: + +clean: + rm -f *.o *.so *.pyc *~ diff --git a/docs/schema.sql b/docs/schema.sql new file mode 100644 index 00000000..02fe02ba --- /dev/null +++ b/docs/schema.sql @@ -0,0 +1,607 @@ + +-- vim:noet:sw=8 +-- still needs work +DROP TABLE build_notifications; + +DROP TABLE log_messages; + +DROP TABLE buildroot_listing; + +DROP TABLE rpmfiles; +DROP TABLE rpmdeps; +DROP TABLE rpminfo; + +DROP TABLE group_package_listing; +DROP TABLE group_req_listing; +DROP TABLE group_config; +DROP TABLE groups; + +DROP TABLE tag_listing; +DROP TABLE tag_packages; + +DROP TABLE buildroot; +DROP TABLE repo; + +DROP TABLE build_target_config; +DROP TABLE build_target; + +DROP TABLE tag_config; +DROP TABLE tag_inheritance; +DROP TABLE tag; + +DROP TABLE build; + +DROP TABLE task; + +DROP TABLE host_channels; +DROP TABLE host; + +DROP TABLE channels; +DROP TABLE package; + +DROP TABLE user_groups; +DROP TABLE user_perms; +DROP TABLE permissions; + +DROP TABLE sessions; +DROP TABLE users; + +DROP TABLE event_labels; +DROP TABLE events; +DROP FUNCTION get_event(); +DROP FUNCTION get_event_time(INTEGER); + +BEGIN WORK; + +-- We use the events table to sequence time +-- in the event that the system clock rolls back, event_ids will retain proper sequencing +CREATE TABLE events ( + id SERIAL NOT NULL PRIMARY KEY, + time TIMESTAMP NOT NULL DEFAULT NOW() +) WITHOUT OIDS; + +-- A function that creates an event and returns the id, used as DEFAULT value for versioned tables +CREATE FUNCTION get_event() RETURNS INTEGER AS ' + INSERT INTO events (time) VALUES (''now''); + SELECT currval(''events_id_seq'')::INTEGER; +' LANGUAGE SQL; + +-- A convenience function for converting events to timestamps, useful for +-- quick queries where you want to avoid JOINs. 
+CREATE FUNCTION get_event_time(INTEGER) RETURNS TIMESTAMP AS ' + SELECT time FROM events WHERE id=$1; +' LANGUAGE SQL; + +-- this table is used to label events +-- most events will be unlabeled, so keeping this separate saves space +CREATE TABLE event_labels ( + event_id INTEGER NOT NULL REFERENCES events(id), + label VARCHAR(255) UNIQUE NOT NULL +) WITHOUT OIDS; + + +-- User and session data +CREATE TABLE users ( + id SERIAL NOT NULL PRIMARY KEY, + name VARCHAR(255) UNIQUE NOT NULL, + password VARCHAR(255), + status INTEGER, + usertype INTEGER, + krb_principal VARCHAR(255) UNIQUE +) WITHOUT OIDS; + +CREATE TABLE permissions ( + id SERIAL NOT NULL PRIMARY KEY, + name VARCHAR(50) UNIQUE NOT NULL +) WITHOUT OIDS; + +-- Some basic perms +INSERT INTO permissions (name) VALUES ('admin'); +INSERT INTO permissions (name) VALUES ('build'); +INSERT INTO permissions (name) VALUES ('repo'); +INSERT INTO permissions (name) VALUES ('runroot'); + +CREATE TABLE user_perms ( + user_id INTEGER NOT NULL REFERENCES users(id), + perm_id INTEGER NOT NULL REFERENCES permissions(id), +-- versioned - see VERSIONING + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + revoke_event INTEGER REFERENCES events(id), + active BOOLEAN DEFAULT 'true' CHECK (active), + CONSTRAINT active_revoke_sane CHECK ( + (active IS NULL AND revoke_event IS NOT NULL ) + OR (active IS NOT NULL AND revoke_event IS NULL )), + PRIMARY KEY (create_event, user_id, perm_id), + UNIQUE (user_id,perm_id,active) +) WITHOUT OIDS; + +-- groups are represented as users w/ usertype=2 +CREATE TABLE user_groups ( + user_id INTEGER NOT NULL REFERENCES users(id), + group_id INTEGER NOT NULL REFERENCES users(id), +-- versioned - see VERSIONING + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + revoke_event INTEGER REFERENCES events(id), + active BOOLEAN DEFAULT 'true' CHECK (active), + CONSTRAINT active_revoke_sane CHECK ( + (active IS NULL AND revoke_event IS NOT NULL ) + OR 
(active IS NOT NULL AND revoke_event IS NULL )), + PRIMARY KEY (create_event, user_id, group_id), + UNIQUE (user_id,group_id,active) +) WITHOUT OIDS; + +-- a session can create subsessions, which are just new sessions whose +-- 'master' field points back to the session. This field should +-- always point to the top session. If the master session is expired, +-- the all its subsessions should be expired as well. +-- If a session is exclusive, it is the only session allowed for its +-- user. The 'exclusive' field is either NULL or TRUE, never FALSE. This +-- is so exclusivity can be enforced with a unique condition. +CREATE TABLE sessions ( + id SERIAL NOT NULL PRIMARY KEY, + user_id INTEGER NOT NULL REFERENCES users(id), + expired BOOLEAN NOT NULL DEFAULT FALSE, + master INTEGER REFERENCES sessions(id), + key VARCHAR(255), + authtype INTEGER, + hostip VARCHAR(255), + callnum INTEGER, + start_time TIMESTAMP NOT NULL DEFAULT NOW(), + update_time TIMESTAMP NOT NULL DEFAULT NOW(), + exclusive BOOLEAN CHECK (exclusive), + CONSTRAINT no_exclusive_subsessions CHECK ( + master IS NULL OR "exclusive" IS NULL), + CONSTRAINT exclusive_expired_sane CHECK ( + expired IS FALSE OR "exclusive" IS NULL), + UNIQUE (user_id,exclusive) +) WITHOUT OIDS; +CREATE INDEX sessions_master ON sessions(master); +CREATE INDEX sessions_active_and_recent ON sessions(expired, master, update_time) WHERE (expired IS NOT TRUE AND master IS NULL); + +-- Channels are used to limit which tasks are run on which machines. +-- Each task is assigned to a channel and each host 'listens' on one +-- or more channels. A host will only accept tasks for channels it is +-- listening to. 
+CREATE TABLE channels ( + id SERIAL NOT NULL PRIMARY KEY, + name VARCHAR(128) UNIQUE NOT NULL +) WITHOUT OIDS; + +-- create default channel +INSERT INTO channels (name) VALUES ('default'); +INSERT INTO channels (name) VALUES ('runroot'); + +-- Here we track the build machines +-- each host has an entry in the users table also +-- capacity: the hosts weighted task capacity +CREATE TABLE host ( + id SERIAL NOT NULL PRIMARY KEY, + user_id INTEGER NOT NULL REFERENCES users (id), + name VARCHAR(128) UNIQUE NOT NULL, + arches TEXT, + task_load FLOAT CHECK (NOT task_load < 0) NOT NULL DEFAULT 0.0, + capacity FLOAT CHECK (capacity > 1) NOT NULL DEFAULT 2.0, + ready BOOLEAN NOT NULL DEFAULT 'false', + enabled BOOLEAN NOT NULL DEFAULT 'true' +) WITHOUT OIDS; +CREATE INDEX HOST_IS_READY_AND_ENABLED ON host(enabled, ready) WHERE (enabled IS TRUE AND ready IS TRUE); + +CREATE TABLE host_channels ( + host_id INTEGER NOT NULL REFERENCES host(id), + channel_id INTEGER NOT NULL REFERENCES channels(id), + UNIQUE (host_id,channel_id) +) WITHOUT OIDS; + + +-- tasks are pretty general and may refer to all sorts of jobs, not +-- just package builds. +-- tasks may spawn subtasks (hence the parent field) +-- top-level tasks have NULL parent +-- the request and result fields are base64-encoded xmlrpc data. +-- this means each task is effectively an xmlrpc call, using this table as +-- the medium. +-- the host_id field indicates which host is running the task. This field +-- is used to lock the task. +-- weight: the weight of the task (vs. host capacity) +-- label: this field is used to label subtasks. top-level tasks will not +-- have a label. some subtasks may be unlabeled. labels are used in task +-- failover to prevent duplication of work. 
+CREATE TABLE task ( + id SERIAL NOT NULL PRIMARY KEY, + state INTEGER, + create_time TIMESTAMP NOT NULL DEFAULT NOW(), + completion_time TIMESTAMP, + channel_id INTEGER NOT NULL REFERENCES channels(id), + host_id INTEGER REFERENCES host (id), + parent INTEGER REFERENCES task (id), + label VARCHAR(255), + waiting BOOLEAN, + awaited BOOLEAN, + owner INTEGER REFERENCES users(id) NOT NULL, + method TEXT, + request TEXT, + result TEXT, + eta INTEGER, + arch VARCHAR(16) NOT NULL, + priority INTEGER, + weight FLOAT CHECK (NOT weight < 0) NOT NULL DEFAULT 1.0, + CONSTRAINT parent_label_sane CHECK ( + parent IS NOT NULL OR label IS NULL), + UNIQUE (parent,label) +) WITHOUT OIDS; + +CREATE INDEX task_by_state ON task (state); +-- CREATE INDEX task_by_parent ON task (parent); (unique condition creates similar index) +CREATE INDEX task_by_host ON task (host_id); + + +-- by package, we mean srpm +-- we mean the package in general, not an individual build +CREATE TABLE package ( + id SERIAL NOT NULL PRIMARY KEY, + name TEXT UNIQUE NOT NULL +) WITHOUT OIDS; + +-- CREATE INDEX package_by_name ON package (name); +-- (implicitly created by unique constraint) + + +-- here we track the built packages +-- this is at the srpm level, since builds are by srpm +-- see rpminfo for isolated packages +-- even though we track epoch, we demand that N-V-R be unique +-- task_id: a reference to the task creating the build, may be +-- null, or may point to a deleted task. 
+CREATE TABLE build ( + id SERIAL NOT NULL PRIMARY KEY, + pkg_id INTEGER NOT NULL REFERENCES package (id) DEFERRABLE, + version TEXT NOT NULL, + release TEXT NOT NULL, + epoch INTEGER, + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + completion_time TIMESTAMP, + state INTEGER NOT NULL, + task_id INTEGER REFERENCES task (id), + owner INTEGER NOT NULL REFERENCES users (id), + CONSTRAINT build_pkg_ver_rel UNIQUE (pkg_id, version, release), + CONSTRAINT completion_sane CHECK ((state = 0 AND completion_time IS NULL) OR + (state != 0 AND completion_time IS NOT NULL)) +) WITHOUT OIDS; + +CREATE INDEX build_by_pkg_id ON build (pkg_id); +CREATE INDEX build_completion ON build(completion_time); + +CREATE TABLE changelogs ( + id SERIAL NOT NULL PRIMARY KEY, + build_id INTEGER NOT NULL REFERENCES build (id), + date TIMESTAMP NOT NULL, + author TEXT NOT NULL, + text TEXT +) WITHOUT OIDS; + +CREATE INDEX changelogs_by_date on changelogs (date); +CREATE INDEX changelogs_by_build on changelogs (build_id); + +-- Note: some of these CREATEs may seem a little out of order. This is done to keep +-- the references sane. + +CREATE TABLE tag ( + id SERIAL NOT NULL PRIMARY KEY, + name VARCHAR(50) UNIQUE NOT NULL +) WITHOUT OIDS; + +-- CREATE INDEX tag_by_name ON tag (name); +-- (implicitly created by unique constraint) + + +-- VERSIONING +-- Several tables are versioned with the following scheme. Since this +-- is the first, here is the explanation of how it works. +-- The versioning fields are: create_event, revoke_event, and active +-- The active field is either True or NULL, it is never False! +-- The create_event and revoke_event fields refer to the event table +-- A version is active if active is not NULL +-- (an active version also has NULL revoke_event.) +-- A UNIQUE condition can incorporate the 'active' field, making it +-- apply only to the active versions. 
+-- When a version is made inactive (revoked): +-- revoke_event is set +-- active is set to NULL +-- Query for current data with WHERE active is not NULL +-- (should be same as WHERE revoke_event is NULL) +-- Query for data at event e with WHERE create_event <= e AND e < revoke_event +CREATE TABLE tag_inheritance ( + tag_id INTEGER NOT NULL REFERENCES tag(id), + parent_id INTEGER NOT NULL REFERENCES tag(id), + priority INTEGER NOT NULL, + maxdepth INTEGER, + intransitive BOOLEAN NOT NULL DEFAULT 'false', + noconfig BOOLEAN NOT NULL DEFAULT 'false', + pkg_filter TEXT, +-- versioned - see desc above + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + revoke_event INTEGER REFERENCES events(id), + active BOOLEAN DEFAULT 'true' CHECK (active), + CONSTRAINT active_revoke_sane CHECK ( + (active IS NULL AND revoke_event IS NOT NULL ) + OR (active IS NOT NULL AND revoke_event IS NULL )), + PRIMARY KEY (create_event, tag_id, priority), + UNIQUE (tag_id,priority,active), + UNIQUE (tag_id,parent_id,active) +) WITHOUT OIDS; + +CREATE INDEX tag_inheritance_by_parent ON tag_inheritance (parent_id); + +-- XXX - need more config options listed here +-- perm_id: the permission that is required to apply the tag. can be NULL +-- +CREATE TABLE tag_config ( + tag_id INTEGER NOT NULL REFERENCES tag(id), + arches TEXT, + perm_id INTEGER REFERENCES permissions(id), + locked BOOLEAN NOT NULL DEFAULT 'false', +-- versioned - see desc above + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + revoke_event INTEGER REFERENCES events(id), + active BOOLEAN DEFAULT 'true' CHECK (active), + CONSTRAINT active_revoke_sane CHECK ( + (active IS NULL AND revoke_event IS NOT NULL ) + OR (active IS NOT NULL AND revoke_event IS NULL )), + PRIMARY KEY (create_event, tag_id), + UNIQUE (tag_id,active) +) WITHOUT OIDS; + + +-- a build target tells the system where to build the package +-- and how to tag it afterwards. 
+CREATE TABLE build_target ( + id SERIAL NOT NULL PRIMARY KEY, + name VARCHAR(50) UNIQUE NOT NULL +) WITHOUT OIDS; + + +CREATE TABLE build_target_config ( + build_target_id INTEGER NOT NULL REFERENCES build_target(id), + build_tag INTEGER NOT NULL REFERENCES tag(id), + dest_tag INTEGER NOT NULL REFERENCES tag(id), +-- versioned - see desc above + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + revoke_event INTEGER REFERENCES events(id), + active BOOLEAN DEFAULT 'true' CHECK (active), + CONSTRAINT active_revoke_sane CHECK ( + (active IS NULL AND revoke_event IS NOT NULL ) + OR (active IS NOT NULL AND revoke_event IS NULL )), + PRIMARY KEY (create_event, build_target_id), + UNIQUE (build_target_id,active) +) WITHOUT OIDS; + + +-- track repos +CREATE TABLE repo ( + id SERIAL NOT NULL PRIMARY KEY, + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + tag_id INTEGER NOT NULL REFERENCES tag(id), + state INTEGER +) WITHOUT OIDS; + + +-- here we track the buildroots on the machines +CREATE TABLE buildroot ( + id SERIAL NOT NULL PRIMARY KEY, + host_id INTEGER NOT NULL REFERENCES host(id), + repo_id INTEGER NOT NULL REFERENCES repo (id), + arch VARCHAR(16) NOT NULL, + task_id INTEGER REFERENCES task (id), + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + retire_event INTEGER, + state INTEGER, + dirtyness INTEGER +) WITHOUT OIDS; + +-- this table associates tags with builds. 
an entry here tags a package +CREATE TABLE tag_listing ( + build_id INTEGER NOT NULL REFERENCES build (id), + tag_id INTEGER NOT NULL REFERENCES tag (id), +-- versioned - see earlier description of versioning + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + revoke_event INTEGER REFERENCES events(id), + active BOOLEAN DEFAULT 'true' CHECK (active), + CONSTRAINT active_revoke_sane CHECK ( + (active IS NULL AND revoke_event IS NOT NULL ) + OR (active IS NOT NULL AND revoke_event IS NULL )), + PRIMARY KEY (create_event, build_id, tag_id), + UNIQUE (build_id,tag_id,active) +) WITHOUT OIDS; +CREATE INDEX tag_listing_tag_id_key ON tag_listing(tag_id); + +-- this is a per-tag list of packages, with some extra info +-- so this allows you to explicitly state which packages belong where +-- (as opposed to beehive where this can only be done at the collection level) +-- these are packages in general, not specific builds. +-- this list limits which builds can be tagged with which tags +-- if blocked is true, then the package is specifically not included. this +-- prevents the package from being included via inheritance +CREATE TABLE tag_packages ( + package_id INTEGER NOT NULL REFERENCES package (id), + tag_id INTEGER NOT NULL REFERENCES tag (id), + owner INTEGER NOT NULL REFERENCES users(id), + blocked BOOLEAN NOT NULL DEFAULT FALSE, + extra_arches TEXT, +-- versioned - see earlier description of versioning + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + revoke_event INTEGER REFERENCES events(id), + active BOOLEAN DEFAULT 'true' CHECK (active), + CONSTRAINT active_revoke_sane CHECK ( + (active IS NULL AND revoke_event IS NOT NULL ) + OR (active IS NOT NULL AND revoke_event IS NULL )), + PRIMARY KEY (create_event, package_id, tag_id), + UNIQUE (package_id,tag_id,active) +) WITHOUT OIDS; + +-- package groups (per tag). 
used for generating comps for the tag repos +CREATE TABLE groups ( + id SERIAL NOT NULL PRIMARY KEY, + name VARCHAR(50) UNIQUE NOT NULL + -- corresponds to the id field in a comps group +) WITHOUT OIDS; + +-- if blocked is true, then the group is specifically not included. this +-- prevents the group from being included via inheritance +CREATE TABLE group_config ( + group_id INTEGER NOT NULL REFERENCES groups (id), + tag_id INTEGER NOT NULL REFERENCES tag (id), + blocked BOOLEAN NOT NULL DEFAULT FALSE, + exported BOOLEAN DEFAULT TRUE, + display_name TEXT NOT NULL, + is_default BOOLEAN, + uservisible BOOLEAN, + description TEXT, + langonly TEXT, + biarchonly BOOLEAN, +-- versioned - see earlier description of versioning + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + revoke_event INTEGER REFERENCES events(id), + active BOOLEAN DEFAULT 'true' CHECK (active), + CONSTRAINT active_revoke_sane CHECK ( + (active IS NULL AND revoke_event IS NOT NULL ) + OR (active IS NOT NULL AND revoke_event IS NULL )), + PRIMARY KEY (create_event, group_id, tag_id), + UNIQUE (group_id,tag_id,active) +) WITHOUT OIDS; + +CREATE TABLE group_req_listing ( + group_id INTEGER NOT NULL REFERENCES groups (id), + tag_id INTEGER NOT NULL REFERENCES tag (id), + req_id INTEGER NOT NULL REFERENCES groups (id), + blocked BOOLEAN NOT NULL DEFAULT FALSE, + type VARCHAR(25), + is_metapkg BOOLEAN NOT NULL DEFAULT FALSE, +-- versioned - see earlier description of versioning + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + revoke_event INTEGER REFERENCES events(id), + active BOOLEAN DEFAULT 'true' CHECK (active), + CONSTRAINT active_revoke_sane CHECK ( + (active IS NULL AND revoke_event IS NOT NULL ) + OR (active IS NOT NULL AND revoke_event IS NULL )), + PRIMARY KEY (create_event, group_id, tag_id, req_id), + UNIQUE (group_id,tag_id,req_id,active) +) WITHOUT OIDS; + +-- if blocked is true, then the package is specifically not included. 
this +-- prevents the package from being included in the group via inheritance +-- package refers to an rpm name, not necessarily an srpm name (so it does +-- not reference the package table). +CREATE TABLE group_package_listing ( + group_id INTEGER NOT NULL REFERENCES groups (id), + tag_id INTEGER NOT NULL REFERENCES tag (id), + package TEXT, + blocked BOOLEAN NOT NULL DEFAULT FALSE, + type VARCHAR(25) NOT NULL, + basearchonly BOOLEAN, + requires TEXT, +-- versioned - see earlier description of versioning + create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(), + revoke_event INTEGER REFERENCES events(id), + active BOOLEAN DEFAULT 'true' CHECK (active), + CONSTRAINT active_revoke_sane CHECK ( + (active IS NULL AND revoke_event IS NOT NULL ) + OR (active IS NOT NULL AND revoke_event IS NULL )), + PRIMARY KEY (create_event, group_id, tag_id, package), + UNIQUE (group_id,tag_id,package,active) +) WITHOUT OIDS; + +-- rpminfo tracks individual rpms (incl srpms) +-- buildroot_id can be NULL (for externally built packages) +-- even though we track epoch, we demand that N-V-R.A be unique +-- we don't store filename b/c filename should be N-V-R.A.rpm +CREATE TABLE rpminfo ( + id SERIAL NOT NULL PRIMARY KEY, + build_id INTEGER REFERENCES build (id), + buildroot_id INTEGER REFERENCES buildroot (id), + name TEXT NOT NULL, + version TEXT NOT NULL, + release TEXT NOT NULL, + epoch INTEGER, + arch VARCHAR(16) NOT NULL, + payloadhash TEXT NOT NULL, + size INTEGER NOT NULL, + buildtime BIGINT NOT NULL, + CONSTRAINT rpminfo_unique_nvra UNIQUE (name,version,release,arch) +) WITHOUT OIDS; +CREATE INDEX rpminfo_build ON rpminfo(build_id); + +-- sighash is the checksum of the signature header +CREATE TABLE rpmsigs ( + rpm_id INTEGER NOT NULL REFERENCES rpminfo (id), + sigkey TEXT NOT NULL, + sighash TEXT NOT NULL, + CONSTRAINT rpmsigs_no_resign UNIQUE (rpm_id, sigkey) +) WITHOUT OIDS; + +-- buildroot_listing needs to be created after rpminfo so it can reference it 
+CREATE TABLE buildroot_listing ( + buildroot_id INTEGER NOT NULL REFERENCES buildroot(id), + rpm_id INTEGER NOT NULL REFERENCES rpminfo(id), + is_update BOOLEAN NOT NULL DEFAULT FALSE, + UNIQUE (buildroot_id,rpm_id) +) WITHOUT OIDS; +CREATE INDEX buildroot_listing_rpms ON buildroot_listing(rpm_id); + +-- this table holds the requires, provides, obsoletes, and conflicts +-- for an rpminfo entry +CREATE TABLE rpmdeps ( + pkey SERIAL NOT NULL PRIMARY KEY, + rpm_id INTEGER NOT NULL REFERENCES rpminfo (id), + dep_name TEXT NOT NULL, + dep_version TEXT, + dep_flags INTEGER, + dep_type INTEGER NOT NULL +) WITHOUT OIDS; + +CREATE INDEX rpmdeps_by_rpm_id ON rpmdeps (rpm_id); +CREATE INDEX rpmdeps_by_depssolve ON rpmdeps (dep_type, dep_name, dep_flags, dep_version); + +CREATE TABLE rpmfiles ( + rpm_id INTEGER NOT NULL REFERENCES rpminfo (id), + filename TEXT NOT NULL, + filemd5 VARCHAR(32) NOT NULL, + filesize INTEGER NOT NULL, + fileflags INTEGER NOT NULL, + PRIMARY KEY (filename, rpm_id) +) WITHOUT OIDS; + +CREATE INDEX rpmfiles_by_rpm_id ON rpmfiles (rpm_id); +CREATE INDEX rpmfiles_by_filename ON rpmfiles (filename); + +CREATE TABLE log_messages ( + id SERIAL NOT NULL PRIMARY KEY, + message TEXT NOT NULL, + message_time TIMESTAMP NOT NULL DEFAULT NOW(), + logger_name VARCHAR(200) NOT NULL, + level VARCHAR(10) NOT NULL, + location VARCHAR(200), + host VARCHAR(200) +) WITHOUT OIDS; + +CREATE TABLE build_notifications ( + id SERIAL NOT NULL PRIMARY KEY, + user_id INTEGER NOT NULL REFERENCES users (id), + package_id INTEGER REFERENCES package (id), + tag_id INTEGER REFERENCES tag (id), + success_only BOOLEAN NOT NULL DEFAULT FALSE, + email TEXT NOT NULL +) WITHOUT OIDS; + +GRANT SELECT ON build, package, task, tag, +tag_listing, tag_config, tag_inheritance, tag_packages, +rpminfo, rpmdeps, +rpmfiles TO PUBLIC; + +-- example code to add initial admins +-- insert into users (name, usertype, krb_principal) values ('admin', 0, 'admin@EXAMPLE.COM'); +-- insert into user_perms 
(user_id, perm_id) +-- select users.id, permissions.id from users, permissions +-- where users.name in ('admin') +-- and permissions.name = 'admin'; + +COMMIT WORK; diff --git a/hub/.cvsignore b/hub/.cvsignore new file mode 100644 index 00000000..0d20b648 --- /dev/null +++ b/hub/.cvsignore @@ -0,0 +1 @@ +*.pyc diff --git a/hub/Makefile b/hub/Makefile new file mode 100644 index 00000000..8d8b36d5 --- /dev/null +++ b/hub/Makefile @@ -0,0 +1,33 @@ +PYTHON=python +PACKAGE = $(shell basename `pwd`) +PYFILES = $(wildcard *.py) +PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)') +PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix') +PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER) +PKGDIR = $(PYLIBDIR)/site-packages/$(PACKAGE) + +SERVERDIR = /var/www/koji-hub +PYFILES = $(wildcard *.py) + +_default: + @echo "nothing to make. try make install" + +clean: + rm -f *.o *.so *.pyc *~ + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/etc/httpd/conf.d + install -m 644 httpd.conf $(DESTDIR)/etc/httpd/conf.d/kojihub.conf + + mkdir -p $(DESTDIR)/$(SERVERDIR) + for p in $(PYFILES) ; do \ + install -m 644 $$p $(DESTDIR)/$(SERVERDIR)/$$p; \ + done + $(PYTHON) -c "import compileall; compileall.compile_dir('$(DESTDIR)/$(SERVERDIR)', 1, '$(PYDIR)', 1)" + diff --git a/hub/httpd.conf b/hub/httpd.conf new file mode 100644 index 00000000..5d3bd69e --- /dev/null +++ b/hub/httpd.conf @@ -0,0 +1,28 @@ +# +# koji-hub is an xmlrpc interface to the Koji database +# + +Alias /kojihub "/var/www/koji-hub/XMLRPC" +Alias /koji-hub "/var/www/koji-hub/XMLRPC" + + + SetHandler mod_python + PythonHandler kojixmlrpc + PythonOption DBName koji + PythonOption DBUser koji + PythonOption DBHost db.example.com + PythonOption KojiDir /mnt/koji + PythonOption AuthPrincipal kojihub@EXAMPLE.COM + PythonOption AuthKeytab /etc/koji.keytab + PythonOption ProxyPrincipals kojihub@EXAMPLE.COM + 
PythonOption KojiWebURL http://kojiweb.example.com/koji + #format string for host principals (%s = hostname) + PythonOption HostPrincipalFormat %s@EXAMPLE.COM + #PythonOption KojiDebug On + #PythonOption KojiTraceback "extended" + PythonDebug Off + #sending tracebacks to the client isn't very helpful for debugging xmlrpc + PythonAutoReload Off + #autoreload is mostly useless to us (it would only reload kojixmlrpc.py) + + diff --git a/hub/kojihub.py b/hub/kojihub.py new file mode 100644 index 00000000..ba535a5a --- /dev/null +++ b/hub/kojihub.py @@ -0,0 +1,6130 @@ +# Python library + +# kojihub - library for koji's XMLRPC interface +# Copyright (c) 2005-2007 Red Hat +# +# Koji is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this software; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +# +# Authors: +# Mike McLean + +import base64 +import koji +import koji.auth +import koji.db +import datetime +import errno +import logging +import logging.handlers +import fcntl +import md5 +import os +import pgdb +import random +import re +import rpm +import stat +import sys +import tempfile +import xmlrpclib +from koji.context import context + +def log_error(msg): + if hasattr(context,'req'): + context.req.log_error(msg) + else: + sys.stderr.write(msg + "\n") + logging.getLogger('koji.hub').error(msg) + + +class Task(object): + """A task for the build hosts""" + + fields = ( + ('task.id', 'id'), + ('task.state', 'state'), + ('task.create_time', 'create_time'), + ('EXTRACT(EPOCH FROM create_time)','create_ts'), + ('task.completion_time', 'completion_time'), + ('EXTRACT(EPOCH FROM completion_time)','completion_ts'), + ('task.channel_id', 'channel_id'), + ('task.host_id', 'host_id'), + ('task.parent', 'parent'), + ('task.label', 'label'), + ('task.waiting', 'waiting'), + ('task.awaited', 'awaited'), + ('task.owner', 'owner'), + ('task.method', 'method'), + ('task.arch', 'arch'), + ('task.priority', 'priority'), + ('task.weight', 'weight')) + + def __init__(self,id): + self.id = id + self.logger = logging.getLogger("koji.hub.Task") + + def verifyHost(self,host_id=None): + """Verify that host owns task""" + if host_id is None: + host_id = context.session.host_id + if host_id is None: + return False + task_id = self.id + #getting a row lock on this task to ensure task assignment sanity + #no other concurrent transaction should be altering this row + q = """SELECT state,host_id FROM task WHERE id=%(task_id)s FOR UPDATE""" + r = _fetchSingle(q, locals()) + if not r: + raise koji.GenericError, "No such task: %i" % task_id + state, otherhost = r + 
return (state == koji.TASK_STATES['OPEN'] and otherhost == host_id) + + def assertHost(self,host_id): + if not self.verifyHost(host_id): + raise koji.NotAllowed, "host %d does not own task %d" % (host_id,self.id) + + def getOwner(self): + """Return the owner (user_id) for this task""" + q = """SELECT owner FROM task WHERE id=%(id)i""" + return _singleValue(q, vars(self)) + + def verifyOwner(self,user_id=None): + """Verify that user owns task""" + if user_id is None: + user_id = context.session.user_id + if user_id is None: + return False + task_id = self.id + #getting a row lock on this task to ensure task state sanity + q = """SELECT owner FROM task WHERE id=%(task_id)s FOR UPDATE""" + r = _fetchSingle(q, locals()) + if not r: + raise koji.GenericError, "No such task: %i" % task_id + (owner,) = r + return (owner == user_id) + + def assertOwner(self,user_id=None): + if not self.verifyOwner(user_id): + raise koji.NotAllowed, "user %d does not own task %d" % (user_id,self.id) + + def lock(self,host_id,newstate='OPEN',force=False): + """Attempt to associate the task for host, either to assign or open + + returns True if successful, False otherwise""" + #we use row-level locks to keep things sane + #note the SELECT...FOR UPDATE + task_id = self.id + if not force: + q = """SELECT state,host_id FROM task WHERE id=%(task_id)i FOR UPDATE""" + r = _fetchSingle(q,locals()) + if not r: + raise koji.GenericError, "No such task: %i" % task_id + state, otherhost = r + if state == koji.TASK_STATES['FREE']: + if otherhost is not None: + log_error("Error: task %i is both free and locked (host %i)" + % (task_id,otherhost)) + return False + elif state == koji.TASK_STATES['ASSIGNED']: + if otherhost is None: + log_error("Error: task %i is assigned, but has no assignee" + % (task_id)) + return False + elif otherhost != host_id: + #task is assigned to someone else + return False + #otherwise the task is assigned to host_id, so keep going + else: + if otherhost is None: + 
log_error("Error: task %i is non-free but unlocked (state %i)" + % (task_id,state)) + return False + #if we reach here, task is either + # - free and unlocked + # - assigned to host_id + # - force option is enabled + state = koji.TASK_STATES[newstate] + q = """UPDATE task SET state=%(state)s,host_id=%(host_id)s + WHERE id=%(task_id)s""" + _dml(q,locals()) + return True + + def assign(self,host_id,force=False): + """Attempt to assign the task to host. + + returns True if successful, False otherwise""" + return self.lock(host_id,'ASSIGNED',force) + + def open(self,host_id): + """Attempt to open the task for host. + + returns task data if successful, None otherwise""" + if self.lock(host_id,'OPEN'): + # get more complete data to return + fields = self.fields + (('task.request', 'request'),) + q = """SELECT %s FROM task WHERE id=%%(id)i""" % ','.join([f[0] for f in fields]) + return _singleRow(q, vars(self), [f[1] for f in fields], strict=True) + else: + return None + + def free(self): + """Free a task""" + task_id = self.id + # access checks should be performed by calling function + query = """SELECT state FROM task WHERE id = %(id)i FOR UPDATE""" + row = _fetchSingle(query,vars(self)) + if not row: + raise koji.GenericError, "No such task: %i" % self.id + oldstate = row[0] + if koji.TASK_STATES[oldstate] in ['CLOSED','CANCELED','FAILED']: + raise koji.GenericError, "Cannot free task %i, state is %s" % \ + (self.id,koji.TASK_STATES[oldstate]) + newstate = koji.TASK_STATES['FREE'] + newhost = None + q = """UPDATE task SET state=%(newstate)s,host_id=%(newhost)s + WHERE id=%(task_id)s""" + _dml(q,locals()) + return True + + def setWeight(self,weight): + """Set weight for task""" + task_id = self.id + # access checks should be performed by calling function + q = """UPDATE task SET weight=%(weight)s WHERE id = %(task_id)s""" + _dml(q,locals()) + + def _close(self,result,state): + """Mark task closed and set response + + Returns True if successful, False if not""" + task_id 
= self.id + # access checks should be performed by calling function + st_closed = koji.TASK_STATES['CLOSED'] + update = """UPDATE task SET result = %(result)s, state = %(state)s, completion_time = NOW() + WHERE id = %(task_id)d + """ + _dml(update,locals()) + + def close(self,result): + # access checks should be performed by calling function + self._close(result,koji.TASK_STATES['CLOSED']) + + def fail(self,result): + # access checks should be performed by calling function + self._close(result,koji.TASK_STATES['FAILED']) + + def getState(self): + query = """SELECT state FROM task WHERE id = %(id)i""" + return _singleValue(query, vars(self)) + + def isFinished(self): + return (koji.TASK_STATES[self.getState()] in ['CLOSED','CANCELED','FAILED']) + + def isCanceled(self): + return (self.getState() == koji.TASK_STATES['CANCELED']) + + def isFailed(self): + return (self.getState() == koji.TASK_STATES['FAILED']) + + def cancel(self,recurse=True): + """Cancel this task. + + A task can only be canceled if it is not already in the 'CLOSED' state. + If it is, no action will be taken. 
Return True if the task is + successfully canceled, or if it was already canceled, False if it is + closed.""" + # access checks should be performed by calling function + task_id = self.id + q = """SELECT state FROM task WHERE id = %(task_id)s FOR UPDATE""" + state = _singleValue(q,locals()) + st_canceled = koji.TASK_STATES['CANCELED'] + st_closed = koji.TASK_STATES['CLOSED'] + st_failed = koji.TASK_STATES['FAILED'] + if state == st_canceled: + return True + elif state in [st_closed,st_failed]: + return False + update = """UPDATE task SET state = %(st_canceled)i, completion_time = NOW() + WHERE id = %(task_id)i""" + _dml(update, locals()) + #cancel associated builds (only if state is 'BUILDING') + #since we check build state, we avoid loops with cancel_build on our end + b_building = koji.BUILD_STATES['BUILDING'] + q = """SELECT id FROM build WHERE task_id = %(task_id)i + AND state = %(b_building)i + FOR UPDATE""" + for (build_id,) in _fetchMulti(q, locals()): + cancel_build(build_id, cancel_task=False) + if recurse: + #also cancel child tasks + self.cancelChildren() + return True + + def cancelChildren(self): + """Cancel child tasks""" + task_id = self.id + q = """SELECT id FROM task WHERE parent = %(task_id)i""" + for (id,) in _fetchMulti(q,locals()): + Task(id).cancel(recurse=True) + + def cancelFull(self,strict=True): + """Cancel this task and every other task in its group + + If strict is true, then this must be a top-level task + Otherwise we will follow up the chain to find the top-level task + """ + task_id = self.id + q = """SELECT parent FROM task WHERE id = %(task_id)i FOR UPDATE""" + parent = _singleValue(q,locals()) + if parent is not None: + if strict: + raise koji.GenericError, "Task %d is not top-level (parent=%d)" % (task_id,parent) + #otherwise, find the top-level task and go from there + seen = {task_id:1} + while parent is not None: + if seen.has_key(parent): + raise koji.GenericError, "Task LOOP at task %i" % task_id + task_id = parent + 
seen[task_id] = 1 + parent = _singleValue(q,locals()) + return Task(task_id).cancelFull(strict=True) + #We handle the recursion ourselves, since self.cancel will stop at + #canceled or closed tasks. + tasklist = [task_id] + seen = {} + #query for use in loop + q_children = """SELECT id FROM task WHERE parent = %(task_id)i""" + for task_id in tasklist: + if seen.has_key(task_id): + #shouldn't happen + raise koji.GenericError, "Task LOOP at task %i" % task_id + seen[task_id] = 1 + Task(task_id).cancel(recurse=False) + for (child_id,) in _fetchMulti(q_children,locals()): + tasklist.append(child_id) + + def getRequest(self): + id = self.id + query = """SELECT request FROM task WHERE id = %(id)i""" + encoded_request = _singleValue(query, locals()) + params, method = xmlrpclib.loads(base64.decodestring(encoded_request)) + return params + + def getResult(self): + query = """SELECT state,result FROM task WHERE id = %(id)i""" + r = _fetchSingle(query, vars(self)) + if not r: + raise koji.GenericError, "No such task" + state, encoded_result = r + if koji.TASK_STATES[state] == 'CANCELED': + raise koji.GenericError, "Task %i is canceled" % self.id + elif koji.TASK_STATES[state] not in ['CLOSED','FAILED']: + raise koji.GenericError, "Task %i is not finished" % self.id + # If the result is a Fault, then loads will raise it + # This is probably what we want to happen. + # Note that you can't really 'return' a fault over xmlrpc, you + # can only 'raise' them. + # If you try to return a fault as a value, it gets reduced to + # a mere struct. + # f = Fault(1,"hello"); print dumps((f,)) + result,method = xmlrpclib.loads(base64.decodestring(encoded_result)) + return result[0] + + def getInfo(self, strict=True, request=False): + """Return information about the task in a dictionary. 
If "request" is True, + the request will be decoded and included in the dictionary.""" + q = """SELECT %s FROM task WHERE id = %%(id)i""" % ','.join([f[0] for f in self.fields]) + result = _singleRow(q, vars(self), [f[1] for f in self.fields], strict) + if request: + result['request'] = self.getRequest() + return result + + def getChildren(self, request=False): + """Return information about tasks with this task as their + parent. If there are no such Tasks, return an empty list.""" + fields = self.fields + if request: + fields = fields + (('request', 'request'),) + query = """SELECT %s FROM task WHERE parent = %%(id)i""" % ', '.join([f[0] for f in fields]) + results = _multiRow(query, vars(self), [f[1] for f in fields]) + if request: + for task in results: + task['request'] = xmlrpclib.loads(base64.decodestring(task['request']))[0] + return results + +def make_task(method,arglist,**opts): + """Create a task + + This call should not be directly exposed via xmlrpc + Optional args: + parent: the id of the parent task (creates a subtask) + label: (subtasks only) the label of the subtask + owner: the user_id that should own the task + channel: the channel to place the task in + arch: the arch for the task + priority: the priority of the task + """ + if opts.has_key('parent'): + # for subtasks, we use some of the parent's options as defaults + fields = ('state','owner','channel_id','priority','arch') + q = """SELECT %s FROM task WHERE id = %%(parent)i""" % ','.join(fields) + r = _fetchSingle(q,opts) + if not r: + raise koji.GenericError, "Invalid parent task: %(parent)s" % opts + pdata = dict(zip(fields,r)) + if pdata['state'] != koji.TASK_STATES['OPEN']: + raise koji.GenericError, "Parent task (id %(parent)s) is not open" % opts + #default to a higher priority than parent + opts.setdefault('priority', pdata['priority'] - 1) + for f in ('owner','channel_id','arch'): + opts.setdefault(f,pdata[f]) + opts.setdefault('label',None) + else: + 
opts.setdefault('priority',koji.PRIO_DEFAULT) + #calling function should enforce priority limitations, if applicable + opts.setdefault('arch','noarch') + opts.setdefault('channel','default') + #no labels for top-level tasks + #calling function should enforce channel limitations, if applicable + opts['channel_id'] = get_channel_id(opts['channel'],strict=True) + if not context.session.logged_in: + opts['owner'] = None + else: + opts['owner'] = context.session.user_id + opts['label'] = None + opts['parent'] = None + #XXX - temporary workaround + if method in ('buildArch', 'runroot') and opts['arch'] == 'noarch': + #not all arches can generate a proper buildroot for all tags + if method == 'buildArch': + tag = get_tag(arglist[1]) + else: + tag = get_tag(arglist[0]) + fullarches = ('i386', 'ia64', 'ppc', 'ppc64', 's390', 's390x', 'x86_64') + tagarches = tag['arches'].split() + for a in fullarches: + if a not in tagarches: + opts['arch'] = koji.canonArch(random.choice(tagarches)) + break + + # encode xmlrpc request + opts['request'] = base64.encodestring( + xmlrpclib.dumps(tuple(arglist),methodname=method,allow_none=1)) + opts['state'] = koji.TASK_STATES['FREE'] + opts['method'] = method + # stick it in the database + q = """ + INSERT INTO task (state,owner,method,request,priority, + parent,label,channel_id,arch) + VALUES (%(state)s,%(owner)s,%(method)s,%(request)s,%(priority)s, + %(parent)s,%(label)s,%(channel_id)s,%(arch)s); + """ + _dml(q,opts) + q = """SELECT currval('task_id_seq')""" + task_id = _singleValue(q, {}) + return task_id + +def mktask(__taskopts,__method,*args,**opts): + """A wrapper around make_task with alternate signature + + Parameters: + _taskopts: a dictionary of task options (e.g. priority, ...) + _method: the method to be invoked + + All remaining args (incl. optional ones) are passed on to the task. 
+ """ + return make_task(__method,koji.encode_args(*args,**opts),**__taskopts) + +def eventCondition(event, table=None): + """return the proper WHERE condition to select data at the time specified by event. """ + if not table: + table = '' + else: + table += '.' + if event is None: + return """(active = TRUE)""" + elif isinstance(event, int) or isinstance(event, long): + return """(%(table)screate_event <= %(event)d AND ( %(table)srevoke_event IS NULL OR %(event)d < %(table)srevoke_event ))""" \ + % locals() + else: + raise koji.GenericError, "Invalid event: %r" % event + +def readGlobalInheritance(event=None): + c=context.cnx.cursor() + fields = ('tag_id','parent_id','name','priority','maxdepth','intransitive', + 'noconfig','pkg_filter') + q="""SELECT %s FROM tag_inheritance JOIN tag ON parent_id = id + WHERE %s + ORDER BY priority + """ % (",".join(fields), eventCondition(event)) + c.execute(q,locals()) + #convert list of lists into a list of dictionaries + return [ dict(zip(fields,x)) for x in c.fetchall() ] + +def readInheritanceData(tag_id,event=None): + c=context.cnx.cursor() + fields = ('parent_id','name','priority','maxdepth','intransitive','noconfig','pkg_filter') + q="""SELECT %s FROM tag_inheritance JOIN tag ON parent_id = id + WHERE %s AND tag_id = %%(tag_id)i + ORDER BY priority + """ % (",".join(fields), eventCondition(event)) + c.execute(q,locals()) + #convert list of lists into a list of dictionaries + data = [ dict(zip(fields,x)) for x in c.fetchall() ] + # include the current tag_id as child_id, so we can retrace the inheritance chain later + for datum in data: + datum['child_id'] = tag_id + return data + +def readDescendantsData(tag_id,event=None): + c=context.cnx.cursor() + fields = ('tag_id','parent_id','name','priority','maxdepth','intransitive','noconfig','pkg_filter') + q="""SELECT %s FROM tag_inheritance JOIN tag ON tag_id = id + WHERE %s AND parent_id = %%(tag_id)i + ORDER BY priority + """ % (",".join(fields), eventCondition(event)) + 
c.execute(q,locals()) + #convert list of lists into a list of dictionaries + data = [ dict(zip(fields,x)) for x in c.fetchall() ] + return data + +def writeInheritanceData(tag_id, changes, clear=False): + """Add or change inheritance data for a tag""" + context.session.assertPerm('admin') + fields = ('parent_id','priority','maxdepth','intransitive','noconfig','pkg_filter') + if isinstance(changes,dict): + changes = [changes] + for link in changes: + check_fields = fields + if link.get('delete link'): + check_fields = ('parent_id') + for f in fields: + if not link.has_key(f): + raise koji.GenericError, "No value for %s" % f + # read current data and index + data = dict([[link['parent_id'],link] for link in readInheritanceData(tag_id)]) + for link in changes: + link['is_update'] = True + parent_id = link['parent_id'] + orig = data.get(parent_id) + if link.get('delete link'): + if orig: + data[parent_id] = link + elif not orig or clear: + data[parent_id] = link + else: + #not a delete request and we have a previous link to parent + for f in fields: + if orig[f] != link[f]: + data[parent_id] = link + break + if clear: + for link in data.itervalues(): + if not link.get('is_update'): + link['delete link'] = True + link['is_update'] = True + changed = False + for link in data.itervalues(): + if link.get('is_update'): + changed = True + break + if not changed: + # nothing to do + log_error("No inheritance changes") + return + #check for duplicate priorities + pri_index = {} + for link in data.itervalues(): + if link.get('delete link'): + continue + pri_index.setdefault(link['priority'], []).append(link) + for pri, dups in pri_index.iteritems(): + if len(dups) <= 1: + continue + #oops, duplicate entries for a single priority + dup_ids = [ link['parent_id'] for link in dups] + raise koji.GenericError, "Inheritance priorities must be unique (pri %s: %r )" % (pri, dup_ids) + # get an event + event = _singleValue("SELECT get_event()") + for parent_id, link in data.iteritems(): 
+ if not link.get('is_update'): + continue + # revoke old values + q = """ + UPDATE tag_inheritance SET active=NULL,revoke_event=%(event)s + WHERE tag_id=%(tag_id)s AND parent_id = %(parent_id)s AND active = TRUE + """ + _dml(q,locals()) + for parent_id, link in data.iteritems(): + if not link.get('is_update'): + continue + # skip rest if we are just deleting + if link.get('delete link'): + continue + # insert new value + newlink = {} + for f in fields: + newlink[f] = link[f] + newlink['tag_id'] = tag_id + newlink['create_event'] = event + # defaults ok for the rest + keys = newlink.keys() + flist = ','.join(["%s" % k for k in keys]) + vlist = ','.join(["%%(%s)s" % k for k in keys]) + q = """ + INSERT INTO tag_inheritance (%(flist)s) + VALUES (%(vlist)s) + """ % locals() + _dml(q,newlink) + +def readFullInheritance(tag_id,event=None,reverse=False,stops={},jumps={}): + """Returns a list representing the full, ordered inheritance from tag""" + order = [] + readFullInheritanceRecurse(tag_id,event,order,stops,{},{},0,None,False,[],reverse,jumps) + return order + +def readFullInheritanceRecurse(tag_id,event,order,prunes,top,hist,currdepth,maxdepth,noconfig,pfilter,reverse,jumps): + if maxdepth is not None and maxdepth < 1: + return + #note: maxdepth is relative to where we are, but currdepth is absolute from + #the top. + currdepth += 1 + top = top.copy() + top[tag_id] = 1 + if reverse: + node = readDescendantsData(tag_id,event) + else: + node = readInheritanceData(tag_id,event) + for link in node: + if reverse: + id = link['tag_id'] + else: + id = link['parent_id'] + if jumps.has_key(id): + id = jumps[id] + if top.has_key(id): + #LOOP! 
+ log_error("Warning: INHERITANCE LOOP detected at %s -> %s, pruning" % (tag_id,id)) + #auto prune + continue + if prunes.has_key(id): + # ignore pruned tags + continue + if link['intransitive'] and len(top) > 1: + # ignore intransitive inheritance links, except at root + continue + if link['priority'] < 0: + #negative priority indicates pruning, rather than inheritance + prunes[id] = 1 + continue + #propagate maxdepth + nextdepth = link['maxdepth'] + if nextdepth is None: + if maxdepth is not None: + nextdepth = maxdepth - 1 + elif maxdepth is not None: + nextdepth = min(nextdepth,maxdepth) - 1 + link['nextdepth'] = nextdepth + link['currdepth'] = currdepth + #propagate noconfig and pkg_filter controls + if link['noconfig']: + noconfig = True + filter = list(pfilter) # copy + pattern = link['pkg_filter'] + if pattern: + filter.append(pattern) + link['filter'] = filter + # check history to avoid redundant entries + if hist.has_key(id): + #already been there + #BUT, options may have been different + rescan = True + #since rescans are possible, we might have to consider more than one previous hit + for previous in hist[id]: + sufficient = True # is previous sufficient? 
+ # if last depth was less than current, then previous insufficient + lastdepth = previous['nextdepth'] + if nextdepth is None: + if lastdepth is not None: + sufficient = False + elif lastdepth is not None and lastdepth < nextdepth: + sufficient = False + # if noconfig was on before, but not now, then insuffient + if previous['noconfig'] and not noconfig: + sufficient = False + # if we had a filter before, then insufficient + if len(previous['filter']) > 0: + # FIXME - we could probably be a little more precise here + sufficient = False + if sufficient: + rescan = False + if not rescan: + continue + else: + hist[id] = [] + hist[id].append(link) #record history + order.append(link) + readFullInheritanceRecurse(id,event,order,prunes,top,hist,currdepth,nextdepth,noconfig,filter,reverse,jumps) + +# tag-package operations +# add +# remove +# block +# unblock +# change owner +# list + + +def _pkglist_remove(tag_id,pkg_id,event_id=None): + if event_id is None: + event_id = _singleValue("SELECT get_event()") + q = """UPDATE tag_packages SET active=NULL,revoke_event=%(event_id)i + WHERE active = TRUE AND package_id=%(pkg_id)i AND tag_id=%(tag_id)i""" + _dml(q,locals()) + +def _pkglist_add(tag_id,pkg_id,owner,block,extra_arches,event_id=None): + if event_id is None: + event_id = _singleValue("SELECT get_event()") + #revoke old entry (if present) + _pkglist_remove(tag_id,pkg_id,event_id) + q = """INSERT INTO tag_packages(package_id,tag_id,owner,blocked,extra_arches,create_event) + VALUES (%(pkg_id)s,%(tag_id)s,%(owner)s,%(block)s,%(extra_arches)s,%(event_id)s) """ + _dml(q,locals()) + +def pkglist_add(taginfo,pkginfo,owner=None,block=None,extra_arches=None,force=False,update=False): + """Add to (or update) package list for tag""" + #only admins.... 
+ context.session.assertPerm('admin') + tag = get_tag(taginfo, strict=True) + pkg = lookup_package(pkginfo, create=True) + tag_id = tag['id'] + pkg_id = pkg['id'] + if owner is not None: + owner = get_user(owner,strict=True)['id'] + # first check to see if package is: + # already present (via inheritance) + # blocked + pkglist = readPackageList(tag_id, pkgID=pkg_id, inherit=True) + previous = pkglist.get(pkg_id,None) + if previous is None: + if block is None: + block = False + else: + block = bool(block) + if update and not force: + #if update flag is true, require that there be a previous entry + raise koji.GenericError, "cannot update: tag %s has no data for package %s" \ + % (tag['name'],pkg['name']) + else: + #already there (possibly via inheritance) + if owner is None: + owner = previous['owner_id'] + if block is None: + block = previous['blocked'] + else: + block = bool(block) + #see if the data is the same + changed = False + for key,value in (('owner_id',owner), + ('blocked',block), + ('extra_arches',extra_arches)): + if previous[key] != value: + changed = True + break + if not changed and not force: + #no point in adding it again with the same data + return + if previous['blocked'] and not block and not force: + raise koji.GenericError, "package %s is blocked in tag %s" % (pkg['name'],tag['name']) + if owner is None: + if force: + owner = context.session.user_id + else: + raise koji.GenericError, "owner not specified" + _pkglist_add(tag_id,pkg_id,owner,block,extra_arches) + +def pkglist_remove(taginfo,pkginfo,force=False): + """Remove package from the list for tag + + Most of the time you really want to use the block or unblock functions + + The main reason to remove an entry like this is to remove an override so + that the package data can be inherited from elsewhere. + """ + #only admins.... 
+ context.session.assertPerm('admin') + tag_id = get_tag_id(taginfo, strict=True) + pkg_id = get_package_id(pkginfo, strict=True) + _pkglist_remove(tag_id,pkg_id) + +def pkglist_block(taginfo,pkginfo): + """Block the package in tag""" + pkglist_add(taginfo,pkginfo,block=True) + +def pkglist_unblock(taginfo,pkginfo): + """Unblock the package in tag + + Generally this just adds a unblocked duplicate of the blocked entry. + However, if the block is actually in tag directly (not through inheritance), + the blocking entry is simply removed""" + tag = get_tag(taginfo, strict=True) + pkg = lookup_package(pkginfo, strict=True) + tag_id = tag['id'] + pkg_id = pkg['id'] + pkglist = readPackageList(tag_id, pkgID=pkg_id, inherit=True) + previous = pkglist.get(pkg_id,None) + if previous is None: + raise koji.GenericError, "no data (blocked or otherwise) for package %s in tag %s" \ + % (pkg['name'],tag['name']) + if not previous['blocked']: + raise koji.GenericError, "package %s NOT blocked in tag %s" % (pkg['name'],tag['name']) + event_id = _singleValue("SELECT get_event()") + if previous['tag_id'] != tag_id: + _pkglist_add(tag_id,pkg_id,previous['owner_id'],False,previous['extra_arches']) + else: + #just remove the blocking entry + event_id = _singleValue("SELECT get_event()") + _pkglist_remove(tag_id,pkg_id,event_id) + #it's possible this was the only entry in the inheritance or that the next entry + #back is also a blocked entry. 
if so, we need to add it back as unblocked + pkglist = readPackageList(tag_id, pkgID=pkg_id, inherit=True) + if not pkglist.has_key(pkg_id) or pkglist[pkg_id]['blocked']: + _pkglist_add(tag_id,pkg_id,previous['owner_id'],False,previous['extra_arches'], + event_id) + +def pkglist_setowner(taginfo,pkginfo,owner,force=False): + """Set the owner for package in tag""" + pkglist_add(taginfo,pkginfo,owner=owner,force=force,update=True) + +def pkglist_setarches(taginfo,pkginfo,arches,force=False): + """Set extra_arches for package in tag""" + pkglist_add(taginfo,pkginfo,extra_arches=arches,force=force,update=True) + +def readPackageList(tagID=None, userID=None, pkgID=None, event=None, inherit=False, with_dups=False): + """Returns the package list for the specified tag or user. + + One of (tagID,userID,pkgID) must be specified + + Note that the returned data includes blocked entries + """ + if tagID is None and userID is None and pkgID is None: + raise koji.GenericError, 'tag,user, and/or pkg must be specified' + + packages = {} + fields = (('package.id', 'package_id'), ('package.name', 'package_name'), + ('tag.id', 'tag_id'), ('tag.name', 'tag_name'), + ('users.id', 'owner_id'), ('users.name', 'owner_name'), + ('extra_arches','extra_arches'), + ('tag_packages.blocked', 'blocked')) + flist = ', '.join([pair[0] for pair in fields]) + cond = eventCondition(event) + q = """ + SELECT %(flist)s + FROM tag_packages + JOIN tag on tag.id = tag_packages.tag_id + JOIN package ON package.id = tag_packages.package_id + JOIN users ON users.id = tag_packages.owner + WHERE %(cond)s""" + if tagID != None: + q += """ + AND tag.id = %%(tagID)i""" + if userID != None: + q += """ + AND users.id = %%(userID)i""" + if pkgID != None: + if isinstance(pkgID, int) or isinstance(pkgID, long): + q += """ + AND package.id = %%(pkgID)i""" + else: + q += """ + AND package.name = %%(pkgID)s""" + + q = q % locals() + for p in _multiRow(q, locals(), [pair[1] for pair in fields]): + # things are simpler for 
the first tag + pkgid = p['package_id'] + if with_dups: + packages.setdefault(pkgid,[]).append(p) + else: + packages[pkgid] = p + + if tagID is None or (not inherit): + return packages + + order = readFullInheritance(tagID, event) + + re_cache = {} + for link in order: + tagID = link['parent_id'] + filter = link['filter'] + # precompile filter patterns + re_list = [] + for pat in filter: + prog = re_cache.get(pat,None) + if prog is None: + prog = re.compile(pat) + re_cache[pat] = prog + re_list.append(prog) + # same query as before, with different params + for p in _multiRow(q, locals(), [pair[1] for pair in fields]): + pkgid = p['package_id'] + if not with_dups and packages.has_key(pkgid): + #previous data supercedes + continue + # apply package filters + skip = False + for prog in re_list: + # the list of filters is cumulative, i.e. + # the package name must match all of them + if prog.match(p['package_name']) is None: + skip = True + break + if skip: + continue + if with_dups: + packages.setdefault(pkgid,[]).append(p) + else: + packages[pkgid] = p + return packages + + +def readTaggedBuilds(tag,event=None,inherit=False,latest=False,package=None): + """Returns a list of builds for specified tag + + set inherit=True to follow inheritance + set event to query at a time in the past + set latest=True to get only the latest build per package + """ + # build - id pkg_id version release epoch + # tag_listing - id build_id tag_id + + taglist = [tag] + if inherit: + taglist += [link['parent_id'] for link in readFullInheritance(tag, event)] + + #regardless of inherit setting, we need to use inheritance to read the + #package list + packages = readPackageList(tagID=tag, event=event, inherit=True, pkgID=package) + + #these values are used for each iteration + fields = (('tag.id', 'tag_id'), ('tag.name', 'tag_name'), ('build.id', 'id'), + ('build.id', 'build_id'), ('build.version', 'version'), ('build.release', 'release'), + ('build.epoch', 'epoch'), ('build.state', 'state'), 
('build.completion_time', 'completion_time'), + ('build.task_id','task_id'), + ('events.id', 'creation_event_id'), ('events.time', 'creation_time'), + ('package.id', 'package_id'), ('package.name', 'package_name'), + ('package.name', 'name'), + ("package.name || '-' || build.version || '-' || build.release", 'nvr'), + ('users.id', 'owner_id'), ('users.name', 'owner_name')) + st_complete = koji.BUILD_STATES['COMPLETE'] + + q="""SELECT %s + FROM tag_listing + JOIN tag ON tag.id = tag_listing.tag_id + JOIN build ON build.id = tag_listing.build_id + JOIN users ON users.id = build.owner + JOIN events ON events.id = build.create_event + JOIN package ON package.id = build.pkg_id + WHERE %s AND tag_id=%%(tagid)s + AND build.state=%%(st_complete)i + """ % (', '.join([pair[0] for pair in fields]), eventCondition(event, 'tag_listing')) + if package: + q += """AND package.name = %(package)s + """ + q += """ORDER BY tag_listing.create_event DESC + """ + # i.e. latest first + + builds = [] + seen = {} # used to enforce the 'latest' option + for tagid in taglist: + #log_error(koji.db._quoteparams(q,locals())) + for build in _multiRow(q, locals(), [pair[1] for pair in fields]): + pkgid = build['package_id'] + pinfo = packages.get(pkgid,None) + if pinfo is None or pinfo['blocked']: + # note: + # tools should endeavor to keep tag_listing sane w.r.t. 
+ # the package list, but if there is disagreement the package + # list should take priority + continue + if latest: + if seen.has_key(pkgid): + #only take the first (note ordering in query above) + continue + seen[pkgid] = 1 + builds.append(build) + + return builds + +def readTaggedRPMS(tag, package=None, arch=None, event=None,inherit=False,latest=True,rpmsigs=False): + """Returns a list of rpms for specified tag + + set inherit=True to follow inheritance + set event to query at a time in the past + set latest=False to get all tagged RPMS (not just from the latest builds) + """ + taglist = [tag] + if inherit: + #XXX really should cache this - it gets called several places + # (however, it is fairly quick) + taglist += [link['parent_id'] for link in readFullInheritance(tag, event)] + + builds = readTaggedBuilds(tag, event=event, inherit=inherit, latest=latest, package=package) + #index builds + build_idx = dict([(b['build_id'],b) for b in builds]) + + #the following query is run for each tag in the inheritance + fields = [('rpminfo.name', 'name'), + ('rpminfo.version', 'version'), + ('rpminfo.release', 'release'), + ('rpminfo.arch', 'arch'), + ('rpminfo.id', 'id'), + ('rpminfo.epoch', 'epoch'), + ('rpminfo.payloadhash', 'payloadhash'), + ('rpminfo.size', 'size'), + ('rpminfo.buildtime', 'buildtime'), + ('rpminfo.buildroot_id', 'buildroot_id'), + ('rpminfo.build_id', 'build_id')] + if rpmsigs: + fields.append(('rpmsigs.sigkey', 'sigkey')) + q="""SELECT %s FROM rpminfo + JOIN tag_listing ON rpminfo.build_id = tag_listing.build_id + """ % ', '.join([pair[0] for pair in fields]) + if package: + q += """JOIN build ON rpminfo.build_id = build.id + JOIN package ON package.id = build.pkg_id + """ + if rpmsigs: + q += """LEFT OUTER JOIN rpmsigs on rpminfo.id = rpmsigs.rpm_id + """ + q += """WHERE %s AND tag_id=%%(tagid)s + """ % eventCondition(event) + if package: + q += """AND package.name = %(package)s + """ + if arch: + q += """AND rpminfo.arch = %(arch)s + """ + # 
unique constraints ensure that each of these queries will not report + # duplicate rpminfo entries, BUT since we make the query multiple times, + # we can get duplicates if a package is multiply tagged. + rpms = [] + for tagid in taglist: + for rpminfo in _multiRow(q, locals(), [pair[1] for pair in fields]): + #note: we're checking against the build list because + # it has been filtered by the package list for the tag + # tools should endeavor to keep tag_listing sane w.r.t. + # the package list, but if there is disagreement the package + # list should take priority + build = build_idx.get(rpminfo['build_id'],None) + if build is None: + continue + elif build['tag_id'] != tagid: + #wrong tag + continue + rpms.append(rpminfo) + return [rpms,builds] + +def check_tag_access(tag_id,user_id=None): + """Determine if user has access to tag package with tag. + + Returns a tuple (access, override, reason) + access: a boolean indicating whether access is allowed + override: a boolean indicating whether access may be forced + reason: the reason access is blocked + """ + if user_id is None: + user_id = context.session.user_id + if user_id is None: + raise koji.GenericError, "a user_id is required" + perms = koji.auth.get_user_perms(user_id) + override = False + if 'admin' in perms: + override = True + tag = get_tag(tag_id) + if tag['locked']: + return (False, override, "tag is locked") + if tag['perm_id']: + needed_perm = lookup_perm(tag['perm_id'],strict=True)['name'] + if needed_perm not in perms: + return (False, override, "tag is locked") + return (True,override,"") + +def assert_tag_access(tag_id,user_id=None,force=False): + access, override, reason = check_tag_access(tag_id,user_id) + if not access and not (override and force): + raise koji.NotAllowed, reason + +def _tag_build(tag,build,user_id=None,force=False): + """Tag a build + + This function makes access checks based on user_id, which defaults to the + user_id of the session. 
+ + Tagging with a locked tag is not allowed unless force is true (and even + then admin permission is required). + + Retagging is not allowed unless force is true. (retagging changes the order + of entries will affect which build is the latest) + """ + tag = get_tag(tag, strict=True) + build = get_build(build, strict=True) + tag_id = tag['id'] + build_id = build['id'] + nvr = "%(name)s-%(version)s-%(release)s" % build + if build['state'] != koji.BUILD_STATES['COMPLETE']: + # incomplete builds may not be tagged, not even when forced + state = koji.BUILD_STATES[build['state']] + raise koji.TagError, "build %s not complete: state %s" % (nvr,state) + #access check + assert_tag_access(tag['id'],user_id=user_id,force=force) + #XXX - add another check based on package ownership? + # see if it's already tagged + retag = False + q = """SELECT build_id FROM tag_listing WHERE tag_id=%(tag_id)i + AND build_id=%(build_id)i AND active = TRUE FOR UPDATE""" + #note: tag_listing is unique on (build_id, tag_id, active) + if _fetchSingle(q,locals()): + #already tagged + if not force: + raise koji.TagError, "build %s already tagged (%s)" % (nvr,tag['name']) + #otherwise we retag + retag = True + event_id = _singleValue("SELECT get_event()") + if retag: + #revoke the old tag first + q = """UPDATE tag_listing SET active=NULL,revoke_event=%(event_id)i + WHERE tag_id=%(tag_id)i AND build_id=%(build_id)i AND active = TRUE""" + _dml(q,locals()) + #tag the package + q = """INSERT INTO tag_listing(tag_id,build_id,active,create_event) + VALUES(%(tag_id)i,%(build_id)i,TRUE,%(event_id)i)""" + _dml(q,locals()) + +def _untag_build(tag,build,user_id=None,strict=True,force=False): + """Untag a build + + If strict is true, assert that build is actually tagged + The force option overrides a lock (if the user is an admin) + + This function makes access checks based on user_id, which defaults to the + user_id of the session. 
+ """ + tag = get_tag(tag, strict=True) + build = get_build(build, strict=True) + tag_id = tag['id'] + build_id = build['id'] + assert_tag_access(tag_id,user_id=user_id,force=force) + #XXX - add another check based on package ownership? + q = """UPDATE tag_listing SET active=NULL,revoke_event=get_event() + WHERE tag_id=%(tag_id)i AND build_id=%(build_id)i AND active = TRUE + """ + count = _dml(q,locals()) + if count == 0 and strict: + nvr = "%(name)s-%(version)s-%(release)s" % build + raise koji.TagError, "build %s not in tag %s" % (nvr,tag['name']) + +# tag-group operations +# add +# remove +# block +# unblock +# list (readTagGroups) + +def grplist_add(taginfo,grpinfo,block=False,force=False,**opts): + """Add to (or update) group list for tag""" + #only admins.... + context.session.assertPerm('admin') + tag = get_tag(taginfo) + group = lookup_group(grpinfo,create=True) + block = bool(block) + # check current group status (incl inheritance) + groups = get_tag_groups(tag['id'], inherit=True, incl_pkgs=False,incl_reqs=False) + previous = groups.get(group['id'],None) + cfg_fields = ('exported','display_name','is_default','uservisible', + 'description','langonly','biarchonly',) + if previous is not None: + #already there (possibly via inheritance) + if previous['blocked'] and not force: + raise koji.GenericError, "group %s is blocked in tag %s" % (group['name'],tag['name']) + #check for duplication and grab old data for defaults + changed = False + for field in cfg_fields: + old = previous[field] + if opts.has_key(field): + if opts[field] != old: + changed = True + else: + opts[field] = old + if not changed: + #no point in adding it again with the same data + return + #provide available defaults and sanity check data + opts.setdefault('display_name',group['name']) + opts.setdefault('biarchonly',False) + opts.setdefault('exported',True) + opts.setdefault('uservisible',True) + # XXX ^^^ + opts['tag_id'] = tag['id'] + opts['grp_id'] = group['id'] + opts['blocked'] = block 
+ opts['event_id'] = _singleValue("SELECT get_event()") + #revoke old entry (if present) + q = """UPDATE group_config SET active=NULL,revoke_event=%(event_id)s + WHERE active = TRUE AND group_id=%(grp_id)s AND tag_id=%(tag_id)s""" + _dml(q,opts) + #add new entry + x_fields = filter(opts.has_key,cfg_fields) + params = [ '%%(%s)s' % f for f in x_fields ] + q = """INSERT INTO group_config(group_id,tag_id,blocked,create_event,%s) + VALUES (%%(grp_id)s,%%(tag_id)s,%%(blocked)s,%%(event_id)s,%s) """ \ + % ( ','.join(x_fields), ','.join(params)) + _dml(q,opts) + +def grplist_remove(taginfo,grpinfo,force=False): + """Remove group from the list for tag + + Really this shouldn't be used except in special cases + Most of the time you really want to use the block or unblock functions + """ + #only admins.... + context.session.assertPerm('admin') + tag = get_tag(taginfo) + group = lookup_group(grpinfo, strict=True) + tag_id = tag['id'] + grp_id = group['id'] + q = """UPDATE group_config SET active=NULL,revoke_event=get_event() + WHERE active = TRUE AND package_id=%(pkg_id)s AND tag_id=%(tag_id)s""" + _dml(q,locals()) + +def grplist_block(taginfo,grpinfo): + """Block the group in tag""" + grplist_add(taginfo,grpinfo,block=True) + +def grplist_unblock(taginfo,grpinfo): + """Unblock the group in tag + + If the group is blocked in this tag, then simply remove the block. + Otherwise, raise an error + """ + # only admins... 
+ context.session.assertPerm('admin') + tag = lookup_tag(taginfo,strict=True) + group = lookup_group(grpinfo,strict=True) + tag_id = tag['id'] + grp_id = group['id'] + q = """SELECT blocked FROM group_config + WHERE active = TRUE AND group_id=%(grp_id)s AND tag_id=%(tag_id)s + FOR UPDATE""" + blocked = _singleValue(q,locals()) + if not blocked: + raise koji.GenericError, "group %s is NOT blocked in tag %s" % (group['name'],tag['name']) + q = """UPDATE group_config SET active=NULL,revoke_event=get_event() + WHERE id=%(row_id)s""" + _dml(q,locals()) + + +# tag-group-pkg operations +# add +# remove +# block +# unblock +# list (readTagGroups) + +def grp_pkg_add(taginfo,grpinfo,pkg_name,block=False,force=False,**opts): + """Add package to group for tag""" + #only admins.... + context.session.assertPerm('admin') + tag = lookup_tag(taginfo, strict=True) + group = lookup_group(grpinfo,strict=True) + block = bool(block) + # check current group status (incl inheritance) + groups = get_tag_groups(tag['id'], inherit=True, incl_pkgs=True, incl_reqs=False) + grp_cfg = groups.get(group['id'],None) + if grp_cfg is None: + raise koji.GenericError, "group %s not present in tag %s" % (group['name'],tag['name']) + elif grp_cfg['blocked']: + raise koji.GenericError, "group %s is blocked in tag %s" % (group['name'],tag['name']) + previous = grp_cfg['packagelist'].get(pkg_name,None) + cfg_fields = ('type','basearchonly','requires') + if previous is not None: + #already there (possibly via inheritance) + if previous['blocked'] and not force: + raise koji.GenericError, "package %s blocked in group %s, tag %s" \ + % (pkg_name,group['name'],tag['name']) + #check for duplication and grab old data for defaults + changed = False + for field in cfg_fields: + old = previous[field] + if opts.has_key(field): + if opts[field] != old: + changed = True + else: + opts[field] = old + if block: + #from condition above, either previous is not blocked or force is on, + #either way, we should add the entry 
+ changed = True + if not changed and not force: + #no point in adding it again with the same data (unless force is on) + return + #XXX - sanity check data? + opts.setdefault('type','default') + opts['group_id'] = group['id'] + opts['tag_id'] = tag['id'] + opts['package'] = pkg_name + opts['blocked'] = block + opts['event_id'] = _singleValue("SELECT get_event()") + #revoke old entry (if present) + q = """UPDATE group_package_listing SET active=NULL,revoke_event=%(event_id)s + WHERE active = TRUE AND group_id=%(group_id)s AND tag_id=%(tag_id)s + AND package=%(package)s""" + _dml(q,opts) + #add new entry + x_fields = filter(opts.has_key,cfg_fields) \ + + ('group_id','tag_id','package','blocked') + params = [ '%%(%s)s' % f for f in x_fields ] + q = """INSERT INTO group_package_listing(create_event,%s) + VALUES (%%(event_id)s,%s) """ \ + % ( ','.join(x_fields), ','.join(params)) + _dml(q,opts) + +def grp_pkg_remove(taginfo,grpinfo,pkg_name,force=False): + """Remove package from the list for group-tag + + Really this shouldn't be used except in special cases + Most of the time you really want to use the block or unblock functions + """ + #only admins.... + context.session.assertPerm('admin') + tag_id = get_tag_id(taginfo,strict=True) + grp_id = get_group_id(grpinfo,strict=True) + q = """UPDATE group_package_listing SET active=NULL,revoke_event=get_event() + WHERE active = TRUE AND package=%(pkg_name)s AND tag_id=%(tag_id)s + AND group_id = %(grp_id)s""" + _dml(q,locals()) + +def grp_pkg_block(taginfo,grpinfo, pkg_name): + """Block the package in group-tag""" + grp_pkg_add(taginfo,grpinfo,pkg_name,block=True) + +def grp_pkg_unblock(taginfo,grpinfo,pkg_name): + """Unblock the package in group-tag + + If blocked (directly) in this tag, then simply remove the block. + Otherwise, raise an error + """ + # only admins... 
+ context.session.assertPerm('admin') + tag_id = get_tag_id(taginfo,strict=True) + grp_id = get_group_id(grpinfo,strict=True) + q = """SELECT blocked FROM group_package_listing + WHERE active = TRUE AND group_id=%(grp_id)s AND tag_id=%(tag_id)s + AND package = %(pkg_name)s + FOR UPDATE""" + blocked = _singleValue(q, locals(), strict=False) + if not blocked: + raise koji.GenericError, "package %s is NOT blocked in group %s, tag %s" \ + % (pkg_name,grp_id,tag_id) + q = """UPDATE group_package_listing SET active=NULL,revoke_event=get_event() + WHERE active = TRUE AND group_id=%(grp_id)s AND tag_id=%(tag_id)s + AND package = %(pkg_name)s""" + _dml(q,locals()) + +# tag-group-req operations +# add +# remove +# block +# unblock +# list (readTagGroups) + +def grp_req_add(taginfo,grpinfo,reqinfo,block=False,force=False,**opts): + """Add group requirement to group for tag""" + #only admins.... + context.session.assertPerm('admin') + tag = lookup_tag(taginfo, strict=True) + group = lookup_group(grpinfo, strict=True, create=False) + req = lookup_group(reqinfo, strict=True, create=False) + block = bool(block) + # check current group status (incl inheritance) + groups = get_tag_groups(tag['id'], inherit=True, incl_pkgs=False, incl_reqs=True) + grp_cfg = groups.get(group['id'],None) + if grp_cfg is None: + raise koji.GenericError, "group %s not present in tag %s" % (group['name'],tag['name']) + elif grp_cfg['blocked']: + raise koji.GenericError, "group %s is blocked in tag %s" % (group['name'],tag['name']) + previous = grp_cfg['grouplist'].get(req['id'],None) + cfg_fields = ('type','is_metapkg') + if previous is not None: + #already there (possibly via inheritance) + if previous['blocked'] and not force: + raise koji.GenericError, "requirement on group %s blocked in group %s, tag %s" \ + % (req['name'],group['name'],tag['name']) + #check for duplication and grab old data for defaults + changed = False + for field in cfg_fields: + old = previous[field] + if opts.has_key(field): + 
if opts[field] != old: + changed = True + else: + opts[field] = old + if not changed: + #no point in adding it again with the same data + return + #XXX - sanity check data? + opts.setdefault('type','mandatory') + opts['group_id'] = group['id'] + opts['tag_id'] = tag['id'] + opts['req_id'] = req['id'] + opts['blocked'] = block + opts['event_id'] = _singleValue("SELECT get_event()") + #revoke old entry (if present) + q = """UPDATE group_req_listing SET active=NULL,revoke_event=%(event_id)s + WHERE active = TRUE AND group_id=%(group_id)s AND tag_id=%(tag_id)s + AND req_id=%(req_id)s""" + _dml(q,opts) + #add new entry + x_fields = filter(opts.has_key,cfg_fields) \ + + ('group_id','tag_id','req_id','blocked') + params = [ '%%(%s)s' % f for f in x_fields ] + q = """INSERT INTO group_req_listing(create_event,%s) + VALUES (%%(event_id)s,%s) """ \ + % ( ','.join(x_fields), ','.join(params)) + _dml(q,opts) + +def grp_req_remove(taginfo,grpinfo,reqinfo,force=False): + """Remove group requirement from the list for group-tag + + Really this shouldn't be used except in special cases + Most of the time you really want to use the block or unblock functions + """ + #only admins.... + context.session.assertPerm('admin') + tag_id = get_tag_id(taginfo,strict=True) + grp_id = get_group_id(grpinfo,strict=True) + req_id = get_group_id(reqinfo,strict=True) + q = """UPDATE group_req_listing SET active=NULL,revoke_event=get_event() + WHERE active = TRUE AND req_id=%(req_id)s AND tag_id=%(tag_id)s + AND group_id = %(grp_id)s""" + _dml(q,locals()) + +def grp_req_block(taginfo,grpinfo,reqinfo): + """Block the group requirement in group-tag""" + grp_req_add(taginfo,grpinfo,reqinfo,block=True) + +def grp_req_unblock(taginfo,grpinfo,reqinfo): + """Unblock the group requirement in group-tag + + If blocked (directly) in this tag, then simply remove the block. + Otherwise, raise an error + """ + # only admins... 
+ context.session.assertPerm('admin') + tag_id = get_tag_id(taginfo,strict=True) + grp_id = get_group_id(grpinfo,strict=True) + req_id = get_group_id(reqinfo,strict=True) + q = """SELECT blocked FROM group_req_listing + WHERE active = TRUE AND group_id=%(grp_id)s AND tag_id=%(tag_id)s + AND req_id = %(req_id)s + FOR UPDATE""" + blocked = _singleValue(q,locals()) + if not blocked: + raise koji.GenericError, "group req %s is NOT blocked in group %s, tag %s" \ + % (req_id,grp_id,tag_id) + q = """UPDATE group_req_listing SET active=NULL,revoke_event=get_event() + WHERE id=%(row_id)s""" + _dml(q,locals()) + +def get_tag_groups(tag,event=None,inherit=True,incl_pkgs=True,incl_reqs=True): + """Return group data for the tag + + If inherit is true, follow inheritance + If event is specified, query at event + If incl_pkgs is true (the default), include packagelist data + If incl_reqs is true (the default), include groupreq data + + Note: the data returned includes some blocked entries that may need to be + filtered out. 
+ """ + order = None + tag = get_tag_id(tag,strict=True) + taglist = [tag] + if inherit: + order = readFullInheritance(tag,event) + taglist += [link['parent_id'] for link in order] + evcondition = eventCondition(event) + + # First get the list of groups + fields = ('name','group_id','tag_id','blocked','exported','display_name', + 'is_default','uservisible','description','langonly','biarchonly',) + q=""" + SELECT %s FROM group_config JOIN groups ON group_id = id + WHERE %s AND tag_id = %%(tagid)s + """ % (",".join(fields),evcondition) + groups = {} + for tagid in taglist: + for group in _multiRow(q,locals(),fields): + grp_id = group['group_id'] + # we only take the first entry for group as we go through inheritance + groups.setdefault(grp_id,group) + + if incl_pkgs: + for group in groups.itervalues(): + group['packagelist'] = {} + fields = ('group_id','tag_id','package','blocked','type','basearchonly','requires') + q = """ + SELECT %s FROM group_package_listing + WHERE %s AND tag_id = %%(tagid)s + """ % (",".join(fields),evcondition) + for tagid in taglist: + for grp_pkg in _multiRow(q,locals(),fields): + grp_id = grp_pkg['group_id'] + if not groups.has_key(grp_id): + #tag does not have this group + continue + group = groups[grp_id] + if group['blocked']: + #ignore blocked groups + continue + pkg_name = grp_pkg['package'] + group['packagelist'].setdefault(pkg_name,grp_pkg) + + if incl_reqs: + # and now the group reqs + for group in groups.itervalues(): + group['grouplist'] = {} + fields = ('group_id','tag_id','req_id','blocked','type','is_metapkg','name') + q = """SELECT %s FROM group_req_listing JOIN groups on req_id = id + WHERE %s AND tag_id = %%(tagid)s + """ % (",".join(fields),evcondition) + for tagid in taglist: + for grp_req in _multiRow(q,locals(),fields): + grp_id = grp_req['group_id'] + if not groups.has_key(grp_id): + #tag does not have this group + continue + group = groups[grp_id] + if group['blocked']: + #ignore blocked groups + continue + req_id = 
grp_req['req_id'] + if not groups.has_key(req_id): + #tag does not have this group + continue + elif groups[req_id]['blocked']: + #ignore blocked groups + continue + group['grouplist'].setdefault(req_id,grp_req) + + return groups + +def readTagGroups(tag,event=None,inherit=True,incl_pkgs=True,incl_reqs=True): + """Return group data for the tag with blocked entries removed + + Also scrubs data into an xmlrpc-safe format (no integer keys) + """ + groups = get_tag_groups(tag,event,inherit,incl_pkgs,incl_reqs) + for group in groups.values(): + #filter blocked entries and collapse to a list + group['packagelist'] = filter(lambda x: not x['blocked'], + group['packagelist'].values()) + group['grouplist'] = filter(lambda x: not x['blocked'], + group['grouplist'].values()) + #filter blocked entries and collapse to a list + return filter(lambda x: not x['blocked'],groups.values()) + +def set_host_enabled(hostname, enabled=True): + context.session.assertPerm('admin') + if not get_host(hostname): + raise koji.GenericError, 'host does not exists: %s' % hostname + c = context.cnx.cursor() + c.execute("""UPDATE host SET enabled = %(enabled)s WHERE name = %(hostname)s""", locals()) + context.commit_pending = True + +def add_host_to_channel(hostname, channel_name): + context.session.assertPerm('admin') + host = get_host(hostname) + if host == None: + raise koji.GenericError, 'host does not exists: %s' % hostname + host_id = host['id'] + channel_id = get_channel_id(channel_name) + if channel_id == None: + raise koji.GenericError, 'channel does not exists: %s' % channel_name + channels = list_channels(host_id) + for channel in channels: + if channel['id'] == channel_id: + raise koji.GenericError, 'host %s is already subscribed to the %s channel' % (hostname, channel_name) + c = context.cnx.cursor() + c.execute("""INSERT INTO host_channels (host_id, channel_id) values (%(host_id)d, %(channel_id)d)""", locals()) + context.commit_pending = True + +def remove_host_from_channel(hostname, 
channel_name): + context.session.assertPerm('admin') + host = get_host(hostname) + if host == None: + raise koji.GenericError, 'host does not exists: %s' % hostname + host_id = host['id'] + channel_id = get_channel_id(channel_name) + if channel_id == None: + raise koji.GenericError, 'channel does not exists: %s' % channel_name + found = False + channels = list_channels(host_id) + for channel in channels: + if channel['id'] == channel_id: + found = True + break + if not found: + raise koji.GenericError, 'host %s is not subscribed to the %s channel' % (hostname, channel_name) + c = context.cnx.cursor() + c.execute("""DELETE FROM host_channels WHERE host_id = %(host_id)d and channel_id = %(channel_id)d""", locals()) + context.commit_pending = True + +def get_ready_hosts(): + """Return information about hosts that are ready to build. + + Hosts set the ready flag themselves + Note: We ignore hosts that are late checking in (even if a host + is busy with tasks, it should be checking in quite often). 
+ """ + c = context.cnx.cursor() + fields = ('host.id','name','arches','task_load', 'capacity') + aliases = ('id','name','arches','task_load', 'capacity') + q = """ + SELECT %s FROM host + JOIN sessions USING (user_id) + WHERE enabled = TRUE AND ready = TRUE + AND expired = FALSE + AND master IS NULL + AND update_time > NOW() - '5 minutes'::interval + """ % ','.join(fields) + # XXX - magic number in query + c.execute(q) + hosts = [dict(zip(aliases,row)) for row in c.fetchall()] + for host in hosts: + q = """SELECT channel_id FROM host_channels WHERE host_id=%(id)s""" + c.execute(q,host) + host['channels'] = [row[0] for row in c.fetchall()] + return hosts + +def get_active_tasks(): + """Return data on tasks that are yet to be run""" + c = context.cnx.cursor() + fields = ['id','state','channel_id','host_id','arch'] + q = """ + SELECT %s FROM task + WHERE state IN (%%(FREE)s,%%(ASSIGNED)s) + ORDER BY priority,create_time + LIMIT 100 + """ % ','.join(fields) + c.execute(q,koji.TASK_STATES) + return [dict(zip(fields,row)) for row in c.fetchall()] + +def get_task_descendents(task, childMap=None, request=False): + if childMap == None: + childMap = {} + children = task.getChildren(request=request) + children.sort(lambda a, b: cmp(a['id'], b['id'])) + # xmlrpclib requires dict keys to be strings + childMap[str(task.id)] = children + for child in children: + get_task_descendents(Task(child['id']), childMap, request) + return childMap + +def repo_new(tag_id): + """Create a new repo entry in the INIT state, return full repo data + + Returns a dictionary containing + repo_id, event_id, rpms, builds, groups + """ + context.session.assertPerm('repo') + c = context.cnx.cursor() + state = koji.REPO_INIT + id = get_tag_id(tag_id) + if id is None: + raise koji.GenericError("Could not find ID for tag: %s" % tag_id) + q = """INSERT INTO repo(tag_id,state) VALUES(%(id)s,%(state)s)""" + context.commit_pending = True + c.execute(q,locals()) + #get event_id and repo_id + q = """SELECT 
currval('repo_id_seq'),currval('events_id_seq')""" + c.execute(q) + ret = {} + ret['repo_id'], ret['event_id'] = c.fetchone() + # no need to pass explicit event, since this is all one transaction + ret['rpms'], ret['builds'] = readTaggedRPMS(tag_id,event=None,inherit=True,latest=True) + ret['groups'] = readTagGroups(id,event=None,inherit=True) + return ret + +def repo_init(tag, with_src=False): + """Create a new repo entry in the INIT state, return full repo data + + Returns a dictionary containing + repo_id, event_id + """ + logger = logging.getLogger("koji.hub.repo_init") + state = koji.REPO_INIT + tinfo = get_tag(tag, strict=True) + tag_id = tinfo['id'] + repo_id = _singleValue("SELECT nextval('repo_id_seq')") + event_id = _singleValue("SELECT get_event()") + q = """INSERT INTO repo(id, create_event, tag_id, state) + VALUES(%(repo_id)s, %(event_id)s, %(tag_id)s, %(state)s)""" + _dml(q,locals()) + # no need to pass explicit event, since this is all one transaction + rpms, builds = readTaggedRPMS(tag_id, event=None, inherit=True, latest=True) + groups = readTagGroups(tag_id, event=None, inherit=True) + repodir = koji.pathinfo.repo(repo_id, tinfo['name']) + os.makedirs(repodir) #should not already exist + #index builds + builds = dict([[build['build_id'],build] for build in builds]) + #index the packages by arch + packages = {} + for rpminfo in rpms: + build = builds[rpminfo['build_id']] + if not build.has_key('name'): + #XXX -workaround for broken return fields + build['name'] = build['package_name'] + arch = rpminfo['arch'] + rpminfo['path'] = "%s/%s" % (koji.pathinfo.build(build), koji.pathinfo.rpm(rpminfo)) + if not os.path.exists(rpminfo['path']): + logger.warn("Error: no such file: %(path)s" % rpminfo) + continue + repoarch = koji.canonArch(arch) + packages.setdefault(repoarch,[]).append(rpminfo) + #generate comps and groups.spec + groupsdir = "%s/groups" % (repodir) + koji.ensuredir(groupsdir) + comps = koji.generate_comps(groups) + fo = file("%s/comps.xml" 
% groupsdir,'w') + fo.write(comps) + fo.close() + spec = koji.make_groups_spec(groups, name='buildsys-build', buildgroup='build') + fn = "%s/groups.spec" % groupsdir + fo = file(fn, 'w') + fo.write(spec) + fo.close() + #link packages + for arch in packages.iterkeys(): + if arch in ['src','noarch']: + continue + # src and noarch special-cased -- see below + rpmdir = "%s/%s/RPMS" % (repodir,arch) + srpmdir = "%s/%s/SRPMS" % (repodir,arch) + koji.ensuredir(rpmdir) + koji.ensuredir(srpmdir) + logger.info("Linking %d packages for %s" % (len(packages[arch]),arch)) + for rpminfo in packages[arch]: + filename = os.path.basename(rpminfo['path']) + os.link(rpminfo['path'], "%s/%s" %(rpmdir,filename)) + #noarch packages + for rpminfo in packages.get('noarch',[]): + filename = os.path.basename(rpminfo['path']) + os.link(rpminfo['path'], "%s/%s" %(rpmdir,filename)) + # srpms + if with_src: + for rpminfo in packages.get('src',[]): + filename = os.path.basename(rpminfo['path']) + os.link(rpminfo['path'], "%s/%s" %(srpmdir,filename)) + # comps + logger.info("Linking comps for %s" % arch) + os.link("%s/comps.xml" % groupsdir,"%s/%s/comps.xml" % (repodir,arch)) + #groups rpm linked in a later call (hasn't been generated yet) + return [repo_id, event_id] + +def repo_set_state(repo_id, state, check=True): + """Set repo state""" + if check: + # The repo states are sequential, going backwards makes no sense + q = """SELECT state FROM repo WHERE id = %(repo_id)s FOR UPDATE""" + oldstate = _singleValue(q,locals()) + if oldstate > state: + raise koji.GenericError, "Invalid repo state transition %s->%s" \ + % (oldstate,state) + q = """UPDATE repo SET state=%(state)s WHERE id = %(repo_id)s""" + _dml(q,locals()) + +def repo_info(repo_id, strict=False): + fields = ( + ('repo.id', 'id'), + ('repo.state', 'state'), + ('repo.create_event', 'create_event'), + ('EXTRACT(EPOCH FROM events.time)','create_ts'), + ('repo.tag_id', 'tag_id'), + ('tag.name', 'tag_name'), + ) + q = """SELECT %s FROM repo + 
JOIN tag ON tag_id=tag.id + JOIN events ON repo.create_event = events.id + WHERE repo.id = %%(repo_id)s""" % ','.join([f[0] for f in fields]) + return _singleRow(q, locals(), [f[1] for f in fields], strict=strict) + +def repo_ready(repo_id): + """Set repo state to ready""" + repo_set_state(repo_id,koji.REPO_READY) + +def repo_expire(repo_id): + """Set repo state to expired""" + repo_set_state(repo_id,koji.REPO_EXPIRED) + +def repo_problem(repo_id): + """Set repo state to problem""" + repo_set_state(repo_id,koji.REPO_PROBLEM) + +def repo_delete(repo_id): + """Attempt to mark repo deleted, return number of references + + If the number of references is nonzero, no change is made""" + #get a row lock on the repo + q = """SELECT state FROM repo WHERE id = %(repo_id)s FOR UPDATE""" + _singleValue(q,locals()) + references = repo_references(repo_id) + if not references: + repo_set_state(repo_id,koji.REPO_DELETED) + return len(references) + +def repo_expire_older(tag_id, event_id): + """Expire repos for tag older than event""" + st_ready = koji.REPO_READY + st_expired = koji.REPO_EXPIRED + q = """UPDATE repo SET state=%(st_expired)i + WHERE tag_id = %(tag_id)i + AND create_event < %(event_id)i + AND state = %(st_ready)i""" + _dml(q, locals()) + +def repo_references(repo_id): + """Return a list of buildroots that reference the repo""" + fields = ('id', 'host_id', 'create_event', 'state') + q = """SELECT %s FROM buildroot WHERE repo_id=%%(repo_id)s + AND retire_event IS NULL""" % ','.join(fields) + #check results for bad states + ret = [] + for data in _multiRow(q, locals(), fields): + if data['state'] == koji.BR_STATES['EXPIRED']: + log_error("Error: buildroot %(id)s expired, but has no retire_event" % data) + continue + ret.append(data) + return ret + +def get_active_repos(): + """Get data on all active repos + + This is a list of all the repos that the repo daemon needs to worry about. 
+ """ + fields = ( + ('repo.id', 'id'), + ('repo.state', 'state'), + ('repo.create_event', 'create_event'), + ('EXTRACT(EPOCH FROM events.time)','create_ts'), + ('repo.tag_id', 'tag_id'), + ('tag.name', 'tag_name'), + ) + st_deleted = koji.REPO_DELETED + q = """SELECT %s FROM repo + JOIN tag ON tag_id=tag.id + JOIN events ON repo.create_event = events.id + WHERE repo.state != %%(st_deleted)s""" % ','.join([f[0] for f in fields]) + return _multiRow(q, locals(), [f[1] for f in fields]) + +def tag_changed_since_event(event,taglist): + """Report whether any changes since event affect any of the tags in list + + The function is used by the repo daemon to determine which of its repos + are up to date. + + This function does not figure inheritance, the calling function should + expand the taglist to include any desired inheritance. + + Returns: True or False + """ + c = context.cnx.cursor() + tables = ( + 'tag_listing', + 'tag_inheritance', + 'tag_config', + 'tag_packages', + 'group_package_listing', + 'group_req_listing', + 'group_config', + ) + ret = {} + for table in tables: + q = """SELECT tag_id FROM %(table)s + WHERE create_event > %%(event)s OR revoke_event > %%(event)s + """ % locals() + c.execute(q,locals()) + for (tag_id,) in c.fetchall(): + if tag_id in taglist: + return True + return False + +def create_build_target(name, build_tag, dest_tag): + """Create a new build target""" + + context.session.assertPerm('admin') + + # Does a target with this name already exist? + if get_build_targets(info=name): + raise koji.GenericError("A build target with the name '%s' already exists" % name) + + # Does the build tag exist? + build_tag_object = get_tag(build_tag) + if not build_tag_object: + raise koji.GenericError("build tag '%s' does not exist" % build_tag) + build_tag = build_tag_object['id'] + + # Does the dest tag exist? 
+ dest_tag_object = get_tag(dest_tag) + if not dest_tag_object: + raise koji.GenericError("destination tag '%s' does not exist" % dest_tag) + dest_tag = dest_tag_object['id'] + + #build targets are versioned, so if the target has previously been deleted, it + #is possible the name is in the system + id = get_build_target_id(name,create=True) + + insert = """INSERT into build_target_config (build_target_id, build_tag, dest_tag) + VALUES (%(id)d, %(build_tag)d, %(dest_tag)d)""" + + _dml(insert, locals()) + +def edit_build_target(buildTargetInfo, name, build_tag, dest_tag): + """Set the build_tag and dest_tag of an existing build_target to new values""" + context.session.assertPerm('admin') + + target = lookup_build_target(buildTargetInfo) + if not target: + raise koji.GenericError, 'invalid build target: %s' % buildTargetInfo + + buildTargetID = target['id'] + + build_tag_object = get_tag(build_tag) + if not build_tag_object: + raise koji.GenericError, "build tag '%s' does not exist" % build_tag + buildTagID = build_tag_object['id'] + + dest_tag_object = get_tag(dest_tag) + if not dest_tag_object: + raise koji.GenericError, "destination tag '%s' does not exist" % dest_tag + destTagID = dest_tag_object['id'] + + if target['name'] != name: + # Allow renaming, for parity with tags + id = _singleValue("""SELECT id from build_target where name = %(name)s""", + locals(), strict=False) + if id is not None: + raise koji.GenericError, 'name "%s" is already taken by build target %i' % (name, id) + + rename = """UPDATE build_target + SET name = %(name)s + WHERE id = %(buildTargetID)i""" + + _dml(rename, locals()) + + eventID = _singleValue("SELECT get_event()") + + update = """UPDATE build_target_config + SET active = NULL, + revoke_event = %(eventID)i + WHERE build_target_id = %(buildTargetID)i + AND active is true + """ + + insert = """INSERT INTO build_target_config + (build_target_id, build_tag, dest_tag, create_event) + VALUES + (%(buildTargetID)i, %(buildTagID)i, 
%(destTagID)i, %(eventID)i) + """ + + _dml(update, locals()) + _dml(insert, locals()) + +def delete_build_target(buildTargetInfo): + """Delete the build target with the given name. If no build target + exists, raise a GenericError.""" + context.session.assertPerm('admin') + + target = lookup_build_target(buildTargetInfo) + if not target: + raise koji.GenericError, 'invalid build target: %s' % buildTargetInfo + + targetID = target['id'] + + #build targets are versioned, so we do not delete them from the db + #instead we revoke the config entry + delConfig = """UPDATE build_target_config + SET active=NULL,revoke_event=get_event() + WHERE build_target_id = %(targetID)i + """ + + _dml(delConfig, locals()) + +def get_build_targets(info=None, event=None, buildTagID=None, destTagID=None, queryOpts=None): + """Return data on all the build targets + + provide event to query at a different time""" + fields = ( + ('build_target.id', 'id'), + ('build_tag', 'build_tag'), + ('dest_tag', 'dest_tag'), + ('build_target.name', 'name'), + ('tag1.name', 'build_tag_name'), + ('tag2.name', 'dest_tag_name'), + ) + joins = ['build_target ON build_target_config.build_target_id = build_target.id', + 'tag AS tag1 ON build_target_config.build_tag = tag1.id', + 'tag AS tag2 ON build_target_config.dest_tag = tag2.id'] + clauses = [eventCondition(event)] + + if info: + if isinstance(info, str): + clauses.append('build_target.name = %(info)s') + elif isinstance(info, int) or isinstance(info, long): + clauses.append('build_target.id = %(info)i') + else: + raise koji.GenericError, 'invalid type for lookup: %s' % type(info) + if buildTagID != None: + clauses.append('build_tag = %(buildTagID)i') + if destTagID != None: + clauses.append('dest_tag = %(destTagID)i') + + query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields], + tables=['build_target_config'], joins=joins, clauses=clauses, + values=locals(), opts=queryOpts) + return query.execute() + +def 
lookup_name(table,info,strict=False,create=False): + """Find the id and name in the table associated with info. + + Info can be the name to look up, or if create is false it can + be the id. + + Return value is a dict with keys id and name, or None + If there is no match, then the behavior depends on the options. If strict, + then an error is raised. If create, then the required entry is created and + returned. + + table should be the name of a table with (unique) fields + id INTEGER + name TEXT + Any other fields should have default values, otherwise the + create option will fail. + """ + fields = ('id','name') + if isinstance(info, int) or isinstance(info, long): + q="""SELECT id,name FROM %s WHERE id=%%(info)d""" % table + elif isinstance(info, str): + q="""SELECT id,name FROM %s WHERE name=%%(info)s""" % table + else: + raise koji.GenericError, 'invalid type for id lookup: %s' % type(info) + ret = _singleRow(q,locals(),fields,strict=False) + if ret is None: + if strict: + raise koji.GenericError, 'No such entry in table %s: %s' % (table, info) + elif create: + if not isinstance(info, str): + raise koji.GenericError, 'Name must be a string' + id = _singleValue("SELECT nextval('%s_id_seq')" % table, strict=True) + q = """INSERT INTO %s(id,name) VALUES (%%(id)i,%%(info)s)""" % table + _dml(q,locals()) + return {'id': id, 'name': info} + else: + return ret + return ret + +def get_id(table,info,strict=False,create=False): + """Find the id in the table associated with info.""" + data = lookup_name(table,info,strict,create) + if data is None: + return data + else: + return data['id'] + +def get_tag_id(info,strict=False,create=False): + """Get the id for tag""" + return get_id('tag',info,strict,create) + +def lookup_tag(info,strict=False,create=False): + """Get the id,name for tag""" + return lookup_name('tag',info,strict,create) + +def get_perm_id(info,strict=False,create=False): + """Get the id for a permission""" + return get_id('permissions',info,strict,create) + 
+def lookup_perm(info,strict=False,create=False): + """Get the id,name for perm""" + return lookup_name('permissions',info,strict,create) + +def get_package_id(info,strict=False,create=False): + """Get the id for a package""" + return get_id('package',info,strict,create) + +def lookup_package(info,strict=False,create=False): + """Get the id,name for package""" + return lookup_name('package',info,strict,create) + +def get_channel_id(info,strict=False,create=False): + """Get the id for a channel""" + return get_id('channels',info,strict,create) + +def lookup_channel(info,strict=False,create=False): + """Get the id,name for channel""" + return lookup_name('channels',info,strict,create) + +def get_group_id(info,strict=False,create=False): + """Get the id for a group""" + return get_id('groups',info,strict,create) + +def lookup_group(info,strict=False,create=False): + """Get the id,name for group""" + return lookup_name('groups',info,strict,create) + +def get_build_target_id(info,strict=False,create=False): + """Get the id for a build target""" + return get_id('build_target',info,strict,create) + +def lookup_build_target(info,strict=False,create=False): + """Get the id,name for build target""" + return lookup_name('build_target',info,strict,create) + +def create_tag(name, parent=None, arches=None, perm=None, locked=False): + """Create a new tag""" + + context.session.assertPerm('admin') + + #see if there is already a tag by this name (active) + if get_tag(name): + raise koji.GenericError("A tag with the name '%s' already exists" % name) + + # Does the parent exist? 
+ if parent: + parent_tag = get_tag(parent) + parent_id = parent_tag['id'] + if not parent_tag: + raise koji.GenericError("Parent tag '%s' could not be found" % parent) + else: + parent_id = None + + #there may already be an id for a deleted tag, this will reuse it + tag_id = get_tag_id(name,create=True) + + c=context.cnx.cursor() + + q = """INSERT INTO tag_config (tag_id,arches,perm_id,locked) + VALUES (%(tag_id)i,%(arches)s,%(perm)s,%(locked)s)""" + context.commit_pending = True + c.execute(q,locals()) + + if parent_id: + data = {'parent_id': parent_id, + 'priority': 0, + 'maxdepth': None, + 'intransitive': False, + 'noconfig': False, + 'pkg_filter': ''} + writeInheritanceData(get_tag(name)['id'],data) + +def get_tag(tagInfo,strict=False): + """Get tag information based on the tagInfo. tagInfo may be either + a string (the tag name) or an int (the tag ID). + Returns a map containing the following keys: + + - id + - name + - perm_id (may be null) + - arches (may be null) + - locked (may be null) + + If there is no tag matching the given tagInfo, and strict is False, + return None. If strict is True, raise a GenericError. + + Note that in order for a tag to 'exist', it must have an active entry + in tag_config. A tag whose name appears in the tag table but has no + active tag_config entry is considered deleted. + """ + fields = ('id', 'name', 'perm_id', 'arches', 'locked') + q = """SELECT %s FROM tag_config + JOIN tag ON tag_config.tag_id = tag.id + WHERE tag_config.active = TRUE + AND """ % ', '.join(fields) + if isinstance(tagInfo, int): + q += """tag.id = %(tagInfo)i""" + elif isinstance(tagInfo, str): + q += """tag.name = %(tagInfo)s""" + else: + raise koji.GenericError, 'invalid type for tagInfo: %s' % type(tagInfo) + result = _singleRow(q,locals(),fields) + if not result: + if strict: + raise koji.GenericError, "Invalid tagInfo: %r" % tagInfo + return None + return result + +def edit_tag(tagInfo, **kwargs): + """Edit information for an existing tag. 
+ + tagInfo specifies the tag to edit + fields changes are provided as keyword arguments: + name: rename the tag + arches: change the arch list + locked: lock or unlock the tag + perm: change the permission requirement + """ + + context.session.assertPerm('admin') + + tag = get_tag(tagInfo, strict=True) + if kwargs.has_key('perm'): + if kwargs['perm'] is None: + kwargs['perm_id'] = None + else: + kwargs['perm_id'] = get_perm_id(kwargs['perm'],strict=True) + + name = kwargs.get('name') + if name and tag['name'] != name: + #attempt to update tag name + #XXX - I'm not sure we should allow this sort of renaming anyway. + # while I can see the convenience, it is an untracked change (granted + # a cosmetic one). The more versioning-friendly way would be to create + # a new tag with duplicate data and revoke the old tag. This is more + # of a pain of course :-/ -mikem + values = { + 'name': name, + 'tagID': tag['id'] + } + q = """SELECT id FROM tag WHERE name=%(name)s""" + id = _singleValue(q,values,strict=False) + if id is not None: + #new name is taken + raise koji.GenericError, "Name %s already taken by tag %s" % (name,id) + update = """UPDATE tag + SET name = %(name)s + WHERE id = %(tagID)i""" + _dml(update, values) + + #check for changes + data = tag.copy() + changed = False + for key in ('perm_id','arches','locked'): + if kwargs.has_key(key) and data[key] != kwargs[key]: + changed = True + data[key] = kwargs[key] + if not changed: + return + + #use the same event for both + data['event_id'] = _singleValue("SELECT get_event()") + + update = """UPDATE tag_config + SET active = null, + revoke_event = %(event_id)i + WHERE tag_id = %(id)i + AND active is true""" + _dml(update, data) + + insert = """INSERT INTO tag_config + (tag_id, arches, perm_id, locked, create_event) + VALUES + (%(id)i, %(arches)s, %(perm_id)s, %(locked)s, %(event_id)i)""" + _dml(insert, data) + +def old_edit_tag(tagInfo, name, arches, locked, permissionID): + """Edit information for an existing 
tag.""" + return edit_tag(tagInfo, name=name, arches=arches, locked=locked, + perm_id=permissionID) + + +def delete_tag(tagInfo): + """Delete the specified tag.""" + + context.session.assertPerm('admin') + + #We do not ever DELETE tag data. It is versioned -- we revoke it instead. + + def _tagDelete(tableName, value, event, columnName='tag_id'): + delete = """UPDATE %(tableName)s SET active=NULL,revoke_event=%%(event)i + WHERE %(columnName)s = %%(value)i AND active = TRUE""" % locals() + _dml(delete, locals()) + + tag = get_tag(tagInfo) + tagID = tag['id'] + #all these updates are a single transaction, so we use the same event + eventID = _singleValue("SELECT get_event()") + + _tagDelete('tag_config', tagID, eventID) + #technically, to 'delete' the tag we only have to revoke the tag_config entry + #these remaining revocations are more for cleanup. + _tagDelete('tag_inheritance', tagID, eventID) + _tagDelete('tag_inheritance', tagID, eventID, 'parent_id') + _tagDelete('build_target_config', tagID, eventID, 'build_tag') + _tagDelete('build_target_config', tagID, eventID, 'dest_tag') + _tagDelete('tag_listing', tagID, eventID) + _tagDelete('tag_packages', tagID, eventID) + _tagDelete('group_config', tagID, eventID) + _tagDelete('group_req_listing', tagID, eventID) + _tagDelete('group_package_listing', tagID, eventID) + # note: we do not delete the entry in the tag table (we can't actually, it + # is still referenced by the revoked rows). + # note: there is no need to do anything with the repo entries that reference tagID + +def get_user(userInfo=None,strict=False): + """Return information about a user. userInfo may be either a str + (Kerberos principal) or an int (user id). 
A map will be returned with the + following keys: + id: user id + name: user name + status: user status (int), may be null + usertype: user type (int), 0 person, 1 for host, may be null + krb_principal: the user's Kerberos principal""" + if userInfo is None: + userInfo = context.session.user_id + #will still be None if not logged in + fields = ('id', 'name', 'status', 'usertype', 'krb_principal') + q = """SELECT %s FROM users WHERE""" % ', '.join(fields) + if isinstance(userInfo, int) or isinstance(userInfo, long): + q += """ id = %(userInfo)i""" + elif isinstance(userInfo, str): + q += """ (krb_principal = %(userInfo)s or name = %(userInfo)s)""" + else: + raise koji.GenericError, 'invalid type for userInfo: %s' % type(userInfo) + return _singleRow(q,locals(),fields,strict=strict) + +def find_build_id(X): + if isinstance(X,int) or isinstance(X,long): + return X + elif isinstance(X,str): + data = koji.parse_NVR(X) + elif isinstance(X,dict): + data = X + else: + raise koji.GenericError, "Invalid argument: %r" % X + + if not (data.has_key('name') and data.has_key('version') and + data.has_key('release')): + raise koji.GenericError, 'did not provide name, version, and release' + + c=context.cnx.cursor() + q="""SELECT build.id FROM build JOIN package ON build.pkg_id=package.id + WHERE package.name=%(name)s AND build.version=%(version)s + AND build.release=%(release)s + """ + # contraints should ensure this is unique + #log_error(koji.db._quoteparams(q,data)) + c.execute(q,data) + r=c.fetchone() + #log_error("%r" % r ) + if not r: + return None + return r[0] + +def get_build(buildInfo, strict=False): + """Return information about a build. buildID may be either + a int ID, a string NVR, or a map containing 'name', 'version' + and 'release. 
A map will be returned containing the following + keys: + id: build ID + package_id: ID of the package built + package_name: name of the package built + version + release + epoch + state + task_id: ID of the task that kicked off the build + owner_id: ID of the user who kicked off the build + owner_name: name of the user who kicked off the build + creation_event_id: id of the create_event + creation_time: time the build was created + completion_time: time the build was completed (may be null) + + If there is no build matching the buildInfo given, and strict is specified, + raise an error. Otherwise return None. + """ + buildID = find_build_id(buildInfo) + if buildID == None: + if strict: + raise koji.GenericError, 'No matching build found: %s' % buildInfo + else: + return None + + fields = (('build.id', 'id'), ('build.version', 'version'), ('build.release', 'release'), + ('build.epoch', 'epoch'), ('build.state', 'state'), ('build.completion_time', 'completion_time'), + ('build.task_id', 'task_id'), ('events.id', 'creation_event_id'), ('events.time', 'creation_time'), + ('package.id', 'package_id'), ('package.name', 'package_name'), + ('users.id', 'owner_id'), ('users.name', 'owner_name')) + query = """SELECT %s + FROM build + JOIN events ON build.create_event = events.id + JOIN package on build.pkg_id = package.id + JOIN users on build.owner = users.id + WHERE build.id = %%(buildID)i""" % ', '.join([pair[0] for pair in fields]) + + c = context.cnx.cursor() + c.execute(query, locals()) + result = c.fetchone() + + if not result: + if strict: + raise koji.GenericError, 'No matching build found: %s' % buildInfo + else: + return None + else: + ret = dict(zip([pair[1] for pair in fields], result)) + ret['name'] = ret['package_name'] + return ret + +def get_rpm(rpminfo,strict=False): + """Get information about the specified RPM + + rpminfo may be any one of the following: + - a int ID + - a string N-V-R.A + - a map containing 'name', 'version', 'release', and 'arch' + + A 
    map will be returned, with the following keys:
    - id
    - name
    - version
    - release
    - arch
    - epoch
    - payloadhash
    - size
    - buildtime
    - build_id
    - buildroot_id

    If there is no RPM with the given ID, None is returned, unless strict
    is True in which case an exception is raised
    """
    fields = ('id', 'name', 'version', 'release', 'arch', 'epoch',
              'payloadhash', 'size', 'buildtime', 'build_id', 'buildroot_id')
    # we can look up by id or NVRA
    data = None
    if isinstance(rpminfo,(int,long)):
        data = {'id': rpminfo}
    elif isinstance(rpminfo,str):
        data = koji.parse_NVRA(rpminfo)
    elif isinstance(rpminfo,dict):
        #copy so the 'id' check below does not depend on caller mutation
        data = rpminfo.copy()
    else:
        raise koji.GenericError, "Invalid argument: %r" % rpminfo
    q = """SELECT %s FROM rpminfo """ % ','.join(fields)
    if data.has_key('id'):
        q += """WHERE id=%(id)s"""
    else:
        #no id available, fall back to the (name,version,release,arch) key
        q += """WHERE name=%(name)s AND version=%(version)s
        AND release=%(release)s AND arch=%(arch)s"""
    c = context.cnx.cursor()
    c.execute(q, data)
    row = c.fetchone()
    if not row:
        if strict:
            raise koji.GenericError, "No such rpm: %r" % data
        return None
    return dict(zip(fields,row))

def _fetchMulti(query, values):
    """Run the query and return all rows"""
    c = context.cnx.cursor()
    c.execute(query, values)
    results = c.fetchall()
    c.close()
    return results

def _fetchSingle(query, values, strict=False):
    """Run the query and return a single row

    If strict is true, raise an error if the query returns more or less than
    one row."""
    results = _fetchMulti(query, values)
    numRows = len(results)
    if numRows == 0:
        if strict:
            raise koji.GenericError, 'query returned no rows'
        else:
            return None
    elif strict and numRows > 1:
        raise koji.GenericError, 'multiple rows returned for a single row query'
    else:
        return results[0]

def _multiRow(query, values, fields):
    """Return all rows from "query".  Named query parameters
    can be specified using the "values" map.  Results will be returned
    as a list of maps.  Each map in the list will have a key for each
    element in the "fields" list.  If there are no results, an empty
    list will be returned."""
    return [dict(zip(fields, row)) for row in _fetchMulti(query, values)]

def _singleRow(query, values, fields, strict=False):
    """Return a single row from "query".  Named parameters can be
    specified using the "values" map.  The result will be returned as
    as map.  The map will have a key for each element in the "fields"
    list.  If more than one row is returned and "strict" is true, a
    GenericError will be raised.  If no rows are returned, and "strict"
    is True, a GenericError will be raised.  Otherwise None will be
    returned."""
    row = _fetchSingle(query, values, strict)
    if row:
        return dict(zip(fields, row))
    else:
        #strict enforced by _fetchSingle
        return None

def _singleValue(query, values=None, strict=True):
    """Perform a query that returns a single value.

    Note that unless strict is True a return value of None could mean either
    a single NULL value or zero rows returned."""
    if values is None:
        values = {}
    row = _fetchSingle(query, values, strict)
    if row:
        if strict and len(row) > 1:
            raise koji.GenericError, 'multiple fields returned for a single value query'
        return row[0]
    else:
        # don't need to check strict here, since that was already handled by _fetchSingle()
        return None

def _dml(operation, values):
    """Run an insert, update, or delete. Return number of rows affected

    Also sets context.commit_pending so the transaction is committed at
    the end of the request."""
    c = context.cnx.cursor()
    c.execute(operation, values)
    ret = c.rowcount
    c.close()
    context.commit_pending = True
    return ret

def get_host(hostInfo, strict=False):
    """Get information about the given host.  hostInfo may be
    either a string (hostname) or int (host id).
A map will be returned + containign the following data: + + - id + - user_id + - name + - arches + - task_load + - capacity + - ready + - enabled + """ + fields = ('id', 'user_id', 'name', 'arches', 'task_load', + 'capacity', 'ready', 'enabled') + query = """SELECT %s FROM host + WHERE """ % ', '.join(fields) + if isinstance(hostInfo, int) or isinstance(hostInfo, long): + query += """id = %(hostInfo)i""" + elif isinstance(hostInfo, str): + query += """name = %(hostInfo)s""" + else: + raise koji.GenericError, 'invalid type for hostInfo: %s' % type(hostInfo) + + return _singleRow(query, locals(), fields, strict) + +def get_channel(channelInfo, strict=False): + """Return information about a channel.""" + fields = ('id', 'name') + query = """SELECT %s FROM channels + WHERE """ % ', '.join(fields) + if isinstance(channelInfo, int) or isinstance(channelInfo, long): + query += """id = %(channelInfo)i""" + elif isinstance(channelInfo, str): + query += """name = %(channelInfo)s""" + else: + raise koji.GenericError, 'invalid type for channelInfo: %s' % type(channelInfo) + + return _singleRow(query, locals(), fields, strict) + + +def query_buildroots(hostID=None, tagID=None, state=None, rpmID=None, taskID=None, buildrootID=None): + """Return a list of matching buildroots + + Optional args: + hostID - only buildroots on host. + tagID - only buildroots for tag. + state - only buildroots in state (may be a list) + rpmID - only the buildroot the specified rpm was built in + taskID - only buildroots associated with task. 
+ """ + fields = [('buildroot.id', 'id'), ('buildroot.arch', 'arch'), ('buildroot.state', 'state'), + ('buildroot.dirtyness', 'dirtyness'), ('buildroot.task_id', 'task_id'), + ('host.id', 'host_id'), ('host.name', 'host_name'), + ('repo.id', 'repo_id'), ('repo.state', 'repo_state'), + ('tag.id', 'tag_id'), ('tag.name', 'tag_name'), + ('create_events.id', 'create_event_id'), ('create_events.time', 'create_event_time'), + ('EXTRACT(EPOCH FROM create_events.time)','create_ts'), + ('retire_events.id', 'retire_event_id'), ('retire_events.time', 'retire_event_time'), + ('EXTRACT(EPOCH FROM retire_events.time)','retire_ts'), + ('repo_create.id', 'repo_create_event_id'), ('repo_create.time', 'repo_create_event_time')] + + query = """SELECT %s FROM buildroot + JOIN host ON host.id = buildroot.host_id + JOIN repo ON repo.id = buildroot.repo_id + JOIN tag ON tag.id = repo.tag_id + JOIN events AS create_events ON create_events.id = buildroot.create_event + LEFT OUTER JOIN events AS retire_events ON buildroot.retire_event = retire_events.id + JOIN events AS repo_create ON repo_create.id = repo.create_event + """ + + clauses = [] + if buildrootID != None: + if isinstance(buildrootID, list) or isinstance(buildrootID, tuple): + clauses.append('buildroot.id IN %(buildrootID)s') + else: + clauses.append('buildroot.id = %(buildrootID)i') + if hostID != None: + clauses.append('host.id = %(hostID)i') + if tagID != None: + clauses.append('tag.id = %(tagID)i') + if state != None: + if isinstance(state, list) or isinstance(state, tuple): + clauses.append('buildroot.state IN %(state)s') + else: + clauses.append('buildroot.state = %(state)i') + if rpmID != None: + query += """JOIN buildroot_listing ON buildroot.id = buildroot_listing.buildroot_id + """ + fields.append(('buildroot_listing.is_update', 'is_update')) + clauses.append('buildroot_listing.rpm_id = %(rpmID)i') + if taskID != None: + clauses.append('buildroot.task_id = %(taskID)i') + + query = query % ', '.join([pair[0] for pair in 
fields]) + + if len(clauses) > 0: + query += 'WHERE ' + ' AND '.join(clauses) + + return _multiRow(query, locals(), [pair[1] for pair in fields]) + + +def get_buildroot(buildrootID, strict=False): + """Return information about a buildroot. buildrootID must be an int ID.""" + + result = query_buildroots(buildrootID=buildrootID) + if len(result) == 0: + if strict: + raise koji.GenericError, "No such buildroot: %r" % buildrootID + else: + return None + if len(result) > 1: + #this should be impossible + raise koji.GenericError, "More that one buildroot with id: %i" % buildrootID + return result[0] + +def list_channels(hostID=None): + """List channels. If hostID is specified, only list + channels associated with the host with that ID.""" + fields = ('id', 'name') + query = """SELECT %s FROM channels + """ % ', '.join(fields) + if hostID != None: + query += """JOIN host_channels ON channels.id = host_channels.channel_id + WHERE host_channels.host_id = %(hostID)i""" + return _multiRow(query, locals(), fields) + +def get_changelog_entries(buildID, author=None, before=None, after=None, queryOpts=None): + """Get changelog entries for the build with the given ID. + + - author: only return changelogs with a matching author + - before: only return changelogs from before the given date + (a datetime object or a string in the 'YYYY-MM-DD HH24:MI:SS format) + - after: only return changelogs from after the given date + (a datetime object or a string in the 'YYYY-MM-DD HH24:MI:SS format) + - opts: query options used by the QueryProcessor + + If "order" is not specified in opts, results will be returned in reverse chronological + order. + + Results will be returned as a list of maps with 'date', 'author', and 'text' keys. + If there are no results, an empty list will be returned. 
+ """ + fields = ('date', 'author', 'text') + + if not queryOpts: + queryOpts = {} + if not queryOpts.has_key('order'): + queryOpts['order'] = '-date' + + clauses = ['changelogs.build_id = %(buildID)i'] + if author: + clauses.append('changelogs.author = %(author)s') + if before: + if isinstance(before, datetime.datetime): + before = str(before) + clauses.append('changelogs.date < %(before)s') + if after: + if isinstance(after, datetime.datetime): + after = str(after) + clauses.append('changelogs.date > %(after)s') + + query = QueryProcessor(columns=fields, tables=('changelogs',), + clauses=clauses, values=locals(), opts=queryOpts) + return query.execute() + +def new_package(name,strict=True): + c = context.cnx.cursor() + # TODO - table lock? + # check for existing + q = """SELECT id FROM package WHERE name=%(name)s""" + c.execute(q,locals()) + row = c.fetchone() + if row: + (pkg_id,) = row + if strict: + raise koji.GenericError, "Package already exists [id %d]" % pkg_id + else: + q = """SELECT nextval('package_id_seq')""" + c.execute(q) + (pkg_id,) = c.fetchone() + q = """INSERT INTO package (id,name) VALUES (%(pkg_id)s,%(name)s)""" + context.commit_pending = True + c.execute(q,locals()) + return pkg_id + +def new_build(data): + """insert a new build entry""" + data = data.copy() + if not data.has_key('pkg_id'): + #see if there's a package name + name = data.get('name') + if not name: + raise koji.GenericError, "No name or package id provided for build" + data['pkg_id'] = new_package(name,strict=False) + for f in ('version','release','epoch'): + if not data.has_key(f): + raise koji.GenericError, "No %s value for build" % f + #provide a few default values + data.setdefault('state',koji.BUILD_STATES['COMPLETE']) + data.setdefault('completion_time', 'NOW') + data.setdefault('owner',context.session.user_id) + data.setdefault('task_id',None) + #check for existing build + # TODO - table lock? 
+ q="""SELECT id,state,task_id FROM build + WHERE pkg_id=%(pkg_id)d AND version=%(version)s AND release=%(release)s + FOR UPDATE""" + row = _fetchSingle(q, data) + if row: + id, state, task_id = row + st_desc = koji.BUILD_STATES[state] + if st_desc == 'BUILDING': + # check to see if this is the controlling task + if data['state'] == state and data.get('task_id','') == task_id: + #the controlling task must have restarted (and called initBuild again) + return id + raise koji.GenericError, "Build already in progress (task %d)" % task_id + # TODO? - reclaim 'stale' builds (state=BUILDING and task_id inactive) + if st_desc in ('FAILED','CANCELED'): + #should be ok to replace + update = """UPDATE build SET state=%(state)i,task_id=%(task_id)s, + owner=%(owner)s,completion_time=%(completion_time)s,create_event=get_event() + WHERE id = %(id)i""" + data['id'] = id + _dml(update, data) + # delete any now-obsolete changelogs + delete = """DELETE FROM changelogs WHERE build_id=%(id)i""" + _dml(delete, data) + return id + raise koji.GenericError, "Build already exists (id=%d, state=%s): %r" \ + % (id, st_desc, data) + #insert the new data + q=""" + INSERT INTO build (pkg_id,version,release,epoch,state, + task_id,owner,completion_time) + VALUES (%(pkg_id)s,%(version)s,%(release)s,%(epoch)s, + %(state)s,%(task_id)s,%(owner)s,%(completion_time)s) + """ + _dml(q, data) + #return build_id + q="""SELECT currval('build_id_seq')""" + return _singleValue(q) + +def import_build(srpm, rpms, brmap=None, task_id=None, build_id=None, logs=None): + """Import a build into the database (single transaction) + + Files must be uploaded and specified with path relative to the workdir + Args: + srpm - relative path of srpm + rpms - list of rpms (relative paths) + brmap - dictionary mapping [s]rpms to buildroot ids + task_id - associate the build with a task + build_id - build is a finalization of existing entry + """ + if brmap is None: + brmap = {} + uploadpath = koji.pathinfo.work() + #verify files 
    #verify files exist
    for relpath in [srpm] + rpms:
        fn = "%s/%s" % (uploadpath,relpath)
        if not os.path.exists(fn):
            raise koji.GenericError, "no such file: %s" % fn

    #verify buildroot ids from brmap
    found = {}
    for br_id in brmap.values():
        if found.has_key(br_id):
            continue
        found[br_id] = 1
        #this will raise an exception if the buildroot id is invalid
        BuildRoot(br_id)

    #read srpm info
    fn = "%s/%s" % (uploadpath,srpm)
    build = koji.get_header_fields(fn,('name','version','release','epoch',
                                       'sourcepackage'))
    if build['sourcepackage'] != 1:
        raise koji.GenericError, "not a source package: %s" % fn
    build['task_id'] = task_id
    if build_id is None:
        build_id = new_build(build)
    else:
        #build_id was passed in - sanity check
        binfo = get_build(build_id)
        for key in ('name','version','release','epoch','task_id'):
            if build[key] != binfo[key]:
                raise koji.GenericError, "Unable to complete build: %s mismatch (build: %s, rpm: %s)" % (key, binfo[key], build[key])
        if binfo['state'] != koji.BUILD_STATES['BUILDING']:
            raise koji.GenericError, "Unable to complete build: state is %s" \
                    % koji.BUILD_STATES[binfo['state']]
    #update build state to COMPLETE and stamp the completion time
    st_complete = koji.BUILD_STATES['COMPLETE']
    update = """UPDATE build SET state=%(st_complete)i,completion_time=NOW()
    WHERE id=%(build_id)i"""
    _dml(update,locals())
    build['id'] = build_id
    # now to handle the individual rpms
    for relpath in [srpm] + rpms:
        fn = "%s/%s" % (uploadpath,relpath)
        rpminfo = import_rpm(fn,build,brmap.get(relpath))
        import_rpm_file(fn,build,rpminfo)
        add_rpm_sig(rpminfo['id'], koji.rip_rpm_sighdr(fn))
    if logs:
        # logs is a map: subdir-key -> list of relative paths
        for key, files in logs.iteritems():
            if not key:
                key = None
            for relpath in files:
                fn = "%s/%s" % (uploadpath,relpath)
                import_build_log(fn, build, subdir=key)
    return build

def import_rpm(fn,buildinfo=None,brootid=None):
    """Import a single rpm into the database

    Designed to be called from import_build.

    fn: path to the rpm file to import
    buildinfo: the build this rpm belongs to; if None it is determined
        from the rpm itself (a new build entry is created for an srpm,
        or looked up via the sourcerpm tag for a binary rpm)
    brootid: id of the buildroot the rpm was built in (may be None)

    Returns the rpminfo dict for the new rpminfo table entry (with the
    matching buildinfo attached under the 'build' key).
    """
    if not os.path.exists(fn):
        raise koji.GenericError, "no such file: %s" % fn

    #read rpm info
    hdr = koji.get_rpm_header(fn)
    rpminfo = koji.get_header_fields(hdr,['name','version','release','epoch',
                    'sourcepackage','arch','buildtime','sourcerpm'])
    if rpminfo['sourcepackage'] == 1:
        # source rpms are stored with the pseudo-arch "src"
        rpminfo['arch'] = "src"

    #sanity check basename against the NVRA from the header
    basename = os.path.basename(fn)
    expected = "%(name)s-%(version)s-%(release)s.%(arch)s.rpm" % rpminfo
    if basename != expected:
        raise koji.GenericError, "bad filename: %s (expected %s)" % (basename,expected)

    if buildinfo is None:
        #figure it out for ourselves
        if rpminfo['sourcepackage'] == 1:
            buildinfo = rpminfo.copy()
            buildinfo['id'] = new_build(rpminfo)
        else:
            #figure it out from sourcerpm string
            buildinfo = get_build(koji.parse_NVRA(rpminfo['sourcerpm']))
            if buildinfo is None:
                #XXX - handle case where package is not a source rpm
                #      and we still need to create a new build
                raise koji.GenericError, 'No matching build'
            state = koji.BUILD_STATES[buildinfo['state']]
            if state in ('FAILED', 'CANCELED', 'DELETED'):
                nvr = "%(name)s-%(version)s-%(release)s" % buildinfo
                raise koji.GenericError, "Build is %s: %s" % (state, nvr)
    else:
        srpmname = "%(name)s-%(version)s-%(release)s.src.rpm" % buildinfo
        #either the sourcerpm field should match the build, or the filename
        #itself (for the srpm)
        if rpminfo['sourcepackage'] != 1:
            if rpminfo['sourcerpm'] != srpmname:
                raise koji.GenericError, "srpm mismatch for %s: %s (expected %s)" \
                        % (fn,rpminfo['sourcerpm'],srpmname)
        elif basename != srpmname:
            raise koji.GenericError, "srpm mismatch for %s: %s (expected %s)" \
                    % (fn,basename,srpmname)

    #add rpminfo entry
    rpminfo['build'] = buildinfo
    rpminfo['build_id'] = buildinfo['id']
    rpminfo['size'] = os.path.getsize(fn)
    # note: payloadhash is actually the SIGMD5 from the signature header
    rpminfo['payloadhash'] = koji.hex_string(hdr[rpm.RPMTAG_SIGMD5])
    rpminfo['brootid'] = brootid
    q = """INSERT INTO rpminfo (name,version,release,epoch,
            build_id,arch,buildtime,buildroot_id,
            size,payloadhash)
    VALUES (%(name)s,%(version)s,%(release)s,%(epoch)s,
            %(build_id)s,%(arch)s,%(buildtime)s,%(brootid)s,
            %(size)s,%(payloadhash)s)
    """
    _dml(q, rpminfo)

    #get rpminfo id of the row we just inserted
    rpminfo_id = _singleValue("""SELECT currval('rpminfo_id_seq')""")

    # - add rpmdeps entries
    # NOTE(review): 'type' shadows the builtin here; left as-is
    for type in ['REQUIRE','PROVIDE','CONFLICT','OBSOLETE']:
        dep_type = getattr(koji, "DEP_" + type)
        key_n = getattr(rpm, "RPMTAG_" + type + "NAME")
        key_f = getattr(rpm, "RPMTAG_" + type + "FLAGS")
        key_v = getattr(rpm, "RPMTAG_" + type + "VERSION")
        for (dep_name,dep_flags,dep_version) in zip(hdr[key_n],hdr[key_f],hdr[key_v]):
            #log_error("%r" %[dep_name,dep_flags,dep_version])
            q = """INSERT INTO rpmdeps (rpm_id,dep_name,dep_flags,dep_version,dep_type)
            VALUES (%(rpminfo_id)d,%(dep_name)s,%(dep_flags)d,%(dep_version)s,%(dep_type)d)
            """
            #log_error(koji.db._quoteparams(q,locals()))
            _dml(q, locals())

    # - add rpmfiles entries
    for (filename,filesize,filemd5,fileflags) in \
            zip(hdr[rpm.RPMTAG_FILENAMES],hdr[rpm.RPMTAG_FILESIZES],
                hdr[rpm.RPMTAG_FILEMD5S],hdr[rpm.RPMTAG_FILEFLAGS]):
        q = """INSERT INTO rpmfiles (rpm_id,filename,filesize,filemd5,fileflags)
        VALUES (%(rpminfo_id)d,%(filename)s,%(filesize)d,%(filemd5)s,%(fileflags)d)
        """
        _dml(q, locals())

    # - add changelog entries, if not already present
    import_changelog(buildinfo, fn)

    rpminfo['id'] = rpminfo_id
    return rpminfo

def import_changelog(buildinfo, rpmfile, replace=False):
    """Import the changelog from the given rpm into the build with the
    given ID.  If the build already has changelog info and replace is True,
    the existing info is cleared and the changelog info from the rpm is imported.
    If replace is False, nothing is done."""
    hdr = koji.get_rpm_header(rpmfile)

    build_id = buildinfo['id']

    if len(get_changelog_entries(buildID=build_id)) != 0:
        # the changelog for this build has already been imported
        if replace:
            delete = """DELETE FROM changelogs WHERE build_id=%(build_id)i"""
            _dml(delete, locals())
        else:
            return

    cltimelist = hdr['CHANGELOGTIME']
    # If there is exactly one changelog entry, CHANGELOGTIME is returned as
    # an int, instead of a list.
    if isinstance(cltimelist, int):
        cltimelist = [cltimelist]
    for cltime, clauthor, cltext in zip(cltimelist, hdr['CHANGELOGNAME'],
                                        hdr['CHANGELOGTEXT']):
        # convert epoch seconds to an ISO timestamp for the db
        cltime = datetime.datetime.fromtimestamp(cltime).isoformat(' ')
        # XXX FIXME!
        # Use koji.fixEncoding() instead of koji._forceAscii()
        # once the database is in UTF-8
        # clauthor = koji.fixEncoding(clauthor)
        # cltext = koji.fixEncoding(cltext)
        clauthor = koji._forceAscii(clauthor)
        cltext = koji._forceAscii(cltext)
        q = """INSERT INTO changelogs (build_id, date, author, text) VALUES
        (%(build_id)d, %(cltime)s, %(clauthor)s, %(cltext)s)
        """
        _dml(q, locals())

def import_build_log(fn, buildinfo, subdir=None):
    """Move a logfile related to a build to the right place

    fn: path to the uploaded log file
    buildinfo: the build the log belongs to
    subdir: optional subdirectory (e.g. per-arch) under the build log dir

    The file is moved into place and the original path is replaced with a
    symlink to the final location.
    """
    logdir = koji.pathinfo.build_logs(buildinfo)
    if subdir:
        logdir = "%s/%s" % (logdir, subdir)
    koji.ensuredir(logdir)
    final_path = "%s/%s" % (logdir, os.path.basename(fn))
    if os.path.exists(final_path):
        raise koji.GenericError("Error importing build log. %s already exists." % final_path)
    if os.path.islink(fn) or not os.path.isfile(fn):
        raise koji.GenericError("Error importing build log. %s is not a regular file." % fn)
    os.rename(fn,final_path)
    os.symlink(final_path,fn)

def import_rpm_file(fn,buildinfo,rpminfo):
    """Move the rpm file into the proper place

    Generally this is done after the db import
    """
    final_path = "%s/%s" % (koji.pathinfo.build(buildinfo),koji.pathinfo.rpm(rpminfo))
    koji.ensuredir(os.path.dirname(final_path))
    if os.path.exists(final_path):
        raise koji.GenericError("Error importing RPM file. %s already exists." % final_path)
    if os.path.islink(fn) or not os.path.isfile(fn):
        raise koji.GenericError("Error importing RPM file. %s is not a regular file." % fn)
    os.rename(fn,final_path)
    # leave a symlink behind so the caller's path remains valid
    os.symlink(final_path,fn)

def import_build_in_place(build):
    """Import a package already in the packages directory

    This is used for bootstrapping the database
    Parameters:
        build: a dictionary with fields: name, version, release
    """
    # Only an admin may do this
    context.session.assertPerm('admin')
    prev = get_build(build)
    if prev is not None:
        state = koji.BUILD_STATES[prev['state']]
        if state == 'COMPLETE':
            log_error("Skipping build %r, already in db" % build)
            # TODO - check contents against db
            return prev['id']
        elif state not in ('FAILED', 'CANCELED'):
            raise koji.GenericError, "build already exists (%s): %r" % (state, build)
        #otherwise try to reimport
    bdir = koji.pathinfo.build(build)
    srpm = None
    rpms = []
    srpmname = "%(name)s-%(version)s-%(release)s.src.rpm" % build
    # look for srpm first
    srcdir = bdir + "/src"
    if os.path.isdir(srcdir):
        for basename in os.listdir(srcdir):
            if basename != srpmname:
                raise koji.GenericError, "unexpected file: %s" % basename
            srpm = "%s/%s" % (srcdir,basename)
    # scan the per-arch subdirs for binary rpms
    for arch in os.listdir(bdir):
        if arch == 'src':
            #already done that
            continue
        if arch == "data":
            continue
        adir = "%s/%s" % (bdir,arch)
        if not os.path.isdir(adir):
            raise koji.GenericError, "out of place file: %s" % adir
        for basename in os.listdir(adir):
            fn = "%s/%s" % (adir,basename)
            if not os.path.isfile(fn):
                raise koji.GenericError, "unexpected non-regular file: %s" % fn
            if fn[-4:] != '.rpm':
                raise koji.GenericError, "out of place file: %s" % adir
            #check sourcerpm field
            hdr = koji.get_rpm_header(fn)
            sourcerpm = hdr[rpm.RPMTAG_SOURCERPM]
            if sourcerpm != srpmname:
                raise koji.GenericError, "srpm mismatch for %s: %s (expected %s)" \
                        % (fn,sourcerpm,srpmname)
            rpms.append(fn)
    # actually import
    buildinfo = None
    if srpm is not None:
        rpminfo = import_rpm(srpm)
        add_rpm_sig(rpminfo['id'], koji.rip_rpm_sighdr(srpm))
        buildinfo = rpminfo['build']
        # file already in place
    for fn in rpms:
        rpminfo = import_rpm(fn,buildinfo)
        add_rpm_sig(rpminfo['id'], koji.rip_rpm_sighdr(fn))
    #update build state
    build_id = buildinfo['id']
    st_complete = koji.BUILD_STATES['COMPLETE']
    update = """UPDATE build SET state=%(st_complete)i,completion_time=NOW()
    WHERE id=%(build_id)i"""
    _dml(update,locals())
    return build_id

def add_rpm_sig(an_rpm, sighdr):
    """Store a signature header for an rpm

    an_rpm: an rpm id or reference accepted by get_rpm
    sighdr: the raw signature header bytes (as produced by rip_rpm_sighdr)

    The header is recorded in the rpmsigs table and written to disk under
    the build directory.
    """
    #calling function should perform permission checks, if applicable
    rinfo = get_rpm(an_rpm, strict=True)
    binfo = get_build(rinfo['build_id'])
    builddir = koji.pathinfo.build(binfo)
    if not os.path.isdir(builddir):
        raise koji.GenericError, "No such directory: %s" % builddir
    rawhdr = koji.RawHeader(sighdr)
    sigmd5 = koji.hex_string(rawhdr.get(koji.RPM_SIGTAG_MD5))
    if sigmd5 == rinfo['payloadhash']:
        # note: payloadhash is a misnomer, that field is populated with sigmd5.
        sigkey = rawhdr.get(koji.RPM_SIGTAG_GPG)
    else:
        # In older rpms, this field in the signature header does not actually match
        # sigmd5 (I think rpmlib pulls it from SIGTAG_GPG).  Anyway, this
        # sanity check fails incorrectly for those rpms, so we fall back to
        # a somewhat more expensive check.
        # ALSO, for these older rpms, the layout of SIGTAG_GPG is different too, so
        # we need to pull that differently as well
        rpm_path = "%s/%s" % (builddir, koji.pathinfo.rpm(rinfo))
        sigmd5, sigkey = _scan_sighdr(sighdr, rpm_path)
        sigmd5 = koji.hex_string(sigmd5)
        if sigmd5 != rinfo['payloadhash']:
            nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rinfo
            raise koji.GenericError, "wrong md5 for %s: %s" % (nvra, sigmd5)
    if sigkey is None:
        sigkey = ''
        #we use the sigkey='' to represent unsigned in the db (so that uniqueness works)
    else:
        # bytes 13:17 of the gpg field hold the key id we track
        sigkey = koji.hex_string(sigkey[13:17])
    sighash = md5.new(sighdr).hexdigest()
    rpm_id = rinfo['id']
    # - db entry
    q = """SELECT sighash FROM rpmsigs WHERE rpm_id=%(rpm_id)i AND sigkey=%(sigkey)s"""
    rows = _fetchMulti(q, locals())
    if rows:
        #TODO[?] - if sighash is the same, handle more gracefully
        nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rinfo
        raise koji.GenericError, "Signature already exists for package %s, key %s" % (nvra, sigkey)
    insert = """INSERT INTO rpmsigs(rpm_id, sigkey, sighash)
    VALUES (%(rpm_id)s, %(sigkey)s, %(sighash)s)"""
    _dml(insert, locals())
    # - write to fs
    sigpath = "%s/%s" % (builddir, koji.pathinfo.sighdr(rinfo, sigkey))
    koji.ensuredir(os.path.dirname(sigpath))
    fo = file(sigpath, 'wb')
    fo.write(sighdr)
    fo.close()

def _scan_sighdr(sighdr, fn):
    """Splices sighdr with other headers from fn and queries (no payload)

    Returns (SIGMD5, SIGGPG) as read back through rpmlib.
    """
    # This is hackish, but it works
    if not os.path.exists(fn):
        raise koji.GenericError, "No such path: %s" % fn
    if not os.path.isfile(fn):
        raise koji.GenericError, "Not a regular file: %s" % fn
    #XXX should probably add an option to splice_rpm_sighdr to handle this instead
    sig_start, sigsize = koji.find_rpm_sighdr(fn)
    hdr_start = sig_start + sigsize
    hdrsize = koji.rpm_hdr_size(fn, hdr_start)
    inp = file(fn, 'rb')
    outp = tempfile.TemporaryFile(mode='w+b')
    #before signature
    outp.write(inp.read(sig_start))
    #signature
    outp.write(sighdr)
    inp.seek(sigsize, 1)
    #main header
    outp.write(inp.read(hdrsize))
    inp.close()
    outp.seek(0,0)
    ts = rpm.TransactionSet()
    # disable signature/digest checks; we have no payload, so verifies
    # would fail otherwise
    ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)
    hdr = ts.hdrFromFdno(outp.fileno())
    outp.close()
    return hdr[rpm.RPMTAG_SIGMD5], hdr[rpm.RPMTAG_SIGGPG]

def check_rpm_sig(an_rpm, sigkey, sighdr):
    #verify that the provided signature header matches the key and rpm
    rinfo = get_rpm(an_rpm, strict=True)
    binfo = get_build(rinfo['build_id'])
    builddir = koji.pathinfo.build(binfo)
    rpm_path = "%s/%s" % (builddir, koji.pathinfo.rpm(rinfo))
    if not os.path.exists(rpm_path):
        raise koji.GenericError, "No such path: %s" % rpm_path
    if not os.path.isfile(rpm_path):
        raise koji.GenericError, "Not a regular file: %s" % rpm_path
    # splice the candidate sighdr onto the rpm in a temp file and let
    # rpmlib do a full verify on the result
    fd, temp = tempfile.mkstemp()
    os.close(fd)
    try:
        koji.splice_rpm_sighdr(sighdr, rpm_path, temp)
        ts = rpm.TransactionSet()
        ts.setVSFlags(0)  #full verify
        fo = file(temp, 'rb')
        hdr = ts.hdrFromFdno(fo.fileno())
        fo.close()
    except:
        # clean up the temp file, then re-raise the original error
        try:
            os.unlink(temp)
        except:
            pass
        raise
    raw_key = hdr[rpm.RPMTAG_SIGGPG]
    if raw_key is None:
        found_key = None
    else:
        found_key = koji.hex_string(raw_key[13:17])
    if sigkey != found_key:
        raise koji.GenericError, "Signature key mismatch: got %s, expected %s" \
                % (found_key, sigkey)
    os.unlink(temp)



def query_rpm_sigs(rpm_id=None, sigkey=None, queryOpts=None):
    """Query the rpmsigs table, optionally filtered by rpm_id and/or sigkey"""
    fields = ('rpm_id', 'sigkey', 'sighash')
    clauses = []
    if rpm_id is not None:
        clauses.append("rpm_id=%(rpm_id)s")
    if sigkey is not None:
        clauses.append("sigkey=%(sigkey)s")
    query = QueryProcessor(columns=fields, tables=('rpmsigs',), clauses=clauses,
                           values=locals(), opts=queryOpts)
    return query.execute()

def write_signed_rpm(an_rpm, sigkey, force=False):
    """Write a signed copy of the rpm

    Splices the cached signature header for sigkey onto the rpm on disk.
    If the signed copy already exists, it is left alone unless force is True.
    """
    context.session.assertPerm('sign')
    #XXX - still not sure if this is the right restriction
    rinfo = get_rpm(an_rpm, strict=True)
    binfo = get_build(rinfo['build_id'])
    nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rinfo
    builddir = koji.pathinfo.build(binfo)
    rpm_path = "%s/%s" % (builddir, koji.pathinfo.rpm(rinfo))
    if not os.path.exists(rpm_path):
        raise koji.GenericError, "No such path: %s" % rpm_path
    if not os.path.isfile(rpm_path):
        raise koji.GenericError, "Not a regular file: %s" % rpm_path
    #make sure we have it in the db
    rpm_id = rinfo['id']
    q = """SELECT sighash FROM rpmsigs WHERE rpm_id=%(rpm_id)i AND sigkey=%(sigkey)s"""
    row = _fetchSingle(q, locals())
    if not row:
        raise koji.GenericError, "No cached signature for package %s, key %s" % (nvra, sigkey)
    (sighash,) = row
    signedpath = "%s/%s" % (builddir, koji.pathinfo.signed(rinfo, sigkey))
    if os.path.exists(signedpath):
        if not force:
            #already present
            return
        else:
            os.unlink(signedpath)
    sigpath = "%s/%s" % (builddir, koji.pathinfo.sighdr(rinfo, sigkey))
    fo = file(sigpath, 'rb')
    sighdr = fo.read()
    fo.close()
    koji.ensuredir(os.path.dirname(signedpath))
    koji.splice_rpm_sighdr(sighdr, rpm_path, signedpath)


def tag_history(build=None, tag=None, package=None, queryOpts=None):
    """Returns historical tag data

    package: only for given package
    build: only for given build
    tag: only for given tag
    """
    fields = ('build.id', 'package.name', 'build.version', 'build.release',
              'tag.id', 'tag.name', 'tag_listing.active',
              'tag_listing.create_event', 'tag_listing.revoke_event',
              'EXTRACT(EPOCH FROM ev1.time)', 'EXTRACT(EPOCH FROM ev2.time)',)
    aliases = ('build_id', 'name', 'version', 'release',
               'tag_id', 'tag_name', 'active',
               'create_event', 'revoke_event',
               'create_ts', 'revoke_ts',)
    # NOTE(review): st_complete appears to be unused in this function
    st_complete = koji.BUILD_STATES['COMPLETE']
    tables = ['tag_listing']
    joins = ["tag ON tag.id = tag_listing.tag_id",
             "build ON build.id = tag_listing.build_id",
             "package ON package.id = build.pkg_id",
             "events AS ev1 ON ev1.id = tag_listing.create_event",
             "LEFT OUTER JOIN events AS ev2 ON ev2.id = tag_listing.revoke_event", ]
    clauses = []
    if tag is not None:
        tag_id = get_tag_id(tag, strict=True)
        clauses.append("tag.id = %(tag_id)i")
    if build is not None:
        build_id = get_build(build, strict=True)['id']
        clauses.append("build.id = %(build_id)i")
    if package is not None:
        pkg_id = get_package_id(package, strict=True)
        clauses.append("package.id = %(pkg_id)i")
    query = QueryProcessor(columns=fields, aliases=aliases, tables=tables,
                           joins=joins, clauses=clauses, values=locals(),
                           opts=queryOpts)
    return query.execute()

def untagged_builds(name=None, queryOpts=None):
    """Returns the list of untagged builds"""
    fields = ('build.id', 'package.name', 'build.version', 'build.release')
    aliases = ('id', 'name', 'version', 'release')
    st_complete = koji.BUILD_STATES['COMPLETE']
    tables = ('build',)
    joins = []
    if name is None:
        joins.append("""package ON package.id = build.pkg_id""")
    else:
        joins.append("""package ON package.name=%(name)s AND package.id = build.pkg_id""")
    joins.append("""LEFT OUTER JOIN tag_listing ON tag_listing.build_id = build.id
        AND tag_listing.active = TRUE""")
    clauses = ["tag_listing.tag_id IS NULL", "build.state = %(st_complete)i"]
    #q = """SELECT build.id, package.name, build.version, build.release
    #FROM build
    #    JOIN package on package.id = build.pkg_id
    #    LEFT OUTER JOIN tag_listing ON tag_listing.build_id = build.id
    #        AND tag_listing.active IS TRUE
    #WHERE tag_listing.tag_id IS NULL AND build.state = %(st_complete)i"""
    #return _multiRow(q, locals(), aliases)
    query = QueryProcessor(columns=fields, aliases=aliases, tables=tables,
                           joins=joins, clauses=clauses, values=locals(),
                           opts=queryOpts)
    return query.execute()

def build_map():
    """Map which builds were used in the buildroots of other builds

    To be used for garbage collection
    """
    # find rpms whose buildroots we were in
    st_complete = koji.BUILD_STATES['COMPLETE']
koji.BUILD_STATES['COMPLETE'] + fields = ('used', 'built') + q = """SELECT DISTINCT used.id, built.id + FROM buildroot_listing + JOIN rpminfo AS r_used ON r_used.id = buildroot_listing.rpm_id + JOIN rpminfo AS r_built ON r_built.buildroot_id = buildroot_listing.buildroot_id + JOIN build AS used ON used.id = r_used.build_id + JOIN build AS built ON built.id = r_built.build_id + WHERE built.state = %(st_complete)i AND used.state =%(st_complete)i""" + return _multiRow(q, locals(), fields) + +def build_references(build_id): + """Returns references to a build + + This call is used to determine whether a build can be deleted + """ + #references (that matter): + # tag_listing + # buildroot_listing (via rpminfo) + # ?? rpmsigs (via rpminfo) + ret = {} + + # find tags + q = """SELECT tag_id, tag.name FROM tag_listing JOIN tag on tag_id = tag.id + WHERE build_id = %(build_id)i AND active = TRUE""" + ret['tags'] = _multiRow(q, locals(), ('id', 'name')) + + # find rpms whose buildroots we were in + st_complete = koji.BUILD_STATES['COMPLETE'] + fields = ('id', 'name', 'version', 'release', 'arch', 'build_id') + q = """SELECT that.id, that.name, that.version, that.release, that.arch, that.build_id + FROM rpminfo AS this + JOIN buildroot_listing ON this.build_id = %(build_id)i + AND buildroot_listing.rpm_id = this.id + JOIN rpminfo AS that ON buildroot_listing.buildroot_id = that.buildroot_id + JOIN build on that.build_id = build.id + WHERE this.build_id = %(build_id)i + AND build.state = %(st_complete)i""" + ret['rpms'] = _multiRow(q, locals(), fields) + + # find timestamp of most recent use in a buildroot + q = """SELECT buildroot.create_event + FROM buildroot_listing + JOIN rpminfo ON rpminfo.build_id = %(build_id)i + AND buildroot_listing.rpm_id = rpminfo.id + JOIN buildroot ON buildroot_listing.buildroot_id = buildroot.id + ORDER BY buildroot.create_event DESC + LIMIT 1""" + event_id = _singleValue(q, locals(), strict=False) + if event_id is None: + ret['last_used'] = None + 
else: + q = """SELECT EXTRACT(EPOCH FROM get_event_time(%(event_id)i))""" + ret['last_used'] = _fetchSingle(q, locals()) + return ret + +def delete_build(build, strict=True, min_ref_age=604800): + """delete a build, if possible + + Attempts to delete a build. A build can only be deleted if it is + unreferenced. + + If strict is true (default), an exception is raised if the build cannot + be deleted. + + Note that a deleted build is not completely gone. It is marked deleted and some + data remains in the database. Mainly, the rpms are removed. + + Note in particular that deleting a build DOES NOT free any NVRs (or NVRAs) for + reuse. + + Returns True if successful, False otherwise + """ + context.session.assertPerm('admin') + binfo = get_build(build, strict=True) + refs = build_references(binfo['id']) + if refs['tags']: + if strict: + raise koji.GenericError, "Cannot delete build, tagged: %s" % refs['tags'] + return False + if refs['rpms']: + if strict: + raise koji.GenericError, "Cannot delete build, used in buildroots: %s" % refs['rpms'] + return False + if refs['last_used']: + age = time.time() - refs['last_used'] + if age < min_ref_age: + if strict: + raise koji.GenericError, "Cannot delete build, used in recent buildroot" + return False + #otherwise we can delete it + _delete_build(binfo) + +def _delete_build(binfo): + """Delete a build (no reference checks) + + Please consider calling delete_build instead + """ + # build-related data: + # build KEEP (marked deleted) + # task ?? 
    #   changelogs DELETE
    #   tag_listing REVOKE (versioned) (but should ideally be empty anyway)
    #   rpminfo KEEP
    #   buildroot_listing KEEP (but should ideally be empty anyway)
    #   rpmsigs DELETE
    #   rpmdeps DELETE
    #   rpmfiles DELETE
    # files on disk: DELETE
    build_id = binfo['id']
    q = """SELECT id FROM rpminfo WHERE build_id=%(build_id)i"""
    rpm_ids = _fetchMulti(q, locals())
    # drop the per-rpm rows first
    for (rpm_id,) in rpm_ids:
        delete = """DELETE FROM rpmdeps WHERE rpm_id=%(rpm_id)i"""
        _dml(delete, locals())
        delete = """DELETE FROM rpmfiles WHERE rpm_id=%(rpm_id)i"""
        _dml(delete, locals())
        delete = """DELETE FROM rpmsigs WHERE rpm_id=%(rpm_id)i"""
        _dml(delete, locals())
    delete = """DELETE FROM changelogs WHERE build_id=%(build_id)i"""
    _dml(delete, locals())
    # revoke (rather than delete) tag entries, preserving history
    event_id = _singleValue("SELECT get_event()")
    update = """UPDATE tag_listing SET revoke_event=%(event_id)i, active=NULL
    WHERE active = TRUE AND build_id=%(build_id)i"""
    _dml(update, locals())
    st_deleted = koji.BUILD_STATES['DELETED']
    update = """UPDATE build SET state=%(st_deleted)i WHERE id=%(build_id)i"""
    _dml(update, locals())
    #now clear the build dir
    builddir = koji.pathinfo.build(binfo)
    rv = os.system(r"find '%s' -xdev \! -type d -print0 |xargs -0 rm -f" % builddir)
    if rv != 0:
        raise koji.GenericError, 'file removal failed (code %r) for %s' % (rv, builddir)
    #and clear out the emptied dirs
    os.system(r"find '%s' -xdev -depth -type d -print0 |xargs -0 rmdir" % builddir)

def reset_build(build):
    """Reset a build so that it can be reimported

    WARNING: this function is potentially destructive. use with care.
    nulls task_id
    sets state to FAILED
    clears data in rpminfo, rpmdeps, rpmfiles
    removes rpminfo entries from any buildroot_listings [!]
    remove files related to the build

    note, we don't actually delete the build data, so tags
    remain intact
    """
    # Only an admin may do this
    context.session.assertPerm('admin')
    binfo = get_build(build)
    if not binfo:
        #nothing to do
        return
    q = """SELECT id FROM rpminfo WHERE build_id=%(id)i"""
    ids = _fetchMulti(q, binfo)
    for (rpm_id,) in ids:
        delete = """DELETE FROM rpmdeps WHERE rpm_id=%(rpm_id)i"""
        _dml(delete, locals())
        delete = """DELETE FROM rpmfiles WHERE rpm_id=%(rpm_id)i"""
        _dml(delete, locals())
        delete = """DELETE FROM rpmsigs WHERE rpm_id=%(rpm_id)i"""
        _dml(delete, locals())
        delete = """DELETE FROM buildroot_listing WHERE rpm_id=%(rpm_id)i"""
        _dml(delete, locals())
    delete = """DELETE FROM rpminfo WHERE build_id=%(id)i"""
    _dml(delete, binfo)
    delete = """DELETE FROM changelogs WHERE build_id=%(id)i"""
    _dml(delete, binfo)
    binfo['state'] = koji.BUILD_STATES['FAILED']
    update = """UPDATE build SET state=%(state)i, task_id=NULL WHERE id=%(id)i"""
    _dml(update, binfo)
    #now clear the build dir
    builddir = koji.pathinfo.build(binfo)
    rv = os.system("find '%s' -xdev \\! -type d -print0 |xargs -0 rm -f" % builddir)
    if rv != 0:
        raise koji.GenericError, 'file removal failed (code %r) for %s' % (rv, builddir)

def cancel_build(build_id, cancel_task=True):
    """Cancel a build

    Calling function should perform permission checks.

    If the build is associated with a task, cancel the task as well (unless
    cancel_task is False).
    Return True if the build was successfully canceled, False if not.

    The cancel_task option is used to prevent loops between task- and build-
    cancellation.
    """
+ """ + st_canceled = koji.BUILD_STATES['CANCELED'] + st_building = koji.BUILD_STATES['BUILDING'] + update = """UPDATE build + SET state = %(st_canceled)i, completion_time = NOW() + WHERE id = %(build_id)i AND state = %(st_building)i""" + _dml(update, locals()) + build = get_build(build_id) + if build['state'] != st_canceled: + return False + task_id = build['task_id'] + if task_id != None: + build_notification(task_id, build_id) + if cancel_task: + Task(task_id).cancelFull(strict=False) + return True + +def _get_build_target(task_id): + # XXX Should we be storing a reference to the build target + # in the build table for reproducibility? + task = Task(task_id) + request = task.getRequest() + # request is (path-to-srpm, build-target-name, map-of-other-options) + return get_build_targets(request[1])[0] + +def get_notification_recipients(package_id, tag_id, state): + query = """SELECT email FROM build_notifications + WHERE ((package_id = %(package_id)i OR package_id IS NULL) + AND (tag_id = %(tag_id)i OR tag_id IS NULL)) + """ + if state != koji.BUILD_STATES['COMPLETE']: + query += """AND success_only = FALSE + """ + + results = _fetchMulti(query, locals()) + return [result[0] for result in results] + +def tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_success=False, failure_msg=''): + if is_successful: + state = koji.BUILD_STATES['COMPLETE'] + else: + state = koji.BUILD_STATES['FAILED'] + recipients = {} + build = get_build(build_id) + if tag_id: + tag = get_tag(tag_id) + for email in get_notification_recipients(build['package_id'], tag['id'], state): + recipients[email] = 1 + if from_id: + from_tag = get_tag(from_id) + for email in get_notification_recipients(build['package_id'], from_tag['id'], state): + recipients[email] = 1 + recipients_uniq = recipients.keys() + if len(recipients_uniq) > 0 and not (is_successful and ignore_success): + task_id = make_task('tagNotification', [recipients_uniq, is_successful, tag_id, from_id, build_id, 
user_id, ignore_success, failure_msg]) + if context.commit_pending: + # wtf is this for? + context.cnx.commit() + return task_id + return None + +def build_notification(task_id, build_id): + build = get_build(build_id) + target = _get_build_target(task_id) + + if build['state'] == koji.BUILD_STATES['BUILDING']: + raise koji.GenericError, 'never send notifications for incomplete builds' + + web_url = context.opts.get('KojiWebURL') + + recipients = get_notification_recipients(build['package_id'], target['dest_tag'], build['state']) + if len(recipients) > 0: + make_task('buildNotification', [recipients, build, target, web_url]) + +def get_build_notifications(user_id): + fields = ('id', 'user_id', 'package_id', 'tag_id', 'success_only', 'email') + query = """SELECT %s + FROM build_notifications + WHERE user_id = %%(user_id)i + """ % ', '.join(fields) + return _multiRow(query, locals(), fields) + +def new_group(name): + """Add a user group to the database""" + context.session.assertPerm('admin') + if get_user(name): + raise koji.GenericError, 'user/group already exists: %s' % name + group_id = _singleValue("SELECT nextval('users_id_seq')", strict=True) + usertype = koji.USERTYPES['GROUP'] + insert = """INSERT INTO users (id, name, password, usertype) + VALUES (%(group_id)i, %(name)s, NULL, %(usertype)i)""" + _dml(insert, locals()) + return group_id + +def add_group_member(group,user): + """Add user to group""" + context.session.assertPerm('admin') + group = get_user(group) + user = get_user(user) + if group['usertype'] != koji.USERTYPES['GROUP']: + raise koji.GenericError, "Not a group: %(name)s" % group + if user['usertype'] == koji.USERTYPES['GROUP']: + raise koji.GenericError, "Groups cannot be members of other groups" + #check to see if user is already a member + user_id = user['id'] + group_id = group['id'] + q = """SELECT user_id FROM user_groups + WHERE active = TRUE AND user_id = %(user_id)i + AND group_id = %(group_id)s + FOR UPDATE""" + row = _fetchSingle(q, 
locals(), strict=False) + if row: + raise koji.GenericError, "User already in group" + insert = """INSERT INTO user_groups (user_id,group_id) + VALUES(%(user_id)i,%(group_id)i)""" + _dml(insert,locals()) + + +def drop_group_member(group,user): + """Drop user from group""" + context.session.assertPerm('admin') + group = get_user(group) + user = get_user(user) + if group['usertype'] != koji.USERTYPES['GROUP']: + raise koji.GenericError, "Not a group: %(name)s" % group + user_id = user['id'] + group_id = group['id'] + insert = """UPDATE user_groups + SET active=NULL, revoke_event=get_event() + WHERE active = TRUE AND user_id = %(user_id)i + AND group_id = %(group_id)i""" + _dml(insert,locals()) + +def get_group_members(group): + """Get the members of a group""" + context.session.assertPerm('admin') + group = get_user(group) + if group['usertype'] != koji.USERTYPES['GROUP']: + raise koji.GenericError, "Not a group: %(name)s" % group + group_id = group['id'] + fields = ('id','name','usertype','krb_principal') + q = """SELECT %s FROM user_groups + JOIN users ON user_id = users.id + WHERE active = TRUE AND group_id = %%(group_id)i""" % ','.join(fields) + return _multiRow(q, locals(), fields) + +class QueryProcessor(object): + """ + Build a query from its components. + - columns, aliases, tables: lists of the column names to retrieve, + the tables to retrieve them from, and the key names to use when + returning values as a map, respectively + - joins: a list of joins in the form 'table1 ON table1.col1 = table2.col2', 'JOIN' will be + prepended automatically; if extended join syntax (LEFT, OUTER, etc.) 
is required, + it can be specified, and 'JOIN' will not be prepended + - clauses: a list of where clauses in the form 'table1.col1 OPER table2.col2-or-variable'; + each clause will be surrounded by parentheses and all will be AND'ed together + - values: the map that will be used to replace any substitution expressions in the query + - opts: a map of query options; currently supported options are: + countOnly: if True, return an integer indicating how many results would have been + returned, rather than the actual query results + order: a column or alias name to use in the 'ORDER BY' clause + offset: an integer to use in the 'OFFSET' clause + limit: an integer to use in the 'LIMIT' clause + asList: if True, return results as a list of lists, where each list contains the + column values in query order, rather than the usual list of maps + """ + def __init__(self, columns=None, aliases=None, tables=None, + joins=None, clauses=None, values=None, opts=None): + self.columns = columns + self.aliases = aliases + if columns and aliases: + if len(columns) != len(aliases): + raise StandardError, 'column and alias lists must be the same length' + self.colsByAlias = dict(zip(aliases, columns)) + else: + self.colsByAlias = {} + self.tables = tables + self.joins = joins + self.clauses = clauses + if values: + self.values = values + else: + self.value = {} + if opts: + self.opts = opts + else: + self.opts = {} + + def countOnly(self, count): + self.opts['countOnly'] = count + + def __str__(self): + query = \ +""" +SELECT %(col_str)s + FROM %(table_str)s +%(join_str)s +%(clause_str)s + %(order_str)s +%(offset_str)s + %(limit_str)s +""" + if self.opts.get('countOnly'): + if self.opts.get('offset') or self.opts.get('limit'): + # If we're counting with an offset and/or limit, we need + # to wrap the offset/limited query and then count the results, + # rather than trying to offset/limit the single row returned + # by count(*). 
Because we're wrapping the query, we don't care + # about the column values. + col_str = '1' + else: + col_str = 'count(*)' + else: + col_str = self._seqtostr(self.columns) + table_str = self._seqtostr(self.tables) + join_str = self._joinstr() + clause_str = self._seqtostr(self.clauses, sep=')\n AND (') + if clause_str: + clause_str = ' WHERE (' + clause_str + ')' + order_str = self._order() + offset_str = self._optstr('offset') + limit_str = self._optstr('limit') + + query = query % locals() + if self.opts.get('countOnly') and \ + (self.opts.get('offset') or self.opts.get('limit')): + query = 'SELECT count(*)\nFROM (' + query + ') numrows' + return query + + def __repr__(self): + return '' % \ + (self.columns, self.aliases, self.tables, self.joins, self.clauses, self.values, self.opts) + + def _seqtostr(self, seq, sep=', '): + if seq: + return sep.join(seq) + else: + return '' + + def _joinstr(self): + if not self.joins: + return '' + result = '' + for join in self.joins: + if result: + result += '\n' + if re.search(r'\bjoin\b', join, re.IGNORECASE): + # The join clause already contains the word 'join', + # so don't prepend 'JOIN' to it + result += ' ' + join + else: + result += ' JOIN ' + join + return result + + def _order(self): + # Don't bother sorting if we're just counting + if self.opts.get('countOnly'): + return '' + order = self.opts.get('order') + if order: + if order.startswith('-'): + order = order[1:] + direction = ' DESC' + else: + direction = '' + # Check if we're ordering by alias first + orderCol = self.colsByAlias.get(order) + if orderCol: + pass + elif order in self.columns: + orderCol = order + else: + raise StandardError, 'invalid order: ' + order + return 'ORDER BY ' + orderCol + direction + else: + return '' + + def _optstr(self, optname): + optval = self.opts.get(optname) + if optval: + return '%s %i' % (optname.upper(), optval) + else: + return '' + + def execute(self): + query = str(self) + if self.opts.get('countOnly'): + return 
_singleValue(query, self.values, strict=True) + elif self.opts.get('asList'): + return _fetchMulti(query, self.values) + else: + return _multiRow(query, self.values, (self.aliases or self.columns)) + + def executeOne(self): + results = self.execute() + if isinstance(results, list): + if len(results) > 0: + return results[0] + else: + return None + return results + + +# +# XMLRPC Methods +# +class RootExports(object): + '''Contains functions that are made available via XMLRPC''' + + def buildFromCVS(self, url, tag): + raise koji.Deprecated + #return make_task('buildFromCVS',[url, tag]) + + def build(self, src, target, opts=None, priority=None, channel=None): + """Create a build task + + priority: the amount to increase (or decrease) the task priority, relative + to the default priority; higher values mean lower priority; only + admins have the right to specify a negative priority here + channel: the channel to allocate the task to + Returns the task id + """ + taskOpts = {} + if priority: + if priority < 0: + if not context.session.hasPerm('admin'): + raise koji.NotAllowed, 'only admins may create high-priority tasks' + taskOpts['priority'] = koji.PRIO_DEFAULT + priority + if channel: + taskOpts['channel'] = channel + return make_task('build',[src, target, opts],**taskOpts) + + def chainBuild(self, srcs, target, opts=None, priority=None, channel=None): + """Create a chained build task for building sets of packages in order + + srcs: list of pkg lists, ie [[src00, src01, src03],[src20],[src30,src31],...] + where each of the top-level lists gets built and a new repo is created + before the next list is built. 
+ target: build target + priority: the amount to increase (or decrease) the task priority, relative + to the default priority; higher values mean lower priority; only + admins have the right to specify a negative priority here + channel: the channel to allocate the task to + Returns a list of all the dependent task ids + """ + taskOpts = {} + if priority: + if priority < 0: + if not context.session.hasPerm('admin'): + raise koji.NotAllowed, 'only admins may create high-priority tasks' + taskOpts['priority'] = koji.PRIO_DEFAULT + priority + if channel: + taskOpts['channel'] = channel + + return make_task('chainbuild',[srcs,target,opts],**taskOpts) + + def runroot(self, target, arch, command, **opts): + """Create a runroot task + + Returns the task id + """ + context.session.assertPerm('runroot') + taskopts = {'priority':15, + 'arch': arch, + 'channel': 'runroot'} + return mktask(taskopts,'runroot', target, arch, command, **opts) + + def hello(self,*args): + return "Hello World" + + def fault(self): + "debugging. raise an error" + raise Exception, "test exception" + + def error(self): + "debugging. 
raise an error" + raise koji.GenericError, "test error" + + def echo(self,*args): + return args + + def getAPIVersion(self): + return koji.API_VERSION + + def showSession(self): + return "%s" % context.session + + def showOpts(self): + context.session.assertPerm('admin') + return "%r" % context.opts + + def getLastEvent(self): + fields = ('id', 'ts') + q = """SELECT id, EXTRACT(EPOCH FROM time) FROM events + ORDER BY id DESC LIMIT 1""" + return _singleRow(q, {}, fields, strict=True) + + def makeTask(self,*args,**opts): + #this is mainly for debugging + #only an admin can make arbitrary tasks + context.session.assertPerm('admin') + return make_task(*args,**opts) + + def uploadFile(self, path, name, size, md5sum, offset, data): + #path: the relative path to upload to + #name: the name of the file + #size: size of contents (bytes) + #md5: md5sum (hex digest) of contents + #data: base64 encoded file contents + #offset: the offset of the chunk + # files can be uploaded in chunks, if so the md5 and size describe + # the chunk rather than the whole file. 
the offset indicates where + # the chunk belongs + # the special offset -1 is used to indicate the final chunk + contents = base64.decodestring(data) + del data + if offset != -1: + if size is not None: + if size != len(contents): return False + if md5sum is not None: + if md5sum != md5.new(contents).hexdigest(): + return False + uploadpath = koji.pathinfo.work() + #XXX - have an incoming dir and move after upload complete + # SECURITY - ensure path remains under uploadpath + path = os.path.normpath(path) + if path.startswith('..'): + raise koji.GenericError, "Upload path not allowed: %s" % path + udir = "%s/%s" % (uploadpath,path) + koji.ensuredir(udir) + fn = "%s/%s" % (udir,name) + try: + st = os.lstat(fn) + except OSError, e: + if e.errno == errno.ENOENT: + pass + else: + raise + else: + if not stat.S_ISREG(st.st_mode): + raise koji.GenericError, "destination not a file: %s" % fn + # we expect some files to be uploaded more than once to support + # realtime log-file viewing + # elif offset == 0: + # #first chunk, so file should not exist yet + # raise koji.GenericError, "file already exists: %s" % fn + fd = os.open(fn, os.O_RDWR | os.O_CREAT) + # log_error("fd=%r" %fd) + try: + if offset == 0 or (offset == -1 and size == len(contents)): + #truncate file + fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB) + try: + os.ftruncate(fd, 0) + # log_error("truncating fd %r to 0" %fd) + finally: + fcntl.lockf(fd, fcntl.LOCK_UN) + if offset == -1: + os.lseek(fd,0,2) + else: + os.lseek(fd,offset,0) + #write contents + fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB, len(contents), 0, 2) + try: + os.write(fd, contents) + # log_error("wrote contents") + finally: + fcntl.lockf(fd, fcntl.LOCK_UN, len(contents), 0, 2) + if offset == -1: + if size is not None: + #truncate file + fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB) + try: + os.ftruncate(fd, size) + # log_error("truncating fd %r to size %r" % (fd,size)) + finally: + fcntl.lockf(fd, fcntl.LOCK_UN) + if md5sum is not None: + #check 
final md5sum + sum = md5.new() + fcntl.lockf(fd, fcntl.LOCK_SH|fcntl.LOCK_NB) + try: + # log_error("checking md5sum") + os.lseek(fd,0,0) + while True: + block = os.read(fd, 819200) + if not block: break + sum.update(block) + if md5sum != sum.hexdigest(): + # log_error("md5sum did not match") + #os.close(fd) + return False + finally: + fcntl.lockf(fd, fcntl.LOCK_UN) + finally: + os.close(fd) + return True + + def downloadTaskOutput(self, taskID, fileName, offset=0, size=-1): + """Download the file with the given name, generated by the task with the + given ID.""" + if '..' in fileName or '/' in fileName: + raise koji.GenericError, 'Invalid file name: %s' % fileName + filePath = '%s/tasks/%i/%s' % (koji.pathinfo.work(), taskID, fileName) + filePath = os.path.normpath(filePath) + if not os.path.isfile(filePath): + raise koji.GenericError, 'no file "%s" output by task %i' % (fileName, taskID) + # Let the caller handler any IO or permission errors + f = file(filePath, 'r') + if isinstance(offset, int): + if offset > 0: + f.seek(offset, 0) + elif offset < 0: + f.seek(offset, 2) + contents = f.read(size) + f.close() + return base64.encodestring(contents) + + def listTaskOutput(self, taskID): + """List the files generated by the task with the given ID. This + will usually include one or more RPMs, and one or more log files. 
+ If the task did not generate any files, or the output directory + for the task no longer exists, return an empty list.""" + taskDir = '%s/tasks/%i' % (koji.pathinfo.work(), taskID) + if os.path.isdir(taskDir): + return os.listdir(taskDir) + else: + return [] + + createTag = staticmethod(create_tag) + editTag = staticmethod(old_edit_tag) + editTag2 = staticmethod(edit_tag) + deleteTag = staticmethod(delete_tag) + + importBuildInPlace = staticmethod(import_build_in_place) + resetBuild = staticmethod(reset_build) + + untaggedBuilds = staticmethod(untagged_builds) + tagHistory = staticmethod(tag_history) + + buildMap = staticmethod(build_map) + deleteBuild = staticmethod(delete_build) + def buildReferences(self, build): + return build_references(get_build(build, strict=True)['id']) + + def importRPM(self, path, basename): + """Import an RPM into the database. + + The file must be uploaded first. + """ + uploadpath = koji.pathinfo.work() + fn = "%s/%s/%s" %(uploadpath,path,basename) + if not os.path.exists(fn): + raise koji.GenericError, "No such file: %s" % fn + rpminfo = import_rpm(fn) + import_rpm_file(fn,rpminfo['build'],rpminfo) + add_rpm_sig(rpminfo['id'], koji.rip_rpm_sighdr(fn)) + + + def tagBuildBypass(self,tag,build,force=False): + """Tag a build without running post checks or notifications + + This is a short circuit function for imports. + Admin permission required. + + Tagging with a locked tag is not allowed unless force is true. + Retagging is not allowed unless force is true. 
(retagging changes the order + of entries will affect which build is the latest) + """ + context.session.assertPerm('admin') + _tag_build(tag, build, force=force) + + def tagBuild(self,tag,build,force=False,fromtag=None): + """Request that a build be tagged + + The force option will attempt to force the action in the event of: + - tag locked + - missing permission + - package not in list for tag + The force option is really only effect for admins + + If fromtag is specified, this becomes a move operation. + + This call creates a task to do some of the heavy lifting + The return value is the task id + """ + #first some lookups and basic sanity checks + build = get_build(build, strict=True) + tag = get_tag(tag, strict=True) + if fromtag: + fromtag_id = get_tag_id(fromtag, strict=True) + else: + fromtag_id = None + pkg_id = build['package_id'] + tag_id = tag['id'] + build_id = build['id'] + # note: we're just running the quick checks now so we can fail + # early if appropriate, rather then waiting for the task + # Make sure package is on the list for this tag + pkgs = readPackageList(tagID=tag_id, pkgID=pkg_id, inherit=True) + pkg_error = None + if not pkgs.has_key(pkg_id): + pkg_error = "Package %s not in list for %s" % (build['name'], tag['name']) + elif pkgs[pkg_id]['blocked']: + pkg_error = "Package %s blocked in %s" % (build['name'], tag['name']) + if pkg_error: + if force and context.session.hasPerm('admin'): + pkglist_add(tag_id,pkg_id,force=True,block=False) + else: + raise koji.TagError, pkg_error + #access check + assert_tag_access(tag_id,user_id=None,force=force) + if fromtag: + assert_tag_access(fromtag_id,user_id=None,force=force) + #spawn the tagging tasks (it performs more thorough checks) + return make_task('tagBuild', [tag_id, build_id, force, fromtag_id], priority=10) + + def untagBuild(self,tag,build,strict=True,force=False): + """Untag a build + + Unlike tagBuild, this does not create a task + No return value""" + #we can't staticmethod this one -- 
we're limiting the options + user_id = context.session.user_id + try: + _untag_build(tag,build,strict=strict,force=force) + tag_notification(True, None, tag, build, user_id) + except Exception, e: + exctype, value = sys.exc_info()[:2] + tag_notification(False, None, tag, build, user_id, False, "%s: %s" % (exctype, value)) + raise e + + def untagBuildBypass(self, tag, build, strict=True, force=False): + """Untag a build without any checks or notifications + + Admins only. Intended for syncs/imports. + + Unlike tagBuild, this does not create a task + No return value""" + context.session.assertPerm('admin') + _untag_build(tag, build, strict=strict, force=force) + + def moveBuild(self,tag1,tag2,build,force=False): + """Move a build from tag1 to tag2 + + Returns the task id of the task performing the move""" + return self.tagBuild(tag2,build,force=force,fromtag=tag1) + + def moveAllBuilds(self, tag1, tag2, package, force=False): + """Move all builds of a package from tag1 to tag2 in the correct order + + Returns the task id of the task performing the move""" + + #lookups and basic sanity checks + pkg_id = get_package_id(package, strict=True) + tag1_id = get_tag_id(tag1, strict=True) + tag2_id = get_tag_id(tag2, strict=True) + + # note: we're just running the quick checks now so we can fail + # early if appropriate, rather then waiting for the task + # Make sure package is on the list for the tag we're adding it to + pkgs = readPackageList(tagID=tag2_id, pkgID=pkg_id, inherit=True) + pkg_error = None + if not pkgs.has_key(pkg_id): + pkg_error = "Package %s not in list for tag %s" % (package, tag2) + elif pkgs[pkg_id]['blocked']: + pkg_error = "Package %s blocked in tag %s" % (package, tag2) + if pkg_error: + if force and context.session.hasPerm('admin'): + pkglist_add(tag2_id,pkg_id,force=True,block=False) + else: + raise koji.TagError, pkg_error + + #access check + assert_tag_access(tag1_id,user_id=None,force=force) + assert_tag_access(tag2_id,user_id=None,force=force) 
+ + build_list = readTaggedBuilds(tag1_id, package=package) + # we want 'ORDER BY tag_listing.create_event ASC' not DESC so reverse + build_list.reverse() + + wait_on = [] + tasklist = [] + for build in build_list: + task_id = make_task('dependantTask', [wait_on, [['tagBuild', [tag2_id, build['id'], force, tag1_id], {'priority':15}]]]) + wait_on = [task_id] + log_error("\nMade Task: %s\n" % task_id) + tasklist.append(task_id) + return tasklist + + def fixTags(self): + """A fix for incomplete tag import, adds tag_config entries + + Note the query will only add the tag_config entries if there are + no other tag_config entries, so it will not 'undelete' any tags""" + c = context.cnx.cursor() + q = """ + INSERT INTO tag_config(tag_id,arches,perm_id,locked) + SELECT id,'i386 ia64 ppc ppc64 s390 s390x x86_64',NULL,False + FROM tag LEFT OUTER JOIN tag_config ON tag.id = tag_config.tag_id + WHERE revoke_event IS NULL AND active IS NULL; + """ + context.commit_pending = True + c.execute(q) + + def listTags(self, build=None, package=None, queryOpts=None): + """List tags. If build is specified, only return tags associated with the + given build. If package is specified, only return tags associated with the + specified package. If neither is specified, return all tags. Build can be + either an integer ID or a string N-V-R. Package can be either an integer ID + or a string name. Only one of build and package may be specified. Returns + a list of maps. 
Each map contains keys: + - id + - name + - perm_id + - perm + - arches + - locked + + If package is specified, each map will also contain: + - owner_id + - owner_name + - blocked + - extra_arches + """ + if build is not None and package is not None: + raise koji.GenericError, 'only one of build and package may be specified' + + tables = ['tag_config'] + joins = ['tag ON tag.id = tag_config.tag_id', + 'LEFT OUTER JOIN permissions ON tag_config.perm_id = permissions.id'] + fields = ['tag.id', 'tag.name', 'tag_config.perm_id', 'permissions.name', + 'tag_config.arches', 'tag_config.locked'] + aliases = ['id', 'name', 'perm_id', 'perm', + 'arches', 'locked'] + clauses = ['tag_config.active = true'] + + if build is not None: + # lookup build id + buildinfo = get_build(build) + if not buildinfo: + raise koji.GenericError, 'invalid build: %s' % build + joins.append('tag_listing ON tag.id = tag_listing.tag_id') + clauses.append('tag_listing.active = true') + clauses.append('tag_listing.build_id = %(buildID)i') + buildID = buildinfo['id'] + elif package is not None: + packageinfo = self.getPackage(package) + if not packageinfo: + raise koji.GenericError, 'invalid package: %s' % package + fields.extend(['users.id', 'users.name', 'tag_packages.blocked', 'tag_packages.extra_arches']) + aliases.extend(['owner_id', 'owner_name', 'blocked', 'extra_arches']) + joins.append('tag_packages ON tag.id = tag_packages.tag_id') + clauses.append('tag_packages.active = true') + clauses.append('tag_packages.package_id = %(packageID)i') + joins.append('users ON tag_packages.owner = users.id') + packageID = packageinfo['id'] + + query = QueryProcessor(columns=fields, aliases=aliases, tables=tables, + joins=joins, clauses=clauses, values=locals(), + opts=queryOpts) + return query.execute() + + getBuild = staticmethod(get_build) + getChangelogEntries = staticmethod(get_changelog_entries) + + def cancelBuild(self, buildID): + """Cancel the build with the given buildID + + If the build is 
associated with a task, cancel the task as well. + Return True if the build was successfully canceled, False if not.""" + build = get_build(buildID) + if build == None: + return False + if build['owner_id'] != context.session.user_id: + if not context.session.hasPerm('admin'): + raise koji.NotAllowed, 'Cannot cancel build, not owner' + return cancel_build(build['id']) + + def assignTask(self,task_id,host,force=False): + """Assign a task to a host + + Specify force=True to assign a non-free task + """ + context.session.assertPerm('admin') + task = Task(task_id) + host = get_host(host,strict=True) + task.assign(host['id'],force) + + def freeTask(self,task_id): + """Free a task""" + context.session.assertPerm('admin') + task = Task(task_id) + task.free() + + def cancelTask(self,task_id,recurse=True): + """Cancel a task""" + task = Task(task_id) + if not task.verifyOwner() and not task.verifyHost(): + if not context.session.hasPerm('admin'): + raise koji.NotAllowed, 'Cannot cancel task, not owner' + #non-admins can also use cancelBuild + task.cancel(recurse=recurse) + + def cancelTaskFull(self,task_id,strict=True): + """Cancel a task and all tasks in its group""" + context.session.assertPerm('admin') + #non-admins can use cancelBuild or cancelTask + Task(task_id).cancelFull(strict=strict) + + def cancelTaskChildren(self,task_id): + """Cancel a task's children, but not the task itself""" + task = Task(task_id) + if not task.verifyOwner() and not task.verifyHost(): + if not context.session.hasPerm('admin'): + raise koji.NotAllowed, 'Cannot cancel task, not owner' + task.cancelChildren() + + def listTagged(self,tag,event=None,inherit=False,prefix=None,latest=False,package=None): + """List builds tagged with tag""" + if not isinstance(tag,int): + #lookup tag id + tag = get_tag_id(tag,strict=True) + results = readTaggedBuilds(tag,event,inherit=inherit,latest=latest,package=package) + if prefix: + results = [build for build in results if 
build['package_name'].lower().startswith(prefix)] + return results + + def listTaggedRPMS(self,tag,event=None,inherit=False,latest=False,package=None,arch=None,rpmsigs=False): + """List rpms and builds within tag""" + if not isinstance(tag,int): + #lookup tag id + tag = get_tag_id(tag,strict=True) + return readTaggedRPMS(tag,event=event,inherit=inherit,latest=latest,package=package,arch=arch,rpmsigs=rpmsigs) + + def listBuilds(self, packageID=None, userID=None, taskID=None, prefix=None, state=None, + completeBefore=None, completeAfter=None, queryOpts=None): + """List package builds. + If packageID is specified, restrict the results to builds of the specified package. + If userID is specified, restrict the results to builds owned by the given user. + If taskID is specfied, restrict the results to builds with the given task ID. If taskID is -1, + restrict the results to builds with a non-null taskID. + If prefix is specified, restrict the results to builds whose package name starts with that + prefix. + If completeBefore and/or completeAfter are specified, restrict the results to builds whose + completion_time is before and/or after the given time. The time may be specified as a floating + point value indicating seconds since the Epoch (as returned by time.time()) or as a string in + ISO format ('YYYY-MM-DD HH24:MI:SS'). + One or more of packageID, userID, and taskID may be specified. + + Returns a list of maps. Each map contains the following keys: + + - build_id + - version + - release + - epoch + - state + - package_id + - package_name + - nvr (synthesized for sorting purposes) + - owner_id + - owner_name + - creation_event_id + - creation_time + - completion_time + - task_id + + If no builds match, an empty list is returned. 
+ """ + fields = (('build.id', 'build_id'), ('build.version', 'version'), ('build.release', 'release'), + ('build.epoch', 'epoch'), ('build.state', 'state'), ('build.completion_time', 'completion_time'), + ('events.id', 'creation_event_id'), ('events.time', 'creation_time'), ('build.task_id', 'task_id'), + ('package.id', 'package_id'), ('package.name', 'package_name'), + ("package.name || '-' || build.version || '-' || build.release", 'nvr'), + ('users.id', 'owner_id'), ('users.name', 'owner_name')) + + tables = ['build'] + joins = ['events ON build.create_event = events.id', + 'package ON build.pkg_id = package.id', + 'users ON build.owner = users.id'] + clauses = [] + if packageID != None: + clauses.append('package.id = %(packageID)i') + if userID != None: + clauses.append('users.id = %(userID)i') + if taskID != None: + if taskID == -1: + clauses.append('build.task_id IS NOT NULL') + else: + clauses.append('build.task_id = %(taskID)i') + if prefix: + clauses.append("package.name ~* ('^' || %(prefix)s || '.*')") + if state != None: + clauses.append('build.state = %(state)i') + if completeBefore: + if not isinstance(completeBefore, str): + completeBefore = datetime.datetime.fromtimestamp(completeBefore).isoformat(' ') + clauses.append('build.completion_time < %(completeBefore)s') + if completeAfter: + if not isinstance(completeAfter, str): + completeAfter = datetime.datetime.fromtimestamp(completeAfter).isoformat(' ') + clauses.append('build.completion_time > %(completeAfter)s') + + query = QueryProcessor(columns=[pair[0] for pair in fields], + aliases=[pair[1] for pair in fields], + tables=tables, joins=joins, clauses=clauses, + values=locals(), opts=queryOpts) + + return query.execute() + + def getLatestBuilds(self,tag,event=None,package=None): + """List latest builds for tag (inheritance enabled)""" + if not isinstance(tag,int): + #lookup tag id + tag = get_tag_id(tag,strict=True) + return readTaggedBuilds(tag,event,inherit=True,latest=True,package=package) + + 
def getLatestRPMS(self, tag, package=None, arch=None, event=None, rpmsigs=False): + """List latest RPMS for tag (inheritance enabled)""" + if not isinstance(tag,int): + #lookup tag id + tag = get_tag_id(tag,strict=True) + return readTaggedRPMS(tag, package=package, arch=arch, event=event,inherit=True,latest=True, rpmsigs=rpmsigs) + + def getAverageBuildDuration(self, packageID): + """Get the average duration of a build of a package with + the given ID. Returns a floating-point value indicating the + average number of seconds the package took to build. If the package + has never been built, return None.""" + st_complete = koji.BUILD_STATES['COMPLETE'] + query = """SELECT EXTRACT(epoch FROM avg(build.completion_time - events.time)) + FROM build + JOIN events ON build.create_event = events.id + WHERE build.pkg_id = %(packageID)i + AND build.state = %(st_complete)i + AND build.task_id IS NOT NULL""" + + return _singleValue(query, locals()) + + packageListAdd = staticmethod(pkglist_add) + packageListRemove = staticmethod(pkglist_remove) + packageListBlock = staticmethod(pkglist_block) + packageListUnblock = staticmethod(pkglist_unblock) + packageListSetOwner = staticmethod(pkglist_setowner) + packageListSetArches = staticmethod(pkglist_setarches) + + groupListAdd = staticmethod(grplist_add) + groupListRemove = staticmethod(grplist_remove) + groupListBlock = staticmethod(grplist_block) + groupListUnblock = staticmethod(grplist_unblock) + + groupPackageListAdd = staticmethod(grp_pkg_add) + groupPackageListRemove = staticmethod(grp_pkg_remove) + groupPackageListBlock = staticmethod(grp_pkg_block) + groupPackageListUnblock = staticmethod(grp_pkg_unblock) + + groupReqListAdd = staticmethod(grp_req_add) + groupReqListRemove = staticmethod(grp_req_remove) + groupReqListBlock = staticmethod(grp_req_block) + groupReqListUnblock = staticmethod(grp_req_unblock) + + getTagGroups = staticmethod(readTagGroups) + + checkTagAccess = staticmethod(check_tag_access) + + 
getGlobalInheritance = staticmethod(readGlobalInheritance) + + def getInheritanceData(self,tag): + """Return inheritance data for tag""" + if not isinstance(tag,int): + #lookup tag id + tag = get_tag_id(tag,strict=True) + return readInheritanceData(tag) + + def setInheritanceData(self,tag,data,clear=False): + if not isinstance(tag,int): + #lookup tag id + tag = get_tag_id(tag,strict=True) + context.session.assertPerm('admin') + return writeInheritanceData(tag,data,clear=clear) + + def getFullInheritance(self,tag,event=None,reverse=False,stops={},jumps={}): + if not isinstance(tag,int): + #lookup tag id + tag = get_tag_id(tag,strict=True) + for mapping in [stops, jumps]: + for key in mapping.keys(): + mapping[int(key)] = mapping[key] + return readFullInheritance(tag,event,reverse,stops,jumps) + + def listRPMs(self, buildID=None, buildrootID=None, componentBuildrootID=None, hostID=None, arches=None, queryOpts=None): + """List RPMS. If buildID and/or buildrootID are specified, + restrict the list of RPMs to only those RPMs that are part of that + build, or were built in that buildroot. If componentBuildrootID is specified, + restrict the list to only those RPMs that will get pulled into that buildroot + when it is used to build another package. 
A list of maps is returned, each map + containing the following keys: + + - id + - name + - version + - release + - nvr (synthesized for sorting purposes) + - arch + - epoch + - payloadhash + - size + - buildtime + - build_id + - buildroot_id + + If componentBuildrootID is specified, two additional keys will be included: + - component_buildroot_id + - is_update + + If no build has the given ID, or the build generated no RPMs, + an empty list is returned.""" + fields = [('rpminfo.id', 'id'), ('rpminfo.name', 'name'), ('rpminfo.version', 'version'), + ('rpminfo.release', 'release'), + ("rpminfo.name || '-' || rpminfo.version || '-' || rpminfo.release", 'nvr'), + ('rpminfo.arch', 'arch'), + ('rpminfo.epoch', 'epoch'), ('rpminfo.payloadhash', 'payloadhash'), + ('rpminfo.size', 'size'), ('rpminfo.buildtime', 'buildtime'), + ('rpminfo.build_id', 'build_id'), ('rpminfo.buildroot_id', 'buildroot_id')] + joins = [] + clauses = [] + + if buildID != None: + clauses.append('rpminfo.build_id = %(buildID)i') + if buildrootID != None: + clauses.append('rpminfo.buildroot_id = %(buildrootID)i') + if componentBuildrootID != None: + fields.append(('buildroot_listing.buildroot_id as component_buildroot_id', + 'component_buildroot_id')) + fields.append(('buildroot_listing.is_update', 'is_update')) + joins.append('buildroot_listing ON rpminfo.id = buildroot_listing.rpm_id') + clauses.append('buildroot_listing.buildroot_id = %(componentBuildrootID)i') + if hostID != None: + joins.append('buildroot ON rpminfo.buildroot_id = buildroot.id') + clauses.append('buildroot.host_id = %(hostID)i') + if arches != None: + if isinstance(arches, list) or isinstance(arches, tuple): + clauses.append('rpminfo.arch IN %(arches)s') + elif isinstance(arches, str): + clauses.append('rpminfo.arch = %(arches)s') + else: + raise koji.GenericError, 'invalid type for "arches" parameter: %s' % type(arches) + + query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields], + 
tables=['rpminfo'], joins=joins, clauses=clauses, + values=locals(), opts=queryOpts) + return query.execute() + + def listBuildRPMs(self,build): + """Get information about all the RPMs generated by the build with the given + ID. A list of maps is returned, each map containing the following keys: + + - id + - name + - version + - release + - arch + - epoch + - payloadhash + - size + - buildtime + - build_id + - buildroot_id + + If no build has the given ID, or the build generated no RPMs, an empty list is returned.""" + if not isinstance(build, int): + #lookup build id + build = self.findBuildID(build) + return self.listRPMs(buildID=build) + + def getRPM(self, rpmID): + """Get information about the RPM with the given ID.""" + return get_rpm(rpmID) + + def getRPMDeps(self, rpmID, depType=None, queryOpts=None): + """Return dependency information about the RPM with the given ID. + If depType is specified, restrict results to dependencies of the given type. + Otherwise, return all dependency information. A list of maps will be returned, + each with the following keys: + - name + - version + - flags + - type + + If there is no RPM with the given ID, or the RPM has no dependency information, + return None. + """ + fields = (('dep_name', 'name'), ('dep_version', 'version'), + ('dep_flags', 'flags'), ('dep_type', 'type')) + clauses = ['rpm_id = %(rpmID)i'] + if depType != None: + clauses.append('dep_type = %(depType)i') + + query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields], + tables=['rpmdeps'], clauses=clauses, + values=locals(), opts=queryOpts) + return query.execute() + + def listRPMFiles(self, rpmID, queryOpts=None): + """List files associated with the RPM with the given ID. 
A list of maps + will be returned, each with the following keys: + - name + - md5 + - size + - flags + + If there is no RPM with the given ID, or that RPM contains no files, + and empty list will be returned.""" + fields = (('filename', 'name'), ('filemd5', 'md5'), + ('filesize', 'size'), ('fileflags', 'flags')) + + query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields], + tables=['rpmfiles'], clauses=['rpm_id = %(rpmID)i'], + values=locals(), opts=queryOpts) + return query.execute() + + def getRPMFile(self, rpmID, filename): + """Get info about the file in the given RPM with the given filename.""" + fields = (('rpm_id', 'rpm_id'), ('filename', 'name'), ('filemd5', 'md5'), + ('filesize', 'size'), ('fileflags', 'flags')) + query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields], + tables=['rpmfiles'], + clauses=['rpm_id = %(rpmID)i and filename = %(filename)s'], + values=locals()) + return query.executeOne() + + queryRPMSigs = staticmethod(query_rpm_sigs) + writeSignedRPM = staticmethod(write_signed_rpm) + + def addRPMSig(self, an_rpm, data): + """Store a signature header for an rpm + + data: the signature header encoded as base64 + """ + context.session.assertPerm('sign') + return add_rpm_sig(an_rpm, base64.decodestring(data)) + + findBuildID = staticmethod(find_build_id) + getTagID = staticmethod(get_tag_id) + getTag = staticmethod(get_tag) + + def getPackageID(self,name): + c=context.cnx.cursor() + q="""SELECT id FROM package WHERE name=%(name)s""" + c.execute(q,locals()) + r=c.fetchone() + if not r: + return None + return r[0] + + getPackage = staticmethod(lookup_package) + + def listPackages(self, tagID=None, userID=None, pkgID=None, prefix=None, inherited=False, with_dups=False): + """List if tagID and/or userID is specified, limit the + list to packages belonging to the given user or with the + given tag. + + A list of maps is returned. 
Each map contains the + following keys: + + - package_id + - package_name + + If tagID, userID, or pkgID are specified, the maps will also contain the + following keys. + + - tag_id + - tag_name + - owner_id + - owner_name + - extra_arches + - blocked + """ + if tagID is None and userID is None and pkgID is None: + query = """SELECT id, name from package""" + results = _multiRow(query,{},('package_id', 'package_name')) + else: + if tagID is not None: + tagID = get_tag_id(tagID,strict=True) + if userID is not None: + userID = get_user(userID,strict=True)['id'] + if pkgID is not None: + pkgID = get_package_id(pkgID,strict=True) + results = readPackageList(tagID=tagID, userID=userID, pkgID=pkgID, + inherit=inherited, with_dups=with_dups).values() + + if prefix: + results = [package for package in results if package['package_name'].lower().startswith(prefix)] + + return results + + def checkTagPackage(self,tag,pkg): + """Check that pkg is in the list for tag. Returns true/false""" + tag_id = get_tag_id(tag,strict=False) + pkg_id = get_package_id(pkg,strict=False) + if pkg_id is None or tag_id is None: + return False + pkgs = readPackageList(tagID=tag_id, pkgID=pkg_id, inherit=True) + if not pkgs.has_key(pkg_id): + return False + else: + #still might be blocked + return not pkgs[pkg_id]['blocked'] + + def getPackageConfig(self,tag,pkg): + """Get config for package in tag""" + tag_id = get_tag_id(tag,strict=False) + pkg_id = get_package_id(pkg,strict=False) + if pkg_id is None or tag_id is None: + return None + pkgs = readPackageList(tagID=tag_id, pkgID=pkg_id, inherit=True) + return pkgs.get(pkg_id,None) + + getUser = staticmethod(get_user) + + def grantPermission(self, userinfo, permission): + """Grant a permission to a user""" + context.session.assertPerm('admin') + user_id = get_user(userinfo,strict=True)['id'] + perm_id = get_perm_id(permission,strict=True) + insert = """INSERT INTO user_perms (user_id, perm_id) + VALUES (%(user_id)i, %(perm_id)i)""" + _dml(insert, 
locals()) + + def addUser(self, username, password=None, status=None, krb_principal=None): + """Add a user to the database""" + + context.session.assertPerm('admin') + if get_user(username): + raise koji.GenericError, 'user already exists: %s' % username + c = context.cnx.cursor() + userID = koji.get_sequence_value(c, 'users_id_seq') + userType = koji.USERTYPES['NORMAL'] + insert = """INSERT INTO users (id, name, password, usertype, krb_principal) + VALUES (%(userID)i, %(username)s, %(password)s, %(userType)i, %(krb_principal)s)""" + context.commit_pending = True + c.execute(insert, locals()) + return userID + + #group management calls + newGroup = staticmethod(new_group) + addGroupMember = staticmethod(add_group_member) + dropGroupMember = staticmethod(drop_group_member) + getGroupMembers = staticmethod(get_group_members) + + def listUsers(self, userType=koji.USERTYPES['NORMAL'], queryOpts=None): + """List all users in the system. + type can be either koji.USERTYPES['NORMAL'] + or koji.USERTYPES['HOST']. 
Returns a list of maps with the + following keys: + + - id + - name + - status + - usertype + - krb_principal + + If no users of the specified + type exist, return an empty list.""" + fields = ('id', 'name', 'status', 'usertype', 'krb_principal') + clauses = ('usertype = %(userType)i',) + query = QueryProcessor(columns=fields, tables=('users',), clauses=clauses, + values=locals(), opts=queryOpts) + return query.execute() + + def getBuildConfig(self,tag): + """Return build configuration associated with a tag""" + taginfo = get_tag(tag,strict=True) + arches = taginfo['arches'] + if arches is None: + #follow inheritance for arches + order = readFullInheritance(taginfo['id']) + for link in order: + if link['noconfig']: + continue + arches = get_tag(link['parent_id'])['arches'] + if arches is not None: + taginfo['arches'] = arches + break + return taginfo + + def getRepo(self,tag,state=None): + if isinstance(tag,int): + id = tag + else: + id = get_tag_id(tag,strict=True) + + fields = ['repo.id', 'repo.state', 'events.id', 'events.time'] + aliases = ['id', 'state', 'create_event', 'creation_time'] + joins = ['events ON repo.create_event = events.id'] + clauses = ['repo.tag_id = %(id)i'] + if state is None: + state = koji.REPO_READY + clauses.append('repo.state = %(state)s' ) + + query = QueryProcessor(columns=fields, aliases=aliases, + tables=['repo'], joins=joins, clauses=clauses, + values=locals(), + opts={'order': '-creation_time', 'limit': 1}) + return query.executeOne() + + repoInfo = staticmethod(repo_info) + getActiveRepos = staticmethod(get_active_repos) + + def newRepo(self, tag): + """Create a newRepo task. 
returns task id""" + context.session.assertPerm('repo') + return make_task('newRepo', [tag], priority=15, channel='createrepo') + + def repoExpire(self, repo_id): + """mark repo expired""" + context.session.assertPerm('repo') + repo_expire(repo_id) + + def repoDelete(self, repo_id): + """Attempt to mark repo deleted, return number of references + + If the number of references is nonzero, no change is made + Does not remove from disk""" + context.session.assertPerm('repo') + return repo_delete(repo_id) + + def repoProblem(self, repo_id): + """mark repo as broken""" + context.session.assertPerm('repo') + repo_problem(repo_id) + + tagChangedSinceEvent = staticmethod(tag_changed_since_event) + createBuildTarget = staticmethod(create_build_target) + editBuildTarget = staticmethod(edit_build_target) + deleteBuildTarget = staticmethod(delete_build_target) + getBuildTargets = staticmethod(get_build_targets) + + def getBuildTarget(self, info): + """Return the build target with the given name or ID. 
+ If there is no matching build target, return None.""" + targets = get_build_targets(info=info) + if len(targets) == 1: + return targets[0] + else: + return None + + def taskFinished(self,taskId): + task = Task(taskId) + return task.isFinished() + + def getTaskRequest(self, taskId): + task = Task(taskId) + return task.getRequest() + + def getTaskResult(self, taskId): + task = Task(taskId) + return task.getResult() + + def getTaskInfo(self, task_id, request=False): + """Get information about a task""" + single = True + if isinstance(task_id, list) or isinstance(task_id, tuple): + single = False + else: + task_id = [task_id] + ret = [Task(id).getInfo(False, request) for id in task_id] + if single: + return ret[0] + else: + return ret + + def getTaskChildren(self, task_id): + """Return a list of the children + of the Task with the given ID.""" + task = Task(task_id) + return task.getChildren() + + def getTaskDescendents(self, task_id, request=False): + """Get all descendents of the task with the given ID. + Return a map of task_id -> list of child tasks. If the given + task has no descendents, the map will contain a single elements + mapping the given task ID to an empty list. Map keys will be strings + representing integers, due to limitations in xmlrpclib. 
If "request" + is true, the parameters sent with the xmlrpc request will be decoded and + included in the map.""" + task = Task(task_id) + return get_task_descendents(task, request=request) + + def listTasks(self, opts=None, queryOpts=None): + """Return list of tasks filtered by options + + Options(dictionary): + option[type]: meaning + arch[list]: limit to tasks for given arches + state[list]: limit to tasks of given state + owner[int]: limit to tasks owned by the user with the given ID + host_id[int]: limit to tasks running on the host with the given ID + parent[int]: limit to tasks with the given parent + decode[bool]: whether or not xmlrpc data in the 'request' and 'result' + fields should be decoded; defaults to False + method[str]: limit to tasks of the given method + completeBefore[float or str]: limit to tasks whose completion_time is before + the given date, in either float (seconds since the epoch) + or str (ISO) format + completeAfter[float or str]: limit to tasks whose completion_time is after + the given date, in either float (seconds since the epoch) + or str (ISO) format + """ + if opts is None: + opts = {} + + tables = ['task'] + joins = ['users ON task.owner = users.id'] + fields = ('task.id','state','create_time','completion_time','channel_id', + 'host_id','parent','label','waiting','awaited','owner','method', + 'arch','priority','weight','request','result', 'users.name', 'users.usertype') + aliases = ('id','state','create_time','completion_time','channel_id', + 'host_id','parent','label','waiting','awaited','owner','method', + 'arch','priority','weight','request','result', 'owner_name', 'owner_type') + + conditions = [] + for f in ['arch','state']: + if opts.has_key(f): + conditions.append('%s IN %%(%s)s' % (f, f)) + for f in ['owner', 'host_id', 'parent']: + if opts.has_key(f): + if opts[f] is None: + conditions.append('%s IS NULL' % f) + else: + conditions.append('%s = %%(%s)i' % (f, f)) + if opts.has_key('method'): + conditions.append('method 
= %(method)s') + if opts.get('completeBefore') != None: + completeBefore = opts['completeBefore'] + if not isinstance(completeBefore, str): + opts['completeBefore'] = datetime.datetime.fromtimestamp(completeBefore).isoformat(' ') + conditions.append('completion_time < %(completeBefore)s') + if opts.get('completeAfter') != None: + completeAfter = opts['completeAfter'] + if not isinstance(completeAfter, str): + opts['completeAfter'] = datetime.datetime.fromtimestamp(completeAfter).isoformat(' ') + conditions.append('completion_time > %(completeAfter)s') + + query = QueryProcessor(columns=fields, aliases=aliases, tables=tables, joins=joins, + clauses=conditions, values=opts, opts=queryOpts) + tasks = query.execute() + if queryOpts and (queryOpts.get('countOnly') or queryOpts.get('asList')): + # Either of the above options makes us unable to easily the decode + # the xmlrpc data + return tasks + + if opts.get('decode'): + for task in tasks: + # decode xmlrpc data + for f in ('request','result'): + if task[f]: + try: + data,method = xmlrpclib.loads(base64.decodestring(task[f])) + except xmlrpclib.Fault, fault: + data = fault + task[f] = data + return tasks + + def taskReport(self, owner=None): + """Return data on active or recent tasks""" + fields = ( + ('task.id','id'), + ('task.state','state'), + ('task.create_time','create_time'), + ('task.completion_time','completion_time'), + ('task.channel_id','channel_id'), + ('channels.name','channel'), + ('task.host_id','host_id'), + ('host.name','host'), + ('task.parent','parent'), + ('task.waiting','waiting'), + ('task.awaited','awaited'), + ('task.method','method'), + ('task.arch','arch'), + ('task.priority','priority'), + ('task.weight','weight'), + ('task.owner','owner_id'), + ('users.name','owner'), + ('build.id','build_id'), + ('package.name','build_name'), + ('build.version','build_version'), + ('build.release','build_release'), + ) + q = """SELECT %s FROM task + JOIN channels ON task.channel_id = channels.id + JOIN 
users ON task.owner = users.id + LEFT OUTER JOIN host ON task.host_id = host.id + LEFT OUTER JOIN build ON build.task_id = task.id + LEFT OUTER JOIN package ON build.pkg_id = package.id + WHERE (task.state NOT IN (%%(CLOSED)d,%%(CANCELED)d,%%(FAILED)d) + OR NOW() - task.create_time < '1 hour'::interval) + """ % ','.join([f[0] for f in fields]) + if owner: + q += """AND users.id = %s + """ % get_user(owner, strict=True)['id'] + q += """ORDER BY priority,create_time + """ + #XXX hard-coded interval + c = context.cnx.cursor() + c.execute(q,koji.TASK_STATES) + return [dict(zip([f[1] for f in fields],row)) for row in c.fetchall()] + + def resubmitTask(self, taskID): + """Retry a canceled or failed task, using the same parameter as the original task. + The logged-in user must be the owner of the original task or an admin.""" + task = Task(taskID) + if not (task.isCanceled() or task.isFailed()): + raise koji.GenericError, 'only canceled or failed tasks may be resubmitted' + taskInfo = task.getInfo() + if taskInfo['parent'] != None: + raise koji.GenericError, 'only top-level tasks may be resubmitted' + if not (context.session.user_id == taskInfo['owner'] or self.hasPerm('admin')): + raise koji.GenericError, 'only the task owner or an admin may resubmit a task' + + args = task.getRequest() + channel = get_channel(taskInfo['channel_id'], strict=True) + + return make_task(taskInfo['method'], args, arch=taskInfo['arch'], channel=channel['name'], priority=taskInfo['priority']) + + def addHost(self, hostname, arches, krb_principal=None): + """Add a host to the database""" + + context.session.assertPerm('admin') + if get_host(hostname): + raise koji.GenericError, 'host already exists: %s' % hostname + q = """SELECT id FROM channels WHERE name = 'default'""" + default_channel = _singleValue(q) + #users entry + userID = _singleValue("SELECT nextval('users_id_seq')", strict=True) + userType = koji.USERTYPES['HOST'] + if krb_principal is None: + fmt = 
context.opts.get('HostPrincipalFormat','compile/%s@EXAMPLE.COM') + krb_principal = fmt % hostname + insert = """INSERT INTO users (id, name, password, usertype, krb_principal) + VALUES (%(userID)i, %(hostname)s, null, %(userType)i, %(krb_principal)s)""" + _dml(insert, locals()) + #host entry + hostID = _singleValue("SELECT nextval('host_id_seq')", strict=True) + arches = " ".join(arches) + insert = """INSERT INTO host (id, user_id, name, arches) + VALUES (%(hostID)i, %(userID)i, %(hostname)s, %(arches)s)""" + _dml(insert, locals()) + #host_channels entry + insert = """INSERT INTO host_channels (host_id, channel_id) + VALUES (%(hostID)i, %(default_channel)i)""" + _dml(insert, locals()) + return hostID + + + def enableHost(self, hostname): + """Mark a host as enabled""" + set_host_enabled(hostname, True) + + def disableHost(self, hostname): + """Mark a host as disabled""" + set_host_enabled(hostname, False) + + getHost = staticmethod(get_host) + addHostToChannel = staticmethod(add_host_to_channel) + removeHostFromChannel = staticmethod(remove_host_from_channel) + + def listHosts(self, arches=None, channelID=None, ready=None, enabled=None, userID=None, queryOpts=None): + """Get a list of hosts. "arches" is a list of string architecture + names, e.g. ['i386', 'ppc64']. If one of the arches associated with a given + host appears in the list, it will be included in the results. 
If "ready" and "enabled" + are specified, only hosts with the given value for the respective field will + be included.""" + fields = ('id', 'user_id', 'name', 'arches', 'task_load', + 'capacity', 'ready', 'enabled') + + clauses = [] + joins = [] + if arches != None: + # include the regex constraints below so we can match 'ppc' without + # matching 'ppc64' + if not (isinstance(arches, list) or isinstance(arches, tuple)): + arches = [arches] + archClause = [r"""arches ~ '\\m%s\\M'""" % arch for arch in arches] + clauses.append('(' + ' OR '.join(archClause) + ')') + if channelID != None: + joins.append('host_channels on host.id = host_channels.host_id') + clauses.append('host_channels.channel_id = %(channelID)i') + if ready != None: + clauses.append('ready is %s' % ready) + if enabled != None: + clauses.append('enabled is %s' % enabled) + if userID != None: + clauses.append('user_id = %(userID)i') + + query = QueryProcessor(columns=fields, tables=['host'], + joins=joins, clauses=clauses, + values=locals(), opts=queryOpts) + return query.execute() + + def getLastHostUpdate(self, hostID): + """Return the latest update timestampt for the host + + The timestamp represents the last time the host with the given + ID contacted the hub. Returns None if the host has never contacted + the hub.""" + query = """SELECT update_time FROM sessions + JOIN host ON sessions.user_id = host.user_id + WHERE host.id = %(hostID)i + ORDER BY update_time DESC + LIMIT 1 + """ + return _singleValue(query, locals(), strict=False) + + getChannel = staticmethod(get_channel) + listChannels=staticmethod(list_channels) + + getBuildroot=staticmethod(get_buildroot) + + def getBuildrootListing(self,id): + """Return a list of packages in the buildroot""" + br = BuildRoot(id) + return br.getList() + + listBuildroots = staticmethod(query_buildroots) + + def hasPerm(self, perm): + """Check if the logged-in user has the given permission. 
Return False if + they do not have the permission, or if they are not logged-in.""" + return context.session.hasPerm(perm) + + def getPerms(self): + """Get a list of the permissions granted to the currently logged-in user.""" + return context.session.getPerms() + + def getUserPerms(self, userID): + """Get a list of the permissions granted to the user with the given ID.""" + return koji.auth.get_user_perms(userID) + + def getAllPerms(self): + """Get a list of all permissions in the system. Returns a list of maps. Each + map contains the following keys: + + - id + - name + """ + query = """SELECT id, name FROM permissions + ORDER BY id""" + + return _multiRow(query, {}, ['id', 'name']) + + def getLoggedInUser(self): + """Return information about the currently logged-in user. Returns data + in the same format as getUser(). If there is no currently logged-in user, + return None.""" + if context.session.logged_in: + return self.getUser(context.session.user_id) + else: + return None + + def setBuildOwner(self, build, user): + context.session.assertPerm('admin') + buildinfo = get_build(build) + if not buildinfo: + raise koji.GenericError, 'build does not exist: %s' % build + userinfo = get_user(user) + if not userinfo: + raise koji.GenericError, 'user does not exist: %s' % user + userid = userinfo['id'] + buildid = buildinfo['id'] + q = """UPDATE build SET owner=%(userid)i WHERE id=%(buildid)i""" + _dml(q,locals()) + + def setBuildTimestamp(self, build, ts): + """Set the completion time for a build + + build should a valid nvr or build id + ts should be # of seconds since epoch or optionally an + xmlrpc DateTime value""" + context.session.assertPerm('admin') + buildinfo = get_build(build) + if not buildinfo: + raise koji.GenericError, 'build does not exist: %s' % build + elif isinstance(ts, xmlrpclib.DateTime): + #not recommended + #the xmlrpclib.DateTime class is almost useless + try: + ts = time.mktime(time.strptime(str(ts),'%Y%m%dT%H:%M:%S')) + except ValueError: + 
raise koji.GenericError, "Invalid time: %s" % ts + elif not isinstance(ts, (int, long, float)): + raise koji.GenericError, "Invalid type for timestamp" + buildid = buildinfo['id'] + q = """UPDATE build + SET completion_time=TIMESTAMP 'epoch' AT TIME ZONE 'utc' + '%(ts)f seconds'::interval + WHERE id=%%(buildid)i""" % locals() + _dml(q,locals()) + + def count(self, methodName, *args, **kw): + """Execute the XML-RPC method with the given name and count the results. + A method return value of None will return O, a return value of type "list", "tuple", or "dict" + will return len(value), and a return value of any other type will return 1. An invalid + methodName will raise an AttributeError, and invalid arguments will raise a TypeError.""" + result = getattr(self, methodName)(*args, **kw) + if result == None: + return 0 + elif isinstance(result, list) or isinstance(result, tuple) or isinstance(result, dict): + return len(result) + else: + return 1 + + def _sortByKeyFunc(self, key, noneGreatest=True): + """Return a function to sort a list of maps by the given key. + If the key starts with '-', sort in reverse order. If noneGreatest + is True, None will sort higher than all other values (instead of lower). + """ + if noneGreatest: + # Normally None evaluates to be less than every other value + # Invert the comparison so it always evaluates to greater + cmpFunc = lambda a, b: (a is None or b is None) and -(cmp(a, b)) or cmp(a, b) + else: + cmpFunc = cmp + + if key.startswith('-'): + key = key[1:] + return lambda a, b: cmpFunc(b[key], a[key]) + else: + return lambda a, b: cmpFunc(a[key], b[key]) + + def filterResults(self, methodName, *args, **kw): + """Execute the XML-RPC method with the given name and filter the results + based on the options specified in the keywork option "filterOpts". The method + must return a list of maps. Any other return type will result in a TypeError. 
+ Currently supported options are: + - offset: the number of elements to trim off the front of the list + - limit: the maximum number of results to return + - order: the map key to use to sort the list; the list will be sorted before + offset or limit are applied + - noneGreatest: when sorting, consider 'None' to be greater than all other values; + python considers None less than all other values, but Postgres sorts + NULL higher than all other values; default to True for consistency + with database sorts + """ + filterOpts = kw.pop('filterOpts', {}) + + results = getattr(self, methodName)(*args, **kw) + if results is None: + return None + elif not isinstance(results, list): + raise TypeError, '%s() did not return a list' % methodName + + order = filterOpts.get('order') + if order: + results.sort(self._sortByKeyFunc(order, filterOpts.get('noneGreatest', True))) + + offset = filterOpts.get('offset') + if offset is not None: + results = results[offset:] + limit = filterOpts.get('limit') + if limit is not None: + results = results[:limit] + + return results + + def getBuildNotifications(self, userID=None): + """Get build notifications for the user with the given ID. If no ID + is specified, get the notifications for the currently logged-in user. If + there is no currently logged-in user, raise a GenericError.""" + if userID is None: + user = self.getLoggedInUser() + if user is None: + raise koji.GenericError, 'not logged-in' + else: + userID = user['id'] + return get_build_notifications(userID) + + def getBuildNotification(self, id): + """Get the build notification with the given ID. 
Return None + if there is no notification with the given ID.""" + fields = ('id', 'user_id', 'package_id', 'tag_id', 'success_only', 'email') + query = """SELECT %s + FROM build_notifications + WHERE id = %%(id)i + """ % ', '.join(fields) + return _singleRow(query, locals(), fields) + + def updateNotification(self, id, package_id, tag_id, success_only, email): + """Update an existing build notification with new data. If the notification + with the given ID doesn't exist, or the currently logged-in user is not the + owner or the notification or an admin, raise a GenericError.""" + currentUser = self.getLoggedInUser() + if not currentUser: + raise koji.GenericError, 'not logged-in' + + orig_notif = self.getBuildNotification(id) + if not orig_notif: + raise koji.GenericError, 'no notification with ID: %i' % id + elif not (orig_notif['user_id'] == currentUser['id'] or + self.hasPerm('admin')): + raise koji.GenericError, 'user %i cannot update notifications for user %i' % \ + (currentUser['id'], orig_notif['user_id']) + + update = """UPDATE build_notifications + SET package_id = %(package_id)s, + tag_id = %(tag_id)s, + success_only = %(success_only)s, + email = %(email)s + WHERE id = %(id)i + """ + + _dml(update, locals()) + + def createNotification(self, user_id, package_id, tag_id, success_only, email): + """Create a new notification. 
If the user_id does not match the currently logged-in user + and the currently logged-in user is not an admin, raise a GenericError.""" + currentUser = self.getLoggedInUser() + if not currentUser: + raise koji.GenericError, 'not logged in' + if not (user_id == currentUser['id'] or self.hasPerm('admin')): + raise koji.GenericError, 'user %i cannot create notifications for user %i' % \ + (currentUser['id'], user_id) + insert = """INSERT INTO build_notifications + (user_id, package_id, tag_id, success_only, email) + VALUES + (%(user_id)i, %(package_id)s, %(tag_id)s, %(success_only)s, %(email)s) + """ + _dml(insert, locals()) + + def deleteNotification(self, id): + """Delete the notification with the given ID. If the currently logged-in + user is not the owner of the notification or an admin, raise a GenericError.""" + notification = self.getBuildNotification(id) + if not notification: + raise koji.GenericError, 'no notification with ID: %i' % id + currentUser = self.getLoggedInUser() + if not currentUser: + raise koji.GenericError, 'not logged-in' + + if not (notification['user_id'] == currentUser['id'] or + self.hasPerm('admin')): + raise koji.GenericError, 'user %i cannot delete notifications for user %i' % \ + (currentUser['id'], notification['user_id']) + delete = """DELETE FROM build_notifications WHERE id = %(id)i""" + _dml(delete, locals()) + + def _prepareSearchTerms(self, terms, matchType): + """Process the search terms before passing them to the database. + If matchType is "glob", "_" will be replaced with "\_" (to match literal + underscores), "?" will be replaced with "_", and "*" will + be replaced with "%". 
If matchType is "regexp", no changes will be + made.""" + if matchType == 'glob': + return terms.replace('\\', '\\\\').replace('_', r'\_').replace('?', '_').replace('*', '%') + else: + return terms + + _searchTables = {'package': 'package', + 'build': 'build', + 'tag': 'tag', + 'target': 'build_target', + 'user': 'users', + 'host': 'host', + 'rpm': 'rpminfo', + 'file': 'rpmfiles'} + + def search(self, terms, type, matchType, queryOpts=None): + """Search for an item in the database matching "terms". + "type" specifies what object type to search for, and must be + one of "package", "build", "tag", "target", "user", "host", + "rpm", or "file". "matchType" specifies the type of search to + perform, and must be one of "glob" or "regexp". All searches + are case-insensitive. A list of maps containing "id" and + "name" will be returned. If no matches are found, an empty + list will be returned.""" + if not terms: + raise koji.GenericError, 'empty search terms' + table = self._searchTables.get(type) + if not table: + raise koji.GenericError, 'unknown search type: %s' % type + + if matchType == 'glob': + if '?' in terms or '*' in terms: + oper = 'like' + terms = self._prepareSearchTerms(terms, matchType) + else: + # if we're not actually globbing anything, use = so it can use the index; + # also, don't pass terms through _prepareSearchTerms(), because we don't need + # to escape slashes or underscores + oper = '=' + elif matchType == 'regexp': + oper = '~' + terms = self._prepareSearchTerms(terms, matchType) + else: + raise koji.GenericError, 'unknown match type: %s' % matchType + + cols = ('id', 'name') + aliases = cols + joins = [] + if type == 'build': + joins.append('package ON build.pkg_id = package.id') + clause = "package.name || '-' || build.version || '-' || build.release %s %%(terms)s" % oper + cols = ('build.id', "package.name || '-' || build.version || '-' || build.release") + elif type == 'rpm': + clause = "name || '-' || version || '-' || release || '.' 
|| arch || '.rpm' %s %%(terms)s" % oper + cols = ('id', "name || '-' || version || '-' || release || '.' || arch || '.rpm'") + elif type == 'file': + clause = 'filename %s %%(terms)s' % oper + cols = ('rpm_id', 'filename') + else: + clause = 'name %s %%(terms)s' % oper + + query = QueryProcessor(columns=cols, + aliases=aliases, tables=(table,), + joins=joins, clauses=(clause,), + values=locals(), opts=queryOpts) + return query.execute() + + +class BuildRoot(object): + + def __init__(self,id=None): + if id is None: + #db entry has yet to be created + self.id = None + else: + logging.getLogger("koji.hub").debug("BuildRoot id: %s" % id) + #load buildroot data + self.load(id) + + def load(self,id): + fields = ('id', 'host_id', 'repo_id', 'arch', 'task_id', + 'create_event', 'retire_event', 'state') + q = """SELECT %s FROM buildroot WHERE id=%%(id)i""" % (",".join(fields)) + data = _singleRow(q,locals(),fields,strict=False) + if data == None: + raise koji.GenericError, 'no buildroot with ID: %i' % id + self.id = id + self.data = data + + def new(self, host, repo, arch, task_id=None): + state = koji.BR_STATES['INIT'] + id = _singleValue("SELECT nextval('buildroot_id_seq')", strict=True) + q = """INSERT INTO buildroot(id,host_id,repo_id,arch,state,task_id) + VALUES (%(id)i,%(host)i,%(repo)i,%(arch)s,%(state)i,%(task_id)s)""" + _dml(q,locals()) + self.load(id) + return self.id + + def verifyTask(self,task_id): + if self.id is None: + raise koji.GenericError, "buildroot not specified" + return (task_id == self.data['task_id']) + + def assertTask(self,task_id): + if not self.verifyTask(task_id): + raise koji.NotAllowed, 'Task %s does not have lock on buildroot %s' \ + %(task_id,self.id) + + def verifyHost(self,host_id): + if self.id is None: + raise koji.GenericError, "buildroot not specified" + return (host_id == self.data['host_id']) + + def assertHost(self,host_id): + if not self.verifyHost(host_id): + raise koji.NotAllowed, "Host %s not owner of buildroot %s" \ + % 
(host_id,self.id) + + def setState(self,state): + if self.id is None: + raise koji.GenericError, "buildroot not specified" + id = self.id + if isinstance(state,str): + state = koji.BR_STATES[state] + #sanity checks + if state == koji.BR_STATES['INIT']: + #we do not re-init buildroots + raise koji.GenericError, "Cannot change buildroot state to INIT" + q = """SELECT state,retire_event FROM buildroot WHERE id=%(id)s FOR UPDATE""" + lstate,retire_event = _fetchSingle(q,locals(),strict=True) + if koji.BR_STATES[lstate] == 'EXPIRED': + #we will quietly ignore a request to expire an expired buildroot + #otherwise this is an error + if state == lstate: + return + else: + raise koji.GenericError, "buildroot %i is EXPIRED" % id + set = "state=%(state)s" + if koji.BR_STATES[state] == 'EXPIRED': + set += ",retire_event=get_event()" + update = """UPDATE buildroot SET %s WHERE id=%%(id)s""" % set + _dml(update,locals()) + self.data['state'] = state + + def getList(self): + if self.id is None: + raise koji.GenericError, "buildroot not specified" + brootid = self.id + fields = ('rpm_id','is_update','name','version','release','epoch', + 'arch','build_id') + q = """SELECT %s FROM buildroot_listing + JOIN rpminfo on rpm_id = rpminfo.id + WHERE buildroot_listing.buildroot_id = %%(brootid)s + """ % ','.join(fields) + return _multiRow(q,locals(),fields) + + def _setList(self,rpmlist,update=False): + """Set or update the list of rpms in a buildroot""" + if self.id is None: + raise koji.GenericError, "buildroot not specified" + brootid = self.id + if update: + current = dict([(r['rpm_id'],1) for r in self.getList()]) + q = """INSERT INTO buildroot_listing (buildroot_id,rpm_id,is_update) + VALUES (%(brootid)s,%(rpm_id)s,%(update)s)""" + rpm_ids = [] + for an_rpm in rpmlist: + rpm_id = get_rpm(an_rpm, strict=True)['id'] + if update and current.has_key(rpm_id): + #ignore duplicate packages for updates + continue + rpm_ids.append(rpm_id) + #we sort to try to avoid deadlock issues + 
rpm_ids.sort() + for rpm_id in rpm_ids: + _dml(q, locals()) + + def setList(self,rpmlist): + """Set the initial list of rpms in a buildroot""" + if self.data['state'] != koji.BR_STATES['INIT']: + raise koji.GenericError, "buildroot %(id)s in wrong state %(state)s" % self.data + self._setList(rpmlist,update=False) + + def updateList(self,rpmlist): + """Update the list of packages in a buildroot""" + if self.data['state'] != koji.BR_STATES['BUILDING']: + raise koji.GenericError, "buildroot %(id)s in wrong state %(state)s" % self.data + self._setList(rpmlist,update=True) + + +class Host(object): + + def __init__(self,id=None): + remote_id = context.session.getHostId() + if id is None: + id = remote_id + if id is None: + raise koji.AuthError, "No host specified" + self.id = id + self.same_host = (id == remote_id) + + def verify(self): + """Verify that the remote host matches and has the lock""" + if not self.same_host: + raise koji.AuthError, "Host mismatch" + if not context.session.exclusive: + raise koji.AuthError, "This method requires an exclusive session" + return True + + def taskUnwait(self,parent): + """Clear wait data for task""" + c = context.cnx.cursor() + #unwait the task + q = """UPDATE task SET waiting='false' WHERE id = %(parent)s""" + context.commit_pending = True + c.execute(q,locals()) + #...and un-await its subtasks + q = """UPDATE task SET awaited='false' WHERE parent=%(parent)s""" + c.execute(q,locals()) + + def taskSetWait(self,parent,tasks): + """Mark task waiting and subtasks awaited""" + self.taskUnwait(parent) + c = context.cnx.cursor() + #mark tasks awaited + q = """UPDATE task SET waiting='true' WHERE id=%(parent)s""" + context.commit_pending = True + c.execute(q,locals()) + if tasks is None: + #wait on all subtasks + q = """UPDATE task SET awaited='true' WHERE parent=%(parent)s""" + c.execute(q,locals()) + else: + for id in tasks: + q = """UPDATE task SET awaited='true' WHERE id=%(id)s""" + c.execute(q,locals()) + + def 
taskWaitCheck(self,parent): + """Return status of awaited subtask + + The return value is [finished, unfinished] where each entry + is a list of task ids.""" + #check to see if any of the tasks have finished + c = context.cnx.cursor() + q = """ + SELECT id,state FROM task + WHERE parent=%(parent)s AND awaited = TRUE""" + c.execute(q,locals()) + canceled = koji.TASK_STATES['CANCELED'] + closed = koji.TASK_STATES['CLOSED'] + failed = koji.TASK_STATES['FAILED'] + finished = [] + unfinished = [] + for id,state in c.fetchall(): + if state in (canceled,closed,failed): + finished.append(id) + else: + unfinished.append(id) + return finished, unfinished + + def taskWait(self,parent): + """Return task results or mark tasks as waited upon""" + finished, unfinished = self.taskWaitCheck(parent) + # un-await finished tasks + if finished: + context.commit_pending = True + for id in finished: + c = context.cnx.cursor() + q = """UPDATE task SET awaited='false' WHERE id=%(id)s""" + c.execute(q,locals()) + return [finished,unfinished] + + def taskWaitResults(self,parent,tasks): + results = {} + #if we're getting results, we're done waiting + self.taskUnwait(parent) + c = context.cnx.cursor() + canceled = koji.TASK_STATES['CANCELED'] + closed = koji.TASK_STATES['CLOSED'] + failed = koji.TASK_STATES['FAILED'] + q = """ + SELECT id,state FROM task + WHERE parent=%(parent)s""" + if tasks is None: + #query all subtasks + tasks = [] + c.execute(q,locals()) + for id,state in c.fetchall(): + if state == canceled: + raise koji.GenericError, "Subtask canceled" + elif state in (closed,failed): + tasks.append(id) + #would use a dict, but xmlrpc requires the keys to be strings + results = [] + for id in tasks: + task = Task(id) + results.append([id,task.getResult()]) + return results + + def getHostTasks(self): + """get status of open tasks assigned to host""" + c = context.cnx.cursor() + host_id = self.id + #query tasks + fields = ['id','waiting','weight'] + st_open = koji.TASK_STATES['OPEN'] + 
q = """ + SELECT %s FROM task + WHERE host_id = %%(host_id)s AND state = %%(st_open)s + """ % (",".join(fields)) + c.execute(q,locals()) + tasks = [ dict(zip(fields,x)) for x in c.fetchall() ] + for task in tasks: + id = task['id'] + if task['waiting']: + finished, unfinished = self.taskWaitCheck(id) + if finished: + task['alert'] = True + return tasks + + def updateHost(self,task_load,ready): + host_data = get_host(self.id) + if task_load != host_data['task_load'] or ready != host_data['ready']: + c = context.cnx.cursor() + id = self.id + q = """UPDATE host SET task_load=%(task_load)s,ready=%(ready)s WHERE id=%(id)s""" + c.execute(q,locals()) + context.commit_pending = True + + def getLoadData(self): + """Get load balancing data + + This data is relatively small and the necessary load analysis is + relatively complex, so we let the host machines crunch it.""" + return [get_ready_hosts(),get_active_tasks()] + + def getTask(self): + """Open next available task and return it""" + c = context.cnx.cursor() + id = self.id + #get arch and channel info for host + q = """ + SELECT arches FROM host WHERE id = %(id)s + """ + c.execute(q,locals()) + arches = c.fetchone()[0].split() + q = """ + SELECT channel_id FROM host_channels WHERE host_id = %(id)s + """ + c.execute(q,locals()) + channels = [ x[0] for x in c.fetchall() ] + + #query tasks + fields = ['id', 'state', 'method', 'request', 'channel_id', 'arch', 'parent'] + st_free = koji.TASK_STATES['FREE'] + st_assigned = koji.TASK_STATES['ASSIGNED'] + q = """ + SELECT %s FROM task + WHERE (state = %%(st_free)s) + OR (state = %%(st_assigned)s AND host_id = %%(id)s) + ORDER BY priority,create_time + """ % (",".join(fields)) + c.execute(q,locals()) + for data in c.fetchall(): + data = dict(zip(fields,data)) + # XXX - we should do some pruning here, but for now... 
+ # check arch + if data['arch'] not in arches: + continue + # NOTE: channels ignored for explicit assignments + if data['state'] != st_assigned and data['channel_id'] not in channels: + continue + task = Task(data['id']) + ret = task.open(self.id) + if ret is None: + #someone else got it while we were looking + #log_error("task %s seems to be locked" % task['id']) + continue + return ret + #else no appropriate tasks + return None + + def isEnabled(self): + """Return whether this host is enabled or not.""" + query = """SELECT enabled FROM host WHERE id = %(id)i""" + return _singleValue(query, {'id': self.id}, strict=True) + +class HostExports(object): + '''Contains functions that are made available via XMLRPC''' + + def getID(self): + host = Host() + host.verify() + return host.id + + def updateHost(self,task_load,ready): + host = Host() + host.verify() + host.updateHost(task_load,ready) + + def getLoadData(self): + host = Host() + host.verify() + return host.getLoadData() + + def getHost(self): + """Return information about this host""" + host = Host() + host.verify() + return get_host(host.id) + + def openTask(self,task_id): + host = Host() + host.verify() + task = Task(task_id) + return task.open(host.id) + + def getTask(self): + host = Host() + host.verify() + return host.getTask() + + def closeTask(self,task_id,response): + host = Host() + host.verify() + task = Task(task_id) + task.assertHost(host.id) + return task.close(response) + + def failTask(self,task_id,response): + host = Host() + host.verify() + task = Task(task_id) + task.assertHost(host.id) + return task.fail(response) + + def freeTasks(self,tasks): + host = Host() + host.verify() + for task_id in tasks: + task = Task(task_id) + if not task.verifyHost(host.id): + #it's possible that a task was freed/reassigned since the host + #last checked, so we should not raise an error + continue + task.free() + #XXX - unfinished + #remove any files related to task + + def setTaskWeight(self,task_id,weight): + 
host = Host() + host.verify() + task = Task(task_id) + task.assertHost(host.id) + return task.setWeight(weight) + + def getHostTasks(self): + host = Host() + host.verify() + return host.getHostTasks() + + def taskSetWait(self,parent,tasks): + host = Host() + host.verify() + return host.taskSetWait(parent,tasks) + + def taskWait(self,parent): + host = Host() + host.verify() + return host.taskWait(parent) + + def taskWaitResults(self,parent,tasks): + host = Host() + host.verify() + return host.taskWaitResults(parent,tasks) + + def subtask(self,method,arglist,parent,**opts): + host = Host() + host.verify() + ptask = Task(parent) + ptask.assertHost(host.id) + opts['parent'] = parent + if opts.has_key('label'): + # first check for existing task with this parent/label + q = """SELECT id FROM task + WHERE parent=%(parent)s AND label=%(label)s""" + row = _fetchSingle(q,opts) + if row: + #return task id + return row[0] + return make_task(method,arglist,**opts) + + def subtask2(self,__parent,__taskopts,__method,*args,**opts): + """A wrapper around subtask with optional signature + + Parameters: + __parent: task id of the parent task + __taskopts: dictionary of task options + __method: the method to be invoked + + Remaining args are passed on to the subtask + """ + args = koji.encode_args(*args,**opts) + return self.subtask(__method,args,__parent,**__taskopts) + + def moveBuildToScratch(self, task_id, srpm, rpms, logs=None): + "Move a completed scratch build into place (not imported)" + host = Host() + host.verify() + task = Task(task_id) + task.assertHost(host.id) + uploadpath = koji.pathinfo.work() + #verify files exist + for relpath in [srpm] + rpms: + fn = "%s/%s" % (uploadpath,relpath) + if not os.path.exists(fn): + raise koji.GenericError, "no such file: %s" % fn + #figure out storage location + # //task_ + scratchdir = koji.pathinfo.scratch() + username = get_user(task.getOwner())['name'] + dir = "%s/%s/task_%s" % (scratchdir, username, task_id) + koji.ensuredir(dir) + 
for relpath in [srpm] + rpms: + fn = "%s/%s" % (uploadpath,relpath) + dest = "%s/%s" % (dir,os.path.basename(fn)) + os.rename(fn,dest) + os.symlink(dest,fn) + if logs: + for key, files in logs.iteritems(): + if key: + logdir = "%s/logs/%s" % (dir, key) + else: + logdir = "%s/logs" % dir + koji.ensuredir(logdir) + for relpath in files: + fn = "%s/%s" % (uploadpath,relpath) + dest = "%s/%s" % (logdir,os.path.basename(fn)) + os.rename(fn,dest) + os.symlink(dest,fn) + + def initBuild(self,data): + """Create a stub build entry. + + This is done at the very beginning of the build to inform the + system the build is underway. + """ + host = Host() + host.verify() + #sanity checks + task = Task(data['task_id']) + task.assertHost(host.id) + #prep the data + data['owner'] = task.getOwner() + data['state'] = koji.BUILD_STATES['BUILDING'] + data['completion_time'] = None + return new_build(data) + + def completeBuild(self, task_id, build_id, srpm, rpms, brmap=None, logs=None): + """Import final build contents into the database""" + #sanity checks + host = Host() + host.verify() + task = Task(task_id) + task.assertHost(host.id) + result = import_build(srpm, rpms, brmap, task_id, build_id, logs=logs) + build_notification(task_id, build_id) + return result + + def failBuild(self, task_id, build_id): + """Mark the build as failed. 
If the current state is not
+ 'BUILDING', or the current completion_time is not null, a
+ GenericError will be raised."""
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+
+ query = """SELECT state, completion_time
+ FROM build
+ WHERE id = %(build_id)i
+ FOR UPDATE"""
+ result = _singleRow(query, locals(), ('state', 'completion_time'))
+
+ if not result:
+ raise koji.GenericError, 'no build with ID: %i' % build_id
+ elif result['state'] != koji.BUILD_STATES['BUILDING']:
+ raise koji.GenericError, 'cannot update build %i, state: %s' % \
+ (build_id, koji.BUILD_STATES[result['state']])
+ elif result['completion_time'] is not None:
+ raise koji.GenericError, 'cannot update build %i, completed at %s' % \
+ (build_id, result['completion_time'])
+
+ state = koji.BUILD_STATES['FAILED']
+ update = """UPDATE build
+ SET state = %(state)i,
+ completion_time = NOW()
+ WHERE id = %(build_id)i"""
+ _dml(update, locals())
+ build_notification(task_id, build_id)
+
+ def tagBuild(self,task_id,tag,build,force=False,fromtag=None):
+ """Tag a build (host version)
+
+ This tags as the user who owns the task
+
+ If fromtag is specified, also untag the package (i.e. move in a single
+ transaction)
+
+ No return value
+ """
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ user_id = task.getOwner()
+ if fromtag:
+ _untag_build(fromtag,build,user_id=user_id,force=force,strict=True)
+ _tag_build(tag,build,user_id=user_id,force=force)
+
+ def tagNotification(self, is_successful, tag_id, from_id, build_id, user_id, ignore_success=False, failure_msg=''):
+ """Create a tag notification message.
+ Handles creation of tagNotification tasks for hosts."""
+ tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_success, failure_msg)
+
+ def importChangelog(self, buildID, rpmfile):
+ """Import the changelog for the given build
+
+ The changelog data is pulled from the rpm provided. 
+ rpmfile must be a path relative to the 'work' dir. + If the build already has changelog information, the existing + changelog information is cleared and the changelog from the + given rpm is imported.""" + host = Host() + host.verify() + + build = get_build(buildID, strict=True) + taskID = build['task_id'] + if not taskID: + raise koji.GenericError, 'no task for build %i' % build['id'] + + task = Task(taskID) + task.assertHost(host.id) + + rpmfile = '%s/%s' % (koji.pathinfo.work(), rpmfile) + import_changelog(build, rpmfile, replace=True) + + def newBuildRoot(self, repo, arch, task_id=None): + host = Host() + host.verify() + if task_id is not None: + Task(task_id).assertHost(host.id) + br = BuildRoot() + return br.new(host.id,repo,arch,task_id=task_id) + + def setBuildRootState(self,brootid,state,task_id=None): + host = Host() + host.verify() + if task_id is not None: + Task(task_id).assertHost(host.id) + br = BuildRoot(brootid) + br.assertHost(host.id) + if task_id is not None: + br.assertTask(task_id) + return br.setState(state) + + def setBuildRootList(self,brootid,rpmlist,task_id=None): + host = Host() + host.verify() + if task_id is not None: + Task(task_id).assertHost(host.id) + br = BuildRoot(brootid) + br.assertHost(host.id) + if task_id is not None: + br.assertTask(task_id) + return br.setList(rpmlist) + + def updateBuildRootList(self,brootid,rpmlist,task_id=None): + host = Host() + host.verify() + if task_id is not None: + Task(task_id).assertHost(host.id) + br = BuildRoot(brootid) + br.assertHost(host.id) + if task_id is not None: + br.assertTask(task_id) + return br.updateList(rpmlist) + + def repoInit(self, tag, with_src=False): + """Initialize a new repo for tag""" + host = Host() + host.verify() + return repo_init(tag, with_src=with_src) + + def repoAddRPM(self, repo_id, path): + """Add an uploaded rpm to a repo""" + host = Host() + host.verify() + rinfo = repo_info(repo_id, strict=True) + repodir = koji.pathinfo.repo(repo_id, rinfo['tag_name']) + 
if rinfo['state'] != koji.REPO_INIT: + raise koji.GenericError, "Repo %(id)s not in INIT state (got %(state)s)" % rinfo + #verify file exists + uploadpath = koji.pathinfo.work() + filepath = "%s/%s" % (uploadpath, path) + if not os.path.exists(filepath): + raise koji.GenericError, "no such file: %s" % filepath + rpminfo = koji.get_header_fields(filepath, ('arch','sourcepackage')) + dirs = [] + if not rpminfo['sourcepackage'] and rpminfo['arch'] != 'noarch': + arch = koji.canonArch(rpminfo['arch']) + dir = "%s/%s/RPMS" % (repodir, arch) + if os.path.isdir(dir): + dirs.append(dir) + else: + #noarch and srpms linked for all arches + for fn in os.listdir(repodir): + if fn == 'groups': + continue + if rpminfo['sourcepackage']: + dir = "%s/%s/SRPMS" % (repodir, fn) + else: + dir = "%s/%s/RPMS" % (repodir, fn) + if os.path.isdir(dir): + dirs.append(dir) + for dir in dirs: + fn = os.path.basename(filepath) + dst = "%s/%s" % (dir, fn) + if os.path.exists(dst): + s_st = os.stat(filepath) + d_st = os.stat(dst) + if s_st.st_ino != d_st.st_ino: + raise koji.GenericError, "File already in repo: %s" % dst + #otherwise the desired hardlink already exists + else: + os.link(filepath, dst) + + def repoDone(self, repo_id, data): + """Move repo data into place, mark as ready, and expire earlier repos + + repo_id: the id of the repo + data: a dictionary of the form { arch: (uploadpath, files), ...} + """ + host = Host() + host.verify() + rinfo = repo_info(repo_id, strict=True) + if rinfo['state'] != koji.REPO_INIT: + raise koji.GenericError, "Repo %(id)s not in INIT state (got %(state)s)" % rinfo + repodir = koji.pathinfo.repo(repo_id, rinfo['tag_name']) + workdir = koji.pathinfo.work() + for arch, (uploadpath, files) in data.iteritems(): + archdir = "%s/%s" % (repodir, arch) + if not os.path.isdir(archdir): + raise koji.GenericError, "Repo arch directory missing: %s" % archdir + datadir = "%s/repodata" % archdir + koji.ensuredir(datadir) + for fn in files: + src = "%s/%s/%s" % 
(workdir,uploadpath, fn) + dst = "%s/%s" % (datadir, fn) + if not os.path.exists(src): + raise koji.GenericError, "uploaded file missing: %s" % src + os.link(src, dst) + os.unlink(src) + repo_ready(repo_id) + repo_expire_older(rinfo['tag_id'], rinfo['create_event']) + + def isEnabled(self): + host = Host() + host.verify() + return host.isEnabled() + +# XXX - not needed anymore? +def handle_upload(req): + """Handle file upload via POST request""" + pass + +#koji.add_sys_logger("koji") + +if __name__ == "__main__": + # XXX - testing defaults + print "Connecting to DB" + koji.db.setDBopts( database = "test", user = "test") + context.cnx = koji.db.connect() + context.req = {} + print "Creating a session" + context.session = koji.auth.Session(None,hostip="127.0.0.1") + print context.session + test_user = "host/1" + pw = "foobar" + print "Logging in as %s" % test_user + session_info = context.session.login(test_user,pw,{'hostip':'127.0.0.1'}) + for k in session_info.keys(): + session_info[k] = [session_info[k]] + s2=koji.auth.Session(session_info,'127.0.0.1') + print s2 + print s2.getHostId() + context.session = s2 + print "Associating host" + Host() + #context.cnx.commit() + context.session.perms['admin'] = 1 #XXX diff --git a/hub/kojixmlrpc.py b/hub/kojixmlrpc.py new file mode 100644 index 00000000..89348707 --- /dev/null +++ b/hub/kojixmlrpc.py @@ -0,0 +1,295 @@ +# mod_python script + +# kojixmlrpc - an XMLRPC interface for koji. +# Copyright (c) 2005-2007 Red Hat +# +# Koji is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this software; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +# +# Authors: +# Mike McLean + +import sys +import time +import traceback +import pprint +from xmlrpclib import loads,dumps,Fault +from mod_python import apache + +import koji +import koji.auth +import koji.db +from kojihub import RootExports +from kojihub import HostExports +from koji.context import context + +class ModXMLRPCRequestHandler(object): + """Simple XML-RPC handler for mod_python environment""" + + def __init__(self): + self.funcs = {} + self.traceback = False + #introspection functions + self.register_function(self.list_api, name="_listapi") + self.register_function(self.system_listMethods, name="system.listMethods") + self.register_function(self.system_methodSignature, name="system.methodSignature") + self.register_function(self.system_methodHelp, name="system.methodHelp") + self.register_function(self.multiCall) + + def register_function(self, function, name = None): + if name is None: + name = function.__name__ + self.funcs[name] = function + + def register_module(self, instance, prefix=None): + """Register all the public functions in an instance with prefix prepended + + For example + h.register_module(exports,"pub.sys") + will register the methods of exports with names like + pub.sys.method1 + pub.sys.method2 + ...etc + """ + for name in dir(instance): + if name.startswith('_'): + continue + function = getattr(instance, name) + if not callable(function): + continue + if prefix is not None: + name = "%s.%s" %(prefix,name) + self.register_function(function, name=name) + + def register_instance(self,instance): + self.register_module(instance) + + def _marshaled_dispatch(self, data): + """Dispatches an XML-RPC method from marshalled (XML) data.""" + + params, method = loads(data) + + start = time.time() + # generate response + try: 
+ response = self._dispatch(method, params) + # wrap response in a singleton tuple + response = (response,) + response = dumps(response, methodresponse=1, allow_none=1) + except Fault, fault: + self.traceback = True + response = dumps(fault) + except: + self.traceback = True + # report exception back to server + e_class, e = sys.exc_info()[:2] + faultCode = getattr(e_class,'faultCode',1) + tb_type = context.opts.get('KojiTraceback',None) + tb_str = ''.join(traceback.format_exception(*sys.exc_info())) + if issubclass(e_class, koji.GenericError): + if not context.opts.get('KojiDebug',False): + faultString = str(e) + elif tb_type == "extended": + faultString = koji.format_exc_plus() + else: + faultString = tb_str + else: + if tb_type == "normal": + faultString = tb_str + elif tb_type == "extended": + faultString = koji.format_exc_plus() + else: + faultString = "%s: %s" % (e_class,e) + sys.stderr.write(tb_str) + sys.stderr.write('\n') + response = dumps(Fault(faultCode, faultString)) + + sys.stderr.write("Returning %d bytes after %f seconds\n" % + (len(response),time.time() - start)) + sys.stderr.flush() + return response + + def _dispatch(self,method,params): + func = self.funcs.get(method,None) + if func is None: + raise koji.GenericError, "Invalid method: %s" % method + context.method = method + if not hasattr(context,"session"): + #we may be called again by one of our meta-calls (like multiCall) + #so we should only create a session if one does not already exist + context.session = koji.auth.Session() + try: + context.session.validate() + except koji.AuthLockError: + #might be ok, depending on method + if method not in ('exclusiveSession','login', 'krbLogin', 'logout'): + raise + if context.opts.get('LockOut',False) and method not in ('login', 'krbLogin', 'logout'): + if not context.session.hasPerm('admin'): + raise koji.GenericError, "Server disabled for maintenance" + # handle named parameters + params,opts = koji.decode_args(*params) + sys.stderr.write("Handling 
method %s for session %s (#%s)\n" \ + % (method, context.session.id, context.session.callnum)) + if method != 'uploadFile' and context.opts.get('KojiDebug',False): + sys.stderr.write("Params: %s\n" % pprint.pformat(params)) + sys.stderr.write("Opts: %s\n" % pprint.pformat(opts)) + start = time.time() + ret = func(*params,**opts) + sys.stderr.write("Completed method %s for session %s (#%s): %f seconds\n" + % (method, context.session.id, context.session.callnum, + time.time()-start)) + sys.stderr.flush() + return ret + + def multiCall(self, calls): + """Execute a multicall. Execute each method call in the calls list, collecting + results and errors, and return those as a list.""" + results = [] + for call in calls: + try: + result = self._dispatch(call['methodName'], call['params']) + except Fault, fault: + results.append({'faultCode': fault.faultCode, 'faultString': fault.faultString}) + except: + # transform unknown exceptions into XML-RPC Faults + # don't create a reference to full traceback since this creates + # a circular reference. + exc_type, exc_value = sys.exc_info()[:2] + faultCode = getattr(exc_type, 'faultCode', 1) + faultString = ', '.join(exc_value.args) + trace = traceback.format_exception(*sys.exc_info()) + # traceback is not part of the multicall spec, but we include it for debugging purposes + results.append({'faultCode': faultCode, 'faultString': faultString, 'traceback': trace}) + else: + results.append([result]) + + return results + + def list_api(self): + funcs = [] + for name,func in self.funcs.items(): + #the keys in self.funcs determine the name of the method as seen over xmlrpc + #func.__name__ might differ (e.g. 
for dotted method names) + args = self._getFuncArgs(func) + funcs.append({'name': name, + 'doc': func.__doc__, + 'args': args}) + return funcs + + def _getFuncArgs(self, func): + args = [] + for x in range(0, func.func_code.co_argcount): + if x == 0 and func.func_code.co_varnames[x] == "self": + continue + if func.func_defaults and func.func_code.co_argcount - x <= len(func.func_defaults): + args.append((func.func_code.co_varnames[x], func.func_defaults[x - func.func_code.co_argcount + len(func.func_defaults)])) + else: + args.append(func.func_code.co_varnames[x]) + return args + + def system_listMethods(self): + return self.funcs.keys() + + def system_methodSignature(self, method): + #it is not possible to autogenerate this data + return 'signatures not supported' + + def system_methodHelp(self, method): + func = self.funcs.get(method) + if func is None: + return "" + arglist = [] + for arg in self._getFuncArgs(func): + if isinstance(arg,str): + arglist.append(arg) + else: + arglist.append('%s=%s' % (arg[0], arg[1])) + ret = '%s(%s)' % (method, ", ".join(arglist)) + if func.__doc__: + ret += "\ndescription: %s" % func.__doc__ + return ret + + def handle_request(self,req): + """Handle a single XML-RPC request""" + + # XMLRPC uses POST only. 
Reject anything else + if req.method != 'POST': + req.allow_methods(['POST'],1) + raise apache.SERVER_RETURN, apache.HTTP_METHOD_NOT_ALLOWED + + response = self._marshaled_dispatch(req.read()) + + req.content_type = "text/xml" + req.set_content_length(len(response)) + req.write(response) + + +# +# mod_python handler +# + +def handler(req, profiling=False): + if profiling: + import profile, pstats, StringIO, tempfile + global _profiling_req + _profiling_req = req + temp = tempfile.NamedTemporaryFile() + profile.run("import kojixmlrpc; kojixmlrpc.handler(kojixmlrpc._profiling_req, False)", temp.name) + stats = pstats.Stats(temp.name) + strstream = StringIO.StringIO() + sys.stdout = strstream + stats.sort_stats("time") + stats.print_stats() + req.write("

" + strstream.getvalue() + "
") + _profiling_req = None + else: + opts = req.get_options() + log_handler = None + try: + context._threadclear() + context.commit_pending = False + context.opts = opts + context.req = req + koji.db.provideDBopts(database = opts["DBName"], + user = opts["DBUser"], + host = opts.get("DBhost",None)) + context.cnx = koji.db.connect(opts.get("KojiDebug",False)) + log_handler = koji.add_db_logger("koji", context.cnx) + functions = RootExports() + hostFunctions = HostExports() + h = ModXMLRPCRequestHandler() + h.register_instance(functions) + h.register_module(hostFunctions,"host") + h.register_function(koji.auth.login) + h.register_function(koji.auth.krbLogin) + h.register_function(koji.auth.logout) + h.register_function(koji.auth.subsession) + h.register_function(koji.auth.logoutChild) + h.register_function(koji.auth.exclusiveSession) + h.register_function(koji.auth.sharedSession) + h.handle_request(req) + if h.traceback: + #rollback + context.cnx.rollback() + elif context.commit_pending: + context.cnx.commit() + finally: + if log_handler != None: + koji.remove_log_handler("koji", log_handler) + #make sure context gets cleaned up + if hasattr(context,'cnx'): + context.cnx.close() + context._threadclear() + return apache.OK diff --git a/koji.spec b/koji.spec new file mode 100644 index 00000000..9cefd6c9 --- /dev/null +++ b/koji.spec @@ -0,0 +1,151 @@ +%{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} + +%define testbuild 1 +%define debug_package %{nil} + +%define baserelease 5 +%if %{testbuild} +%define release %{baserelease}.%(date +%%Y%%m%%d.%%H%%M.%%S) +%else +%define release %{baserelease} +%endif +Name: koji +Version: 0.9.5 +Release: %{release}%{?dist} +License: LGPL +Summary: Build system tools +Group: Applications/System +Source: koji-%{PACKAGE_VERSION}.tar.bz2 +BuildRoot: %(mktemp -d %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) +BuildArch: noarch +Requires: python-krbV >= 
1.0.13 +BuildRequires: python + +%description +Koji is a system for building and tracking RPMS. The base package +contains shared libraries and the command-line interface. + +%package hub +Summary: Koji XMLRPC interface +Group: Applications/Internet +Requires: httpd +Requires: mod_python +Requires: postgresql-python +Requires: koji = %{version}-%{release} + +%description hub +koji-hub is the XMLRPC interface to the koji database + +%package builder +Summary: Koji RPM builder daemon +Group: Applications/System +Requires: koji = %{version}-%{release} +Requires: mock >= 0.5-3 +Requires(post): /sbin/chkconfig +Requires(post): /sbin/service +Requires(preun): /sbin/chkconfig +Requires(preun): /sbin/service +Requires(pre): /usr/sbin/useradd +Requires: cvs +Requires: rpm-build +Requires: redhat-rpm-config +Requires: createrepo >= 0.4.4-3 + +%description builder +koji-builder is the daemon that runs on build machines and executes +tasks that come through the Koji system. + +%package utils +Summary: Koji Utilities +Group: Applications/Internet +Requires: postgresql-python +Requires: koji = %{version}-%{release} +Requires: rpm-build +Requires: createrepo >= 0.4.4-3 + +%description utils +Utilities for the Koji system + +%package web +Summary: Koji Web UI +Group: Applications/Internet +Requires: httpd +Requires: mod_python +Requires: mod_auth_kerb +Requires: postgresql-python +Requires: python-cheetah +Requires: koji = %{version}-%{release} +Requires: python-krbV >= 1.0.13 + +%description web +koji-web is a web UI to the Koji system. 
+ +%prep +%setup -q + +%build + +%install +rm -rf $RPM_BUILD_ROOT +make DESTDIR=$RPM_BUILD_ROOT install + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root) +%{_bindir}/* +%{python_sitelib}/koji +%config(noreplace) %{_sysconfdir}/koji.conf +%doc docs + +%files hub +%defattr(-,root,root) +%{_var}/www/koji-hub +%config(noreplace) /etc/httpd/conf.d/kojihub.conf + +%files utils +%defattr(-,root,root) +%{_sbindir}/kojira +%config %{_initrddir}/kojira +%config %{_sysconfdir}/sysconfig/kojira +%config(noreplace) %{_sysconfdir}/kojira.conf + +%files web +%defattr(-,root,root) +%{_var}/www/koji-web +%config(noreplace) /etc/httpd/conf.d/kojiweb.conf + +%files builder +%defattr(-,root,root) +%{_sbindir}/kojid +%config %{_initrddir}/kojid +%config %{_sysconfdir}/sysconfig/kojid +%config(noreplace) %{_sysconfdir}/kojid.conf +%attr(-,kojibuilder,kojibuilder) /etc/mock/koji + +%pre builder +/usr/sbin/useradd -r -s /bin/bash -G mock -d /builddir -M kojibuilder 2>/dev/null ||: + +%post builder +/sbin/chkconfig --add kojid +/sbin/service kojid condrestart &> /dev/null || : + +%preun builder +if [ $1 = 0 ]; then + /sbin/service kojid stop &> /dev/null + /sbin/chkconfig --del kojid +fi + +%post utils +/sbin/chkconfig --add kojira +/sbin/service kojira condrestart &> /dev/null || : +%preun utils +if [ $1 = 0 ]; then + /sbin/service kojira stop &> /dev/null || : + /sbin/chkconfig --del kojira +fi + +%changelog +* Sun Feb 04 2007 Mike McLean - 0.9.5-1 +- project renamed to koji diff --git a/koji/.cvsignore b/koji/.cvsignore new file mode 100644 index 00000000..0d20b648 --- /dev/null +++ b/koji/.cvsignore @@ -0,0 +1 @@ +*.pyc diff --git a/koji/Makefile b/koji/Makefile new file mode 100644 index 00000000..c212b289 --- /dev/null +++ b/koji/Makefile @@ -0,0 +1,20 @@ +PYTHON=python +PACKAGE = $(shell basename `pwd`) +PYFILES = $(wildcard *.py) +PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)') +PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print 
sys.prefix') +PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER) +PKGDIR = $(PYLIBDIR)/site-packages/$(PACKAGE) + +_default: + @echo "nothing to make. try make install" + +clean: + rm -f *.o *.so *.pyc *~ + +install: + mkdir -p $(DESTDIR)/$(PKGDIR) + for p in $(PYFILES) ; do \ + install -m 644 $$p $(DESTDIR)/$(PKGDIR)/$$p; \ + done + $(PYTHON) -c "import compileall; compileall.compile_dir('$(DESTDIR)/$(PKGDIR)', 1, '$(PYDIR)', 1)" diff --git a/koji/__init__.py b/koji/__init__.py new file mode 100644 index 00000000..0fe03f20 --- /dev/null +++ b/koji/__init__.py @@ -0,0 +1,1571 @@ +# Python module +# Common functions + +# Copyright (c) 2005-2007 Red Hat +# +# Koji is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this software; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +# +# Authors: +# Mike McLean + +import sys +try: + import krbV +except ImportError: + sys.stderr.write("Warning: Could not import krbV module. 
Kerberos support will be disabled.\n") + sys.stderr.flush() +import base64 +import commands +import datetime +from fnmatch import fnmatch +import logging +import logging.handlers +import md5 +import os +import os.path +import pwd +import re +import rpm +import signal +import socket +import tempfile +import time +import traceback +import urllib +import urlparse +import xmlrpclib +from xmlrpclib import loads, Fault + +def _(args): + """Stub function for translation""" + return args + +## Constants ## + +RPM_HEADER_MAGIC = '\x8e\xad\xe8' +RPM_TAG_HEADERSIGNATURES = 62 +RPM_SIGTAG_MD5 = 1004 +RPM_SIGTAG_GPG = 1005 + +class Enum(dict): + """A simple class to track our enumerated constants + + Can quickly map forward or reverse + """ + + def __init__(self,*args): + self._order = tuple(*args) + super(Enum,self).__init__([(value,n) for n,value in enumerate(self._order)]) + + def __getitem__(self,key): + if isinstance(key,int) or isinstance(key,slice): + return self._order.__getitem__(key) + else: + return super(Enum,self).__getitem__(key) + + def get(self,key,default=None): + try: + return self.__getitem__(key) + except (IndexError,KeyError): + return default + + def getnum(self,key,default=None): + try: + value = self.__getitem__(key) + except (IndexError,KeyError): + return default + if isinstance(key,int): + return key + else: + return value + + def getvalue(self,key,default=None): + try: + value = self.__getitem__(key) + except (IndexError,KeyError): + return default + if isinstance(key,int): + return value + else: + return key + + def _notImplemented(self,*args,**opts): + raise NotImplementedError + + #read-only + __setitem__ = _notImplemented + __delitem__ = _notImplemented + clear = _notImplemented + pop = _notImplemented + popitem = _notImplemented + update = _notImplemented + setdefault = _notImplemented + +API_VERSION = 1 + +TASK_STATES = Enum(( + 'FREE', + 'OPEN', + 'CLOSED', + 'CANCELED', + 'ASSIGNED', + 'FAILED', +)) + +BUILD_STATES = Enum(( + 'BUILDING', + 
'COMPLETE', + 'DELETED', + 'FAILED', + 'CANCELED', +)) + +USERTYPES = Enum(( + 'NORMAL', + 'HOST', + 'GROUP', +)) + +USER_STATUS = Enum(( + 'NORMAL', + 'BLOCKED', +)) + +# authtype values +# normal == username/password +AUTHTYPE_NORMAL = 0 +AUTHTYPE_KERB = 1 + +#dependency types +DEP_REQUIRE = 0 +DEP_PROVIDE = 1 +DEP_OBSOLETE = 2 +DEP_CONFLICT = 3 + +#dependency flags +RPMSENSE_LESS = 2 +RPMSENSE_GREATER = 4 +RPMSENSE_EQUAL = 8 + +# repo states +REPO_STATES = Enum(( + 'INIT', + 'READY', + 'EXPIRED', + 'DELETED', + 'PROBLEM', +)) +# for backwards compatibility +REPO_INIT = REPO_STATES['INIT'] +REPO_READY = REPO_STATES['READY'] +REPO_EXPIRED = REPO_STATES['EXPIRED'] +REPO_DELETED = REPO_STATES['DELETED'] +REPO_PROBLEM = REPO_STATES['PROBLEM'] + +# buildroot states +BR_STATES = Enum(( + 'INIT', + 'WAITING', + 'BUILDING', + 'EXPIRED', +)) + +#PARAMETERS +BASEDIR = '/mnt/koji' +# default task priority +PRIO_DEFAULT = 20 + +#Exceptions +class GenericError(Exception): + """Base class for our custom exceptions""" + faultCode = 1000 + fromFault = False + def __str__(self): + try: + return str(self.args[0]['args'][0]) + except: + try: + return str(self.args[0]) + except: + return str(self.__dict__) + +class LockConflictError(GenericError): + """Raised when there is a lock conflict""" + faultCode = 1001 + +class AuthError(GenericError): + """Raised when there is an error in authentication""" + faultCode = 1002 + +class TagError(GenericError): + """Raised when a tagging operation fails""" + faultCode = 1003 + +class ActionNotAllowed(GenericError): + """Raised when the session does not have permission to take some action""" + faultCode = 1004 + +class BuildError(GenericError): + """Raised when a build fails""" + faultCode = 1005 + +class AuthLockError(AuthError): + """Raised when a lock prevents authentication""" + faultCode = 1006 + +class AuthExpired(AuthError): + """Raised when a session has expired""" + faultCode = 1007 + +class SequenceError(AuthError): + """Raised when 
requests are received out of sequence""" + faultCode = 1008 + +class RetryError(AuthError): + """Raised when a request is received twice and cannot be rerun""" + faultCode = 1009 + +class PreBuildError(BuildError): + """Raised when a build fails during pre-checks""" + faultCode = 1010 + +class PostBuildError(BuildError): + """Raised when a build fails during post-checks""" + faultCode = 1011 + +class BuildrootError(BuildError): + """Raised when there is an error with the buildroot""" + faultCode = 1012 + +class FunctionDeprecated(GenericError): + """Raised by a deprecated function""" + faultCode = 1013 + +#A function to get create an exception from a fault +def convertFault(fault): + """Convert a fault to the corresponding Exception type, if possible""" + code = getattr(fault,'faultCode',None) + if code is None: + return fault + for v in globals().values(): + if type(v) == type(Exception) and issubclass(v,GenericError) and \ + code == getattr(v,'faultCode',None): + ret = v(fault.faultString) + ret.fromFault = True + return ret + #otherwise... + return fault + +def listFaults(): + """Return a list of faults + + Returns a list of dictionaries whose keys are: + faultCode: the numeric code used in fault conversion + name: the name of the exception + desc: the description of the exception (docstring) + """ + ret = [] + for n,v in globals().items(): + if type(v) == type(Exception) and issubclass(v,GenericError): + code = getattr(v,'faultCode',None) + if code is None: + continue + info = {} + info['faultCode'] = code + info['name'] = n + info['desc'] = getattr(v,'__doc__',None) + ret.append(info) + ret.sort(lambda a,b: cmp(a['faultCode'],b['faultCode'])) + return ret + +#functions for encoding/decoding optional arguments + +def encode_args(*args,**opts): + """The function encodes optional arguments as regular arguments. 
+ + This is used to allow optional arguments in xmlrpc calls + Returns a tuple of args + """ + if opts: + opts['__starstar'] = True + args = args + (opts,) + return args + +def decode_args(*args): + """Decodes optional arguments from a flat argument list + + Complementary to encode_args + Returns a tuple (args,opts) where args is a tuple and opts is a dict + """ + opts = {} + if len(args) > 0: + last = args[-1] + if type(last) == dict and last.get('__starstar',False): + del last['__starstar'] + opts = last + args = args[:-1] + return args,opts + +#commonly used functions + +def safe_xmlrpc_loads(s): + """Load xmlrpc data from a string, but catch faults""" + try: + return loads(s) + except Fault, f: + return f + +def ensuredir(directory): + """Create directory, if necessary.""" + if os.path.isdir(directory): + return + try: + os.makedirs(directory) + except OSError: + #thrown when dir already exists (could happen in a race) + if not os.path.isdir(directory): + #something else must have gone wrong + raise + +def daemonize(): + """Detach and run in background""" + pid = os.fork() + if pid: + os._exit(0) + os.setsid() + signal.signal(signal.SIGHUP, signal.SIG_IGN) + #fork again + pid = os.fork() + if pid: + os._exit(0) + os.chdir("/") + #redirect stdin/stdout/sterr + fd0 = os.open('/dev/null', os.O_RDONLY) + fd1 = os.open('/dev/null', os.O_RDWR) + fd2 = os.open('/dev/null', os.O_RDWR) + os.dup2(fd0,0) + os.dup2(fd1,1) + os.dup2(fd2,2) + os.close(fd0) + os.close(fd1) + os.close(fd2) + +def multibyte(data): + """Convert a list of bytes to an integer (network byte order)""" + sum = 0 + n = len(data) + for i in xrange(n): + sum += data[i] << (8 * (n - i - 1)) + return sum + +def find_rpm_sighdr(path): + """Finds the offset and length of the signature header.""" + # see Maximum RPM Appendix A: Format of the RPM File + + # The lead is a fixed sized section (96 bytes) that is mostly obsolete + sig_start = 96 + sigsize = rpm_hdr_size(path, sig_start) + return (sig_start, 
sigsize) + +def rpm_hdr_size(f, ofs=None): + """Returns the length (in bytes) of the rpm header + + f = filename or file object + ofs = offset of the header + """ + if isinstance(f, (str, unicode)): + fo = file(f, 'rb') + else: + fo = f + if ofs != None: + fo.seek(ofs, 0) + magic = fo.read(3) + if magic != RPM_HEADER_MAGIC: + raise GenericError, "Invalid rpm: bad magic: %r" % magic + + # skip past section magic and such + # (3 bytes magic, 1 byte version number, 4 bytes reserved) + fo.seek(ofs + 8, 0) + + # now read two 4-byte integers which tell us + # - # of index entries + # - bytes of data in header + data = [ ord(x) for x in fo.read(8) ] + il = multibyte(data[0:4]) + dl = multibyte(data[4:8]) + + #this is what the section data says the size should be + hdrsize = 8 + 16 * il + dl + + # hdrsize rounded up to nearest 8 bytes + hdrsize = hdrsize + ( 8 - ( hdrsize % 8 ) ) % 8 + + # add eight bytes for section header + hdrsize = hdrsize + 8 + + if not isinstance(f, (str, unicode)): + fo.close() + return hdrsize + + +class RawHeader(object): + + # see Maximum RPM Appendix A: Format of the RPM File + + def __init__(self, data): + if data[0:3] != RPM_HEADER_MAGIC: + raise GenericError, "Invalid rpm header: bad magic: %r" % (data[0:3],) + self.header = data + self._index() + + def version(self): + #fourth byte is the version + return ord(self.header[3]) + + def _index(self): + # read two 4-byte integers which tell us + # - # of index entries (each 16 bytes long) + # - bytes of data in header + data = [ ord(x) for x in self.header[8:12] ] + il = multibyte(data[:4]) + dl = multibyte(data[4:8]) + + #read the index (starts at offset 16) + index = {} + for i in xrange(il): + entry = [] + for j in xrange(4): + ofs = 16 + i*16 + j*4 + data = [ ord(x) for x in self.header[ofs:ofs+4] ] + entry.append(multibyte(data)) + #print "Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry) + index[entry[0]] = entry + self.datalen = dl + self.index = index + + def dump(self): + print "HEADER 
DUMP:" + #calculate start of store + il = len(self.index) + store = 16 + il * 16 + #print "start is: %d" % start + #print "index length: %d" % il + print "Store at offset %d (%0x)" % (store,store) + #sort entries by offset, dtype + #also rearrange: tag, dtype, offset, count -> offset, dtype, tag, count + order = [(x[2], x[1], x[0], x[3]) for x in self.index.itervalues()] + order.sort() + next = store + #map some rpmtag codes + tags = {} + for name, code in rpm.__dict__.iteritems(): + if name.startswith('RPMTAG_') and isinstance(code, int): + tags[code] = name[7:].lower() + for entry in order: + #tag, dtype, offset, count = entry + offset, dtype, tag, count = entry + pos = store + offset + if next is not None: + if pos > next: + print "** HOLE between entries" + print "Hex: %s" % hex_string(self.header[next:pos]) + print "Data: %r" % self.header[next:pos] + elif pos < next: + print "** OVERLAPPING entries" + print "Tag: %d [%s], Type: %d, Offset: %x, Count: %d" \ + % (tag, tags.get(tag, '?'), dtype, offset, count) + if dtype == 0: + #null + print "[NULL entry]" + next = pos + elif dtype == 1: + #char + for i in xrange(count): + print "Char: %r" % self.header[pos] + pos += 1 + next = pos + elif dtype >= 2 and dtype <= 5: + #integer + n = 1 << (dtype - 2) + for i in xrange(count): + data = [ ord(x) for x in self.header[pos:pos+n] ] + print "%r" % data + num = multibyte(data) + print "Int(%d): %d" % (n, num) + pos += n + next = pos + elif dtype == 6: + # string (null terminated) + end = self.header.find('\0', pos) + print "String(%d): %r" % (end-pos, self.header[pos:end]) + next = end + 1 + elif dtype == 7: + print "Data: %s" % hex_string(self.header[pos:pos+count]) + next = pos+count + elif dtype == 8: + # string array + for i in xrange(count): + end = self.header.find('\0', pos) + print "String(%d): %r" % (end-pos, self.header[pos:end]) + pos = end + 1 + next = pos + elif dtype == 9: + # unicode string array + for i in xrange(count): + end = self.header.find('\0', 
pos) + print "i18n(%d): %r" % (end-pos, self.header[pos:end]) + pos = end + 1 + next = pos + else: + print "Skipping data type %x" % dtype + next = None + if next is not None: + pos = store + self.datalen + if next < pos: + print "** HOLE at end of data block" + print "Hex: %s" % hex_string(self.header[next:pos]) + print "Data: %r" % self.header[next:pos] + elif pos > next: + print "** OVERFLOW in data block" + + def __getitem__(self, key): + tag, dtype, offset, count = self.index[key] + assert tag == key + return self._getitem(dtype, offset, count) + + def _getitem(self, dtype, offset, count): + #calculate start of store + il = len(self.index) + store = 16 + il * 16 + pos = store + offset + if dtype >= 2 and dtype <= 5: + n = 1 << (dtype - 2) + # n-byte integer + data = [ ord(x) for x in self.header[pos:pos+n] ] + return multibyte(data) + elif dtype == 6: + # string (null terminated) + end = self.header.find('\0', pos) + return self.header[pos:end] + elif dtype == 7: + #raw data + return self.header[pos:pos+count] + else: + #XXX - not all valid data types are handled + raise GenericError, "Unable to read header data type: %x" % dtype + + def get(self, key, default=None): + entry = self.index.get(key) + if entry is None: + return default + else: + return self._getitem(*entry[1:]) + + +def rip_rpm_sighdr(src): + """Rip the signature header out of an rpm""" + (start, size) = find_rpm_sighdr(src) + fo = file(src, 'rb') + fo.seek(start, 0) + sighdr = fo.read(size) + fo.close() + return sighdr + +def rip_rpm_hdr(src): + """Rip the main header out of an rpm""" + (start, size) = find_rpm_sighdr(src) + start += size + size = rpm_hdr_size(src, start) + fo = file(src, 'rb') + fo.seek(start, 0) + hdr = fo.read(size) + fo.close() + return hdr + +def get_sighdr_key(sighdr): + """Parse the sighdr and return the sigkey""" + sig = RawHeader(sighdr).get(RPM_SIGTAG_GPG) + if sig is None: + return None + else: + return hex_string(sig[13:17]) + +def splice_rpm_sighdr(sighdr, src, 
dst=None, bufsize=8192): + """Write a copy of an rpm with signature header spliced in""" + (start, size) = find_rpm_sighdr(src) + if dst is None: + (fd, dst) = tempfile.mkstemp() + os.close(fd) + src_fo = file(src, 'rb') + dst_fo = file(dst, 'wb') + dst_fo.write(src_fo.read(start)) + dst_fo.write(sighdr) + src_fo.seek(size, 1) + while True: + buf = src_fo.read(bufsize) + if not buf: + break + dst_fo.write(buf) + src_fo.close() + dst_fo.close() + return dst + +def get_rpm_header(filename): + """Return the rpm header.""" + ts = rpm.TransactionSet() + ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS) + fo = file(filename, "r") + hdr = ts.hdrFromFdno(fo.fileno()) + fo.close() + return hdr + +def get_header_field(hdr,name): + """Extract named field from an rpm header""" + idx = getattr(rpm,"RPMTAG_%s" % name.upper(),None) + if idx is None: + raise GenericError, "No such rpm header field: %s" % name + return hdr[idx] + +def get_header_fields(X,fields): + """Extract named fields from an rpm header and return as a dictionary + + X may be either the rpm header or the rpm filename + """ + if type(X) == str: + hdr = get_rpm_header(X) + else: + hdr = X + ret = {} + for f in fields: + ret[f] = get_header_field(hdr,f) + return ret + +def parse_NVR(nvr): + """split N-V-R into dictionary of data""" + ret = {} + p2 = nvr.rfind("-",0) + if p2 == -1 or p2 == len(nvr) - 1: + raise GenericError("invalid format: %s" % nvr) + p1 = nvr.rfind("-",0,p2) + if p1 == -1 or p1 == p2 - 1: + raise GenericError("invalid format: %s" % nvr) + ret['release'] = nvr[p2+1:] + ret['version'] = nvr[p1+1:p2] + ret['name'] = nvr[:p1] + epochIndex = ret['name'].find(':') + if epochIndex == -1: + ret['epoch'] = '' + else: + ret['epoch'] = ret['name'][:epochIndex] + ret['name'] = ret['name'][epochIndex + 1:] + return ret + +def parse_NVRA(nvra): + """split N-V-R.A.rpm into dictionary of data""" + if nvra.endswith(".rpm"): + nvra = nvra[:-4] + p3 = nvra.rfind(".") + if p3 == -1 or p3 == len(nvra) - 
1: + raise GenericError("invalid format: %s" % nvra) + arch = nvra[p3+1:] + ret = parse_NVR(nvra[:p3]) + ret['arch'] = arch + if arch == 'src': + ret['src'] = True + else: + ret['src'] = False + return ret + +def canonArch(arch): + """Given an arch, return the "canonical" arch""" + #XXX - this could stand to be smarter, and we should probably + # have some other related arch-mangling functions. + if fnmatch(arch,'i?86') or arch == 'athlon': + return 'i386' + elif fnmatch(arch,'ppc64*'): + return 'ppc64' + elif arch == 'ia32e': + return 'x86_64' + else: + return arch + +def hex_string(s): + """Converts a string to a string of hex digits""" + return ''.join([ '%02x' % ord(x) for x in s ]) + + +def make_groups_spec(grplist,name='buildsys-build',buildgroup=None): + """Return specfile contents representing the group""" + if buildgroup is None: + buildgroup=name + data = [ +"""# +# This specfile represents buildgroups for mock +# Autogenerated by the build system +# +Summary: The base set of packages for a mock chroot\n""", +"""Name: %s\n""" % name, +"""Version: 1 +Release: 1 +License: GPL +Group: Development/Build Tools +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root +BuildArch: noarch + +#package requirements +"""] + #add a requires entry for all the packages in buildgroup, and in + #groups required by buildgroup + need = [buildgroup] + seen_grp = {} + seen_pkg = {} + #index groups + groups = dict([(g['name'],g) for g in grplist]) + for group_name in need: + if seen_grp.has_key(group_name): + continue + seen_grp[group_name] = 1 + group = groups.get(group_name) + if group is None: + data.append("#MISSING GROUP: %s\n" % group_name) + continue + data.append("#Group: %s\n" % group_name) + pkglist = list(group['packagelist']) + pkglist.sort(lambda a,b: cmp(a['package'], b['package'])) + for pkg in pkglist: + pkg_name = pkg['package'] + if seen_pkg.has_key(pkg_name): + continue + data.append("Requires: %s\n" % pkg_name) + for req in group['grouplist']: + req_name 
= req['name'] + if seen_grp.has_key(req_name): + continue + need.append(req_name) + data.append(""" +%description +This is a meta-package that requires a defined group of packages + +%prep +%build +%install +%clean + +%files +%defattr(-,root,root,-) +%doc +""") + return ''.join(data) + +def generate_comps(groups): + """Generate comps content from groups data""" + def boolean_text(x): + if x: + return "true" + else: + return "false" + data = [ +""" + + + + +""" ] + groups = list(groups) + groups.sort(lambda a,b:cmp(a['name'],b['name'])) + for g in groups: + group_id = g['name'] + name = g['display_name'] + description = g['description'] + langonly = boolean_text(g['langonly']) + default = boolean_text(g['is_default']) + uservisible = boolean_text(g['uservisible']) + data.append( +""" + %(group_id)s + %(name)s + %(description)s + %(default)s + %(uservisible)s +""" % locals()) + if g['biarchonly']: + data.append( +""" %s +""" % boolean_text(True)) + + #print grouplist, if any + if g['grouplist']: + data.append( +""" +""") + grouplist = list(g['grouplist']) + grouplist.sort(lambda a,b:cmp(a['name'],b['name'])) + for x in grouplist: + #['req_id','type','is_metapkg','name'] + name = x['name'] + thetype = x['type'] + tag = "groupreq" + if x['is_metapkg']: + tag = "metapkg" + if thetype: + data.append( +""" <%(tag)s type="%(thetype)s">%(name)s +""" % locals()) + else: + data.append( +""" <%(tag)s>%(name)s +""" % locals()) + data.append( +""" +""") + + #print packagelist, if any + if g['packagelist']: + data.append( +""" +""") + packagelist = list(g['packagelist']) + packagelist.sort(lambda a,b:cmp(a['package'],b['package'])) + for p in packagelist: + #['package_id','type','basearchonly','requires','name'] + name = p['package'] + opts = 'type="%s"' % p['type'] + if p['basearchonly']: + opts += ' basearchonly="%s"' % boolean_text(True) + if p['requires']: + opts += ' requires="%s"' % p['requires'] + data.append( +""" %(name)s +""" % locals()) + data.append( +""" +""") + 
data.append( +""" +""") + data.append( +""" +""") + return ''.join(data) + + +def genMockConfig(name, arch, managed=False, repoid=None, tag_name=None, **opts): + """Generate a mock config + + Returns a string containing the config + """ + mockdir = opts.get('mockdir', '/var/lib/mock') + url = opts.get('url') + if not url: + if not (repoid and tag_name): + raise GenericError, "please provide a url or repo/tag" + pathinfo = PathInfo(topdir=opts.get('topdir')) + repodir = pathinfo.repo(repoid,tag_name) + url = "file://%s/%s" % (repodir,arch) + if managed: + buildroot_id = opts.get('buildroot_id') + + # FIXME - get more of this into a config + config_opts = { + 'root' : name, + 'basedir' : mockdir, + 'chroot' : '/usr/sbin/mock-helper chroot', + 'mount' : '/usr/sbin/mock-helper mount', + 'umount' : '/usr/sbin/mock-helper umount', + 'rm' : '/usr/sbin/mock-helper rm', + 'mknod' : '/usr/sbin/mock-helper mknod', + 'yum' : '/usr/sbin/mock-helper yum', + 'runuser' : '/sbin/runuser', + 'buildgroup' : 'build', + 'chroot_dep_package' : 'buildsys-build', + 'chrootuser' : 'kojibuilder', + 'chrootgroup' : 'kojibuilder', + 'chroothome' : '/builddir', + 'clean' : True, + 'target_arch' : arch, + } + + config_opts['yum.conf'] = """ +[main] +cachedir=/var/cache/yum +debuglevel=1 +logfile=/var/log/yum.log +reposdir=/dev/null +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 + +# repos + +[core] +name=core +baseurl=%(url)s +""" % locals() + #XXX - this needs to be configurable + + macros = { + '_topdir' : '%s/build' % config_opts['chroothome'], + '_rpmfilename' : '%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm', + #TODO - track some of these in the db instead? 
+ } + if opts.has_key('distribution'): + macros['distribution'] = opts['distribution'] + config_opts['macros'] = '\n'.join(["%%%s %s" % (k, v) for k,v in macros.iteritems()]) + + parts = ["""#!/usr/bin/python -tt +# Auto-generated by the build system +"""] + if managed: + parts.append(""" +# Koji buildroot id: %(buildroot_id)s +# Koji buildroot name: %(name)s +# Koji repo id: %(repoid)s +# Koji tag: %(tag_name)s""" % locals()) + + parts.append(""" +import os +config_opts['chrootuid'] = os.geteuid() +config_opts['chrootgid'] = os.getegid() +""") + for (key,value) in config_opts.iteritems(): + parts.append("config_opts[%r] = %r\n" % (key,value)) + return ''.join(parts) + +def get_sequence_value(cursor, sequence): + cursor.execute("""SELECT nextval(%(sequence)s)""", locals()) + return cursor.fetchone()[0] + +# From Python Cookbook 2nd Edition, Recipe 8.6 +def format_exc_plus(): + """ Format the usual traceback information, followed by a listing of + all the local variables in each frame. + """ + tb = sys.exc_info()[2] + while tb.tb_next: + tb = tb.tb_next + stack = [] + f = tb.tb_frame + while f: + stack.append(f) + f = f.f_back + stack.reverse() + rv = ''.join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + rv += "Locals by frame, innermost last\n" + for frame in stack: + rv += "Frame %s in %s at line %s\n" % (frame.f_code.co_name, + frame.f_code.co_filename, + frame.f_lineno) + for key, value in frame.f_locals.items(): + rv += " %20s = " % key + # we must _absolutely_ avoid propagating eceptions, and str(value) + # COULD cause any exception, so we MUST catch any...: + try: + rv += "%s\n" % value + except: + rv += "\n" + return rv + + +class PathInfo(object): + + def __init__(self,topdir=None): + if topdir is None: + self.topdir = BASEDIR + else: + self.topdir = topdir + + def build(self,build): + """Return the directory where a build belongs""" + return self.topdir + ("/packages/%(name)s/%(version)s/%(release)s" % build) + + def 
rpm(self,rpminfo): + """Return the path (relative to build_dir) where an rpm belongs""" + return "%(arch)s/%(name)s-%(version)s-%(release)s.%(arch)s.rpm" % rpminfo + + def signed(self, rpminfo, sigkey): + """Return the path (relative to build dir) where a signed rpm lives""" + return "data/signed/%s/" % sigkey + self.rpm(rpminfo) + + def sighdr(self, rpminfo, sigkey): + """Return the path (relative to build_dir) where a cached sig header lives""" + return "data/sigcache/%s/" % sigkey + self.rpm(rpminfo) + ".sig" + + def build_logs(self, build): + """Return the path for build logs""" + return "%s/data/logs" % self.build(build) + + def repo(self,repo_id,tag_str): + """Return the directory where a repo belongs""" + return self.topdir + ("/repos/%(tag_str)s/%(repo_id)s" % locals()) + + def repocache(self,tag_str): + """Return the directory where a repo belongs""" + return self.topdir + ("/repos/%(tag_str)s/cache" % locals()) + + def work(self): + """Return the work dir""" + return self.topdir + '/work' + + def scratch(self): + """Return the main scratch dir""" + return self.topdir + '/scratch' + +pathinfo = PathInfo() + +class VirtualMethod(object): + # some magic to bind an XML-RPC method to an RPC server. + # supports "nested" methods (e.g. 
examples.getStateName) + # supports named arguments (if server does) + def __init__(self, func, name): + self.__func = func + self.__name = name + def __getattr__(self, name): + return type(self)(self.__func, "%s.%s" % (self.__name, name)) + def __call__(self, *args, **opts): + return self.__func(self.__name,args,opts) + + +class ClientSession(object): + + def __init__(self, baseurl, opts=None, sinfo=None): + assert baseurl, "baseurl argument must not be empty" + if opts == None: + opts = {} + self.opts = opts + self.proxyOpts = {'allow_none':1} + if opts.get('debug_xmlrpc',False): + self.proxyOpts['verbose']=1 + self.baseurl = baseurl + self.setSession(sinfo) + self.multicall = False + self._calls = [] + self.logger = logging.getLogger('koji') + + def setSession(self,sinfo): + """Set the session info + + If sinfo is None, logout.""" + if sinfo is None: + self.logged_in = False + self.callnum = None + url = self.baseurl + else: + self.logged_in = True + self.callnum = 0 + url = "%s?%s" %(self.baseurl,urllib.urlencode(sinfo)) + self.sinfo = sinfo + self.proxy = xmlrpclib.ServerProxy(url,**self.proxyOpts) + + def login(self,opts=None): + sinfo = self.callMethod('login',self.opts['user'], self.opts['password'],opts) + if not sinfo: + return False + self.setSession(sinfo) + return True + + def subsession(self): + "Create a subsession" + sinfo = self.callMethod('subsession') + return type(self)(self.baseurl,self.opts,sinfo) + + def krb_login(self, principal=None, keytab=None, ccache=None, proxyuser=None): + """Log in using Kerberos. If principal is not None and keytab is + not None, then get credentials for the given principal from the given keytab. + If both are None, authenticate using existing local credentials (as obtained + from kinit). ccache is the absolute path to use for the credential cache. If + not specified, the default ccache will be used. If proxyuser is specified, + log in the given user instead of the user associated with the Kerberos + principal. 
The principal must be in the "ProxyPrincipals" list on + the server side.""" + ctx = krbV.default_context() + + if ccache != None: + ccache = krbV.CCache(name='FILE:' + ccache, context=ctx) + else: + ccache = ctx.default_ccache() + + if principal != None: + if keytab != None: + cprinc = krbV.Principal(name=principal, context=ctx) + keytab = krbV.Keytab(name=keytab, context=ctx) + ccache.init(cprinc) + ccache.init_creds_keytab(principal=cprinc, keytab=keytab) + else: + raise GenericError, 'cannot specify a principal without a keytab' + else: + # We're trying to log ourself in. Connect using existing credentials. + cprinc = ccache.principal() + + sprinc = krbV.Principal(name=self._serverPrincipal(), context=ctx) + + ac = krbV.AuthContext(context=ctx) + ac.flags = krbV.KRB5_AUTH_CONTEXT_DO_SEQUENCE|krbV.KRB5_AUTH_CONTEXT_DO_TIME + ac.rcache = ctx.default_rcache() + + # create and encode the authentication request + (ac, req) = ctx.mk_req(server=sprinc, client=cprinc, + auth_context=ac, ccache=ccache, + options=krbV.AP_OPTS_MUTUAL_REQUIRED) + req_enc = base64.encodestring(req) + + # ask the server to authenticate us + (rep_enc, sinfo_enc, addrinfo) = self.callMethod('krbLogin', req_enc, proxyuser) + + # Set the addrinfo we received from the server + # (necessary before calling rd_priv()) + # addrinfo is in (serveraddr, serverport, clientaddr, clientport) + # format, so swap the pairs because clientaddr is now the local addr + ac.addrs = tuple((addrinfo[2], addrinfo[3], addrinfo[0], addrinfo[1])) + + # decode and read the reply from the server + rep = base64.decodestring(rep_enc) + ctx.rd_rep(rep, auth_context=ac) + + # decode and decrypt the login info + sinfo_priv = base64.decodestring(sinfo_enc) + sinfo_str = ac.rd_priv(sinfo_priv) + sinfo = dict(zip(['session-id', 'session-key'], sinfo_str.split())) + + if not sinfo: + self.logger.warn('No session info received') + return False + self.setSession(sinfo) + + return True + + def _serverPrincipal(self): + """Get the 
Kerberos principal of the server we're connecting + to, based on baseurl. Assume the last two components of the + server name are the Kerberos realm.""" + servername = urlparse.urlparse(self.baseurl)[1] + portspec = servername.find(':') + if portspec != -1: + servername = servername[:portspec] + + parts = servername.split('.') + if len(parts) < 2: + domain = servername.upper() + else: + domain = '.'.join(parts[-2:]).upper() + + return 'host/%s@%s' % (servername, domain) + + def logout(self): + if not self.logged_in: + return + try: + self.proxy.logout() + except AuthExpired: + #this can happen when an exclusive session is forced + pass + self.setSession(None) + + def _forget(self): + """Forget session information, but do not close the session + + This is intended to be used after a fork to prevent the subprocess + from affecting the session accidentally.""" + if not self.logged_in: + return + self.setSession(None) + + #we've had some trouble with this method causing strange problems + #(like infinite recursion). Possibly triggered by initialization failure, + #and possibly due to some interaction with __getattr__. 
+ #Re-enabling with a small improvement + def __del__(self): + if self.__dict__: + try: + self.logout() + except: + pass + + def callMethod(self,name,*args,**opts): + """compatibility wrapper for _callMethod""" + return self._callMethod(name, args, opts) + + def _callMethod(self, name, args, kwargs): + #pass named opts in a way the server can understand + args = encode_args(*args,**kwargs) + + if self.multicall: + self._calls.append({'methodName': name, 'params': args}) + else: + if self.logged_in: + sinfo = self.sinfo.copy() + sinfo['callnum'] = self.callnum + self.callnum += 1 + url = "%s?%s" %(self.baseurl,urllib.urlencode(sinfo)) + proxy = xmlrpclib.ServerProxy(url,**self.proxyOpts) + else: + proxy = self.proxy + tries = 0 + debug = self.opts.get('debug',False) + max_retries = self.opts.get('max_retries',30) + interval = self.opts.get('retry_interval',20) + while tries <= max_retries: + tries += 1 + try: + return proxy.__getattr__(name)(*args) + except Fault, fault: + raise convertFault(fault) + except (socket.error,socket.sslerror,xmlrpclib.ProtocolError),e: + if not self.logged_in: + raise + elif debug: + self.logger.debug("Try #%d for call %d failed: %r" % (tries,self.callnum,e)) + time.sleep(interval) + raise RetryError, "reached maximum number of retries, last call failed with: %s" % sys.exc_info()[1] + + def multiCall(self): + """Execute a multicall (multiple function calls passed to the server + and executed at the same time, with results being returned in a batch). + Before calling this method, the self.multicall field must have + been set to True, and then one or more methods must have been called on + the current session (those method calls will return None). On executing + the multicall, the self.multicall field will be reset to False + (so subsequent method calls will be executed immediately) + and results will be returned in a list. 
The list will contain one element + for each method added to the multicall, in the order it was added to the multicall. + Each element of the list will be either a one-element list containing the result of the + method call, or a map containing "faultCode" and "faultString" keys, describing the + error that occurred during the method call.""" + if not self.multicall: + raise GenericError, 'ClientSession.multicall must be set to True before calling multiCall()' + if len(self._calls) == 0: + return [] + + try: + return self.proxy.multiCall(self._calls) + finally: + self.multicall = False + self._calls = [] + + def __getattr__(self,name): + #if name[:1] == '_': + # raise AttributeError, "no attribute %r" % name + return VirtualMethod(self._callMethod,name) + + def uploadWrapper(self, localfile, path, name=None, callback=None): + """upload a file in chunks using the uploadFile call""" + # XXX - stick in a config or something + start=time.time() + blocksize=204800 + retries=3 + if name is None: + name = os.path.basename(localfile) + fo = file(localfile, "r") #specify bufsize? 
+ totalsize = os.path.getsize(localfile) + ofs = 0 + md5sum = md5.new() + debug = self.opts.get('debug',False) + if callback: + callback(0, totalsize, 0, 0, 0) + while True: + lap = time.time() + contents = fo.read(blocksize) + md5sum.update(contents) + size = len(contents) + data = base64.encodestring(contents) + if size == 0: + # end of file, use offset = -1 to finalize upload + offset = -1 + digest = md5sum.hexdigest() + sz = ofs + else: + offset = ofs + digest = md5.new(contents).hexdigest() + sz = size + del contents + tries = 0 + while True: + if debug: + self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" %(path,name,sz,digest,offset)) + if self.callMethod('uploadFile', path, name, sz, digest, offset, data): + break + if tries <= retries: + tries += 1 + continue + else: + raise GenericError, "Error uploading file %s, offset %d" %(path, offset) + if size == 0: + break + ofs += size + now = time.time() + t1 = now - lap + if t1 <= 0: + t1 = 1 + t2 = now - start + if t2 <= 0: + t2 = 1 + if debug: + self.logger.debug("Uploaded %d bytes in %f seconds (%f kbytes/sec)" % (size,t1,size/t1/1024)) + if debug: + self.logger.debug("Total: %d bytes in %f seconds (%f kbytes/sec)" % (ofs,t2,ofs/t2/1024)) + if callback: + callback(ofs, totalsize, size, t1, t2) + fo.close() + + def downloadTaskOutput(self, taskID, fileName, offset=0, size=-1): + """Download the file with the given name, generated by the task with the + given ID. + + Note: This method does not work with multicall. + """ + if self.multicall: + raise GenericError, 'downloadTaskOutput() may not be called during a multicall' + result = self.callMethod('downloadTaskOutput', taskID, fileName, offset, size) + return base64.decodestring(result) + +class DBHandler(logging.Handler): + """ + A handler class which writes logging records, appropriately formatted, + to a database. + """ + def __init__(self, cnx, table, mapping=None): + """ + Initialize the handler. + + A database connection and table name are required. 
+ """ + logging.Handler.__init__(self) + self.cnx = cnx + self.table = table + if mapping is None: + self.mapping = { 'message': '%(message)s' } + else: + self.mapping = mapping + + def emit(self, record): + """ + Emit a record. + + If a formatter is specified, it is used to format the record. + """ + try: + cursor = self.cnx.cursor() + columns = [] + values = [] + data = {} + record.message = record.getMessage() + for key, value in self.mapping.iteritems(): + value = str(value) + if value.find("%(asctime)") >= 0: + if self.formatter: + fmt = self.formatter + else: + fmt = logging._defaultFormatter + record.asctime = fmt.formatTime(record, fmt.datefmt) + columns.append(key) + values.append("%%(%s)s" % key) + data[key] = value % record.__dict__ + #values.append(_quote(value % record.__dict__)) + columns = ",".join(columns) + values = ",".join(values) + command = "INSERT INTO %s (%s) VALUES (%s)" % (self.table, columns, values) + #note we're letting cursor.execute do the escaping + cursor.execute(command,data) + cursor.close() + self.cnx.commit() + except: + self.handleError(record) + +#used by parse_timestamp +TIMESTAMP_RE = re.compile("(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)") + +def parse_timestamp(ts): + """Parse a timestamp returned from a query""" + m = TIMESTAMP_RE.search(ts) + t = tuple([int(x) for x in m.groups()]) + (0,0,0) + return time.mktime(t) + +def formatTime(value): + """Format a timestamp so it looks nicer""" + if not value: + return '' + elif isinstance(value, datetime.datetime): + return value.strftime('%Y-%m-%d %H:%M:%S') + else: + # trim off the microseconds, if present + dotidx = value.rfind('.') + if dotidx != -1: + return value[:dotidx] + else: + return value + +def formatTimeLong(value): + """Format a timestamp to a more human-reable format, i.e.: + Sat, 07 Sep 2002 00:00:01 GMT + """ + if not value: + return '' + else: + # Assume the string value passed in is the local time + localtime = time.mktime(time.strptime(formatTime(value), '%Y-%m-%d 
%H:%M:%S')) + return time.strftime('%a, %d %b %Y %H:%M:%S %Z', time.localtime(localtime)) + +def buildLabel(buildInfo, showEpoch=False): + """Format buildInfo (dict) into a descriptive label.""" + epoch = buildInfo['epoch'] + if showEpoch and epoch != None: + epochStr = '%i:' % epoch + else: + epochStr = '' + return '%s%s-%s-%s' % (epochStr, buildInfo['package_name'], + buildInfo['version'], buildInfo['release']) + +def taskLabel(taskInfo): + """Format taskInfo (dict) into a descriptive label.""" + method = taskInfo['method'] + arch = taskInfo['arch'] + extra = '' + if method == 'build': + if taskInfo.has_key('request'): + source, target = taskInfo['request'][:2] + if source.startswith('cvs://'): + source = source[source.rfind('/') + 1:] + source = source.replace('#', ':') + else: + source = os.path.basename(source) + extra = '%s, %s' % (target, source) + elif method == 'buildSRPMFromCVS': + if taskInfo.has_key('request'): + url = taskInfo['request'][0] + url = url[url.rfind('/') + 1:] + url = url.replace('#', ':') + extra = url + elif method == 'buildArch': + if taskInfo.has_key('request'): + srpm, tagID, arch = taskInfo['request'][:3] + srpm = os.path.basename(srpm) + extra = '%s, %s' % (srpm, arch) + elif method == 'buildNotification': + if taskInfo.has_key('request'): + build = taskInfo['request'][1] + extra = buildLabel(build) + elif method == 'runroot': + if taskInfo.has_key('request'): + tag, arch, command = taskInfo['request'][:3] + if isinstance(command, str): + cmdlist = command.split() + else: + cmdlist = command + cmdlist = [param for param in cmdlist if '=' not in param] + if cmdlist: + cmd = os.path.basename(cmdlist[0]) + else: + cmd = '(none)' + extra = '%s, %s, %s' % (tag, cmd, arch) + elif method == 'newRepo': + pass + elif method == 'prepRepo': + if taskInfo.has_key('request'): + tagInfo = taskInfo['request'][0] + extra = tagInfo['name'] + elif method == 'createrepo': + if taskInfo.has_key('request'): + arch = taskInfo['request'][1] + extra = arch 
+ elif method == 'dependantTask': + if taskInfo.has_key('request'): + extra = ', '.join([subtask[0] for subtask in taskInfo['request'][1]]) + elif method == 'chainbuild': + if taskInfo.has_key('request'): + extra = taskInfo['request'][1] + elif method == 'waitrepo': + if taskInfo.has_key('request'): + extra = taskInfo['request'][0] + + if extra: + return '%s (%s)' % (method, extra) + else: + return '%s (%s)' % (method, arch) + +def _forceAscii(value): + """Replace characters not in the 7-bit ASCII range + with "?".""" + return ''.join([(ord(c) <= 127) and c or '?' for c in value]) + +def fixEncoding(value, fallback='iso8859-15'): + """ + Convert value to a 'str' object encoded as UTF-8. + If value is not valid UTF-8 to begin with, assume it is + encoded in the 'fallback' charset. + """ + if not value: + return value + + try: + return value.decode('utf8').encode('utf8') + except UnicodeDecodeError, err: + return value.decode(fallback).encode('utf8') + +def add_file_logger(logger, fn): + if not os.path.exists(fn): + try: + fh = open(fn, 'w') + fh.close() + except (ValueError, IOError): + return + if not os.path.isfile(fn): + return + if not os.access(fn,os.W_OK): + return + handler = logging.handlers.RotatingFileHandler(fn, maxBytes=1024*1024*10, backupCount=5) + handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s')) + logging.getLogger(logger).addHandler(handler) + +def add_stderr_logger(logger): + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] {%(process)d} %(name)s:%(lineno)d %(message)s')) + handler.setLevel(logging.DEBUG) + logging.getLogger(logger).addHandler(handler) + +def add_sys_logger(logger): + # For remote logging; + # address = ('host.example.com', logging.handlers.SysLogHandler.SYSLOG_UDP_PORT) + address = "/dev/log" + handler = logging.handlers.SysLogHandler(address=address, + facility=logging.handlers.SysLogHandler.LOG_DAEMON) + 
handler.setFormatter(logging.Formatter('%(name)s: %(message)s')) + handler.setLevel(logging.INFO) + logging.getLogger(logger).addHandler(handler) + +def add_mail_logger(logger, addr): + if not addr: + return + handler = logging.handlers.SMTPHandler("localhost", + "%s@%s" % (pwd.getpwuid(os.getuid())[0], socket.getfqdn()), + addr, + "%s: error notice" % socket.getfqdn()) + handler.setFormatter(logging.Formatter('%(pathname)s:%(lineno)d [%(levelname)s] %(message)s')) + handler.setLevel(logging.ERROR) + logging.getLogger(logger).addHandler(handler) + +def add_db_logger(logger, cnx): + handler = DBHandler(cnx, "log_messages", {'message': '%(message)s', + 'message_time': '%(asctime)s', + 'logger_name': '%(name)s', + 'level': '%(levelname)s', + 'location': '%(pathname)s:%(lineno)d', + 'host': commands.getoutput("hostname"), + }) + handler.setFormatter(logging.Formatter(datefmt='%Y-%m-%d %H:%M:%S')) + logging.getLogger(logger).addHandler(handler) + return handler + +def remove_log_handler(logger, handler): + logging.getLogger(logger).removeHandler(handler) diff --git a/koji/auth.py b/koji/auth.py new file mode 100644 index 00000000..69b5de52 --- /dev/null +++ b/koji/auth.py @@ -0,0 +1,587 @@ +# authentication module +# Copyright (c) 2005-2007 Red Hat +# +# Koji is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this software; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +# +# Authors: +# Mike McLean + +import socket +import string +import random +import base64 +import krbV +import koji +import cgi #for parse_qs +from context import context +from mod_python import apache + +# 1 - load session if provided +# - check uri for session id +# - load session info from db +# - validate session +# 2 - create a session +# - maybe in two steps +# - + +class Session(object): + + def __init__(self,args=None,hostip=None): + self.logged_in = False + self.id = None + self.master = None + self.key = None + self.user_id = None + self.message = '' + self.exclusive = False + self.lockerror = None + self.callnum = None + #get session data from request + if args is None: + req = getattr(context,'req',None) + args = getattr(req,'args',None) + if not args: + self.message = 'no session args' + return + args = cgi.parse_qs(args,strict_parsing=True) + if hostip is None: + hostip = context.req.get_remote_host(apache.REMOTE_NOLOOKUP) + if hostip == '127.0.0.1': + hostip = socket.gethostbyname(socket.gethostname()) + try: + id = long(args['session-id'][0]) + key = args['session-key'][0] + except KeyError, field: + raise koji.AuthError, '%s not specified in session args' % field + try: + callnum = args['callnum'][0] + except: + callnum = None + #lookup the session + c = context.cnx.cursor() + fields = ('user_id','authtype','expired','start_time','update_time', + 'master','exclusive','callnum') + q = """ + SELECT %s FROM sessions + WHERE id = %%(id)i + AND key = %%(key)s + AND hostip = %%(hostip)s + FOR UPDATE + """ % ",".join(fields) + c.execute(q,locals()) + row = c.fetchone() + if not row: + raise koji.AuthError, 'Invalid session or bad credentials' + session_data = dict(zip(fields,row)) + #check for expiration + if session_data['expired']: 
+ raise koji.AuthExpired, 'session "%i" has expired' % id + #check for callnum sanity + if callnum is not None: + try: + callnum = int(callnum) + except (ValueError,TypeError): + raise koji.AuthError, "Invalid callnum: %r" % callnum + lastcall = session_data['callnum'] + if lastcall is not None: + if lastcall > callnum: + raise koji.SequenceError, "%d > %d (session %d)" \ + % (lastcall,callnum,id) + elif lastcall == callnum: + #Some explanation: + #This function is one of the few that performs its own commit. + #However, our storage of the current callnum is /after/ that + #commit. This means the the current callnum only gets commited if + #a commit happens afterward. + #We only schedule a commit for dml operations, so if we find the + #callnum in the db then a previous attempt succeeded but failed to + #return. Data was changed, so we cannot simply try the call again. + raise koji.RetryError, \ + "unable to retry call %d (method %s) for session %d" \ + % (callnum, getattr(context, 'method', 'UNKNOWN'), id) + + # read user data + #historical note: + # we used to get a row lock here as an attempt to maintain sanity of exclusive + # sessions, but it was an imperfect approach and the lock could cause some + # performance issues. 
+ fields = ('name','status','usertype') + q = """SELECT %s FROM users WHERE id=%%(user_id)s""" % ','.join(fields) + c.execute(q,session_data) + user_data = dict(zip(fields,c.fetchone())) + + if user_data['status'] == koji.USER_STATUS['BLOCKED']: + raise koji.AuthError, 'User not allowed' + #check for exclusive sessions + if session_data['exclusive']: + #we are the exclusive session for this user + self.exclusive = True + else: + #see if an exclusive session exists + q = """SELECT id FROM sessions WHERE user_id=%(user_id)s + AND "exclusive" = TRUE AND expired = FALSE""" + #should not return multiple rows (unique constraint) + c.execute(q,session_data) + row = c.fetchone() + if row: + (excl_id,) = row + if excl_id == session_data['master']: + #(note excl_id cannot be None) + #our master session has the lock + self.exclusive = True + else: + #a session unrelated to us has the lock + self.lockerror = "User locked by another session" + # we don't enforce here, but rely on the dispatcher to enforce + # if appropriate (otherwise it would be impossible to steal + # an exclusive session with the force option). 
+ + # update timestamp + q = """UPDATE sessions SET update_time=NOW() WHERE id = %(id)i""" + c.execute(q,locals()) + #save update time + context.cnx.commit() + + #update callnum (this is deliberately after the commit) + #see earlier note near RetryError + if callnum is not None: + q = """UPDATE sessions SET callnum=%(callnum)i WHERE id = %(id)i""" + c.execute(q,locals()) + + # record the login data + self.id = id + self.key = key + self.hostip = hostip + self.callnum = callnum + self.user_id = session_data['user_id'] + self.authtype = session_data['authtype'] + self.master = session_data['master'] + self.session_data = session_data + self.user_data = user_data + # we look up perms, groups, and host_id on demand, see __getattr__ + self._perms = None + self._groups = None + self._host_id = '' + self.logged_in = True + + def __getattr__(self, name): + # grab perm and groups data on the fly + if name == 'perms': + if self._perms is None: + #in a dict for quicker lookup + self._perms = dict([[name,1] for name in get_user_perms(self.user_id)]) + return self._perms + elif name == 'groups': + if self._groups is None: + self._groups = get_user_groups(self.user_id) + return self._groups + elif name == 'host_id': + if self._host_id == '': + self._host_id = self._getHostId() + return self._host_id + else: + raise AttributeError, "%s" % name + + def __str__(self): + # convenient display for debugging + if not self.logged_in: + s = "session: not logged in" + else: + s = "session %d: %r" % (self.id, self.__dict__) + if self.message: + s += " (%s)" % self.message + return s + + def validate(self): + if self.lockerror: + raise koji.AuthLockError, self.lockerror + return True + + def login(self,user,password,opts=None): + """create a login session""" + if opts is None: + opts = {} + if not isinstance(password,str) or len(password) == 0: + raise koji.AuthError, 'invalid username or password' + if self.logged_in: + raise koji.GenericError, "Already logged in" + hostip = 
opts.get('hostip') + if hostip is None: + hostip = context.req.get_remote_host(apache.REMOTE_NOLOOKUP) + if hostip == '127.0.0.1': + hostip = socket.gethostbyname(socket.gethostname()) + + # check passwd + c = context.cnx.cursor() + q = """SELECT id,status,usertype FROM users + WHERE name = %(user)s AND password = %(password)s""" + c.execute(q,locals()) + r = c.fetchone() + if not r: + raise koji.AuthError, 'invalid username or password' + (user_id,status,usertype) = r + + #create session and return + sinfo = self.createSession(user_id, hostip, koji.AUTHTYPE_NORMAL) + session_id = sinfo['session-id'] + context.cnx.commit() + return sinfo + + def krbLogin(self, krb_req, proxyuser=None): + """Authenticate the user using the base64-encoded + AP_REQ message in krb_req. If proxyuser is not None, + log in that user instead of the user associated with the + Kerberos principal. The principal must be an authorized + "proxy_principal" in the server config.""" + if self.logged_in: + raise koji.AuthError, "Already logged in" + + ctx = krbV.default_context() + srvprinc = krbV.Principal(name=context.req.get_options()['AuthPrincipal'], context=ctx) + srvkt = krbV.Keytab(name=context.req.get_options()['AuthKeytab'], context=ctx) + + ac = krbV.AuthContext(context=ctx) + ac.flags = krbV.KRB5_AUTH_CONTEXT_DO_SEQUENCE|krbV.KRB5_AUTH_CONTEXT_DO_TIME + conninfo = self.getConnInfo() + ac.addrs = conninfo + + # decode and read the authentication request + req = base64.decodestring(krb_req) + ac, opts, sprinc, ccreds = ctx.rd_req(req, server=srvprinc, keytab=srvkt, + auth_context=ac, + options=krbV.AP_OPTS_MUTUAL_REQUIRED) + cprinc = ccreds[2] + + # Successfully authenticated via Kerberos, now log in + if proxyuser: + proxyprincs = [princ.strip() for princ in context.req.get_options()['ProxyPrincipals'].split(',')] + if cprinc.name in proxyprincs: + login_principal = proxyuser + else: + raise koji.AuthError, \ + 'Kerberos principal %s is not authorized to log in other users' % cprinc.name 
+ else: + login_principal = cprinc.name + user_id = self.getUserIdFromKerberos(login_principal) + if not user_id: + user_id = self.createUserFromKerberos(login_principal) + + hostip = context.req.connection.remote_ip + if hostip == '127.0.0.1': + hostip = socket.gethostbyname(socket.gethostname()) + + sinfo = self.createSession(user_id, hostip, koji.AUTHTYPE_KERB) + + # encode the reply + rep = ctx.mk_rep(auth_context=ac) + rep_enc = base64.encodestring(rep) + + # encrypt and encode the login info + sinfo_priv = ac.mk_priv('%(session-id)s %(session-key)s' % sinfo) + sinfo_enc = base64.encodestring(sinfo_priv) + + return (rep_enc, sinfo_enc, conninfo) + + def getConnInfo(self): + """Return a tuple containing connection information + in the following format: + (local ip addr, local port, remote ip, remote port)""" + # For some reason req.connection.{local,remote}_addr contain port info, + # but no IP info. Use req.connection.{local,remote}_ip for that instead. + # See: http://lists.planet-lab.org/pipermail/devel-community/2005-June/001084.html + # local_ip seems to always be set to the same value as remote_ip, + # so get the local ip via a different method + # local_ip = context.req.connection.local_ip + local_ip = socket.gethostbyname(context.req.hostname) + remote_ip = context.req.connection.remote_ip + + # it appears that calling setports() with *any* value results in authentication + # failing with "Incorrect net address", so return 0 (which prevents + # python-krbV from calling setports()) + # local_port = context.req.connection.local_addr[1] + # remote_port = context.req.connection.remote_addr[1] + local_port = 0 + remote_port = 0 + + return (local_ip, local_port, remote_ip, remote_port) + + def makeExclusive(self,force=False): + """Make this session exclusive""" + c = context.cnx.cursor() + if self.master is not None: + raise koji.GenericError, "subsessions cannot become exclusive" + if self.exclusive: + #shouldn't happen + raise koji.GenericError, "session is 
already exclusive" + user_id = self.user_id + session_id = self.id + #acquire a row lock on the user entry + q = """SELECT id FROM users WHERE id=%(user_id)s FOR UPDATE""" + c.execute(q,locals()) + # check that no other sessions for this user are exclusive + q = """SELECT id FROM sessions WHERE user_id=%(user_id)s + AND expired = FALSE AND "exclusive" = TRUE + FOR UPDATE""" + c.execute(q,locals()) + row = c.fetchone() + if row: + if force: + #expire the previous exclusive session and try again + (excl_id,) = row + q = """UPDATE sessions SET expired=TRUE,"exclusive"=NULL WHERE id=%(excl_id)s""" + c.execute(q,locals()) + else: + raise koji.AuthLockError, "Cannot get exclusive session" + #mark this session exclusive + q = """UPDATE sessions SET "exclusive"=TRUE WHERE id=%(session_id)s""" + c.execute(q,locals()) + context.cnx.commit() + + def makeShared(self): + """Drop out of exclusive mode""" + c = context.cnx.cursor() + session_id = self.id + q = """UPDATE sessions SET "exclusive"=NULL WHERE id=%(session_id)s""" + c.execute(q,locals()) + context.cnx.commit() + + def logout(self): + """expire a login session""" + if not self.logged_in: + #XXX raise an error? + raise koji.AuthError, "Not logged in" + update = """UPDATE sessions + SET expired=TRUE,exclusive=NULL + WHERE id = %(id)i OR master = %(id)i""" + #note we expire subsessions as well + c = context.cnx.cursor() + c.execute(update, {'id': self.id}) + context.cnx.commit() + self.logged_in = False + + def logoutChild(self, session_id): + """expire a subsession""" + if not self.logged_in: + #XXX raise an error? + raise koji.AuthError, "Not logged in" + update = """UPDATE sessions + SET expired=TRUE,exclusive=NULL + WHERE id = %(session_id)i AND master = %(master)i""" + master = self.id + c = context.cnx.cursor() + c.execute(update, locals()) + context.cnx.commit() + + def createSession(self, user_id, hostip, authtype, master=None, locked=False): + """Create a new session for the given user. 
+ + Return a map containing the session-id and session-key. + If master is specified, create a subsession + """ + c = context.cnx.cursor() + if not locked: + #acquire a row lock on the user entry + q = """SELECT id FROM users WHERE id=%(user_id)s""" + c.execute(q,locals()) + + # generate a random key + alnum = string.ascii_letters + string.digits + key = "%s-%s" %(user_id, + ''.join([ random.choice(alnum) for x in range(1,20) ])) + # use sha? sha.new(phrase).hexdigest() + + # get a session id + q = """SELECT nextval('sessions_id_seq')""" + c.execute(q, {}) + (session_id,) = c.fetchone() + + #add session id to database + q = """ + INSERT INTO sessions (id, user_id, key, hostip, authtype, master) + VALUES (%(session_id)i, %(user_id)i, %(key)s, %(hostip)s, %(authtype)i, %(master)s) + """ + c.execute(q,locals()) + context.cnx.commit() + + #return session info + return {'session-id' : session_id, 'session-key' : key} + + def subsession(self): + "Create a subsession" + if not self.logged_in: + raise koji.AuthError, "Not logged in" + master = self.master + if master is None: + master=self.id + return self.createSession(self.user_id, self.hostip, self.authtype, + master=master) + + def getPerms(self): + if not self.logged_in: + return [] + return self.perms.keys() + + def hasPerm(self, name): + if not self.logged_in: + return False + return self.perms.has_key(name) + + def assertPerm(self, name): + if not self.hasPerm(name) and not self.hasPerm('admin'): + raise koji.NotAllowed, "%s permission required" % name + + def hasGroup(self, group_id): + if not self.logged_in: + return False + #groups indexed by id + return self.groups.has_key(group_id) + + def isUser(self, user_id): + if not self.logged_in: + return False + return ( self.user_id == user_id or self.hasGroup(user_id) ) + + def assertUser(self, user_id): + if not self.isUser(user_id) and not self.hasPerm('admin'): + raise koji.NotAllowed, "not owner" + + def _getHostId(self): + '''Using session data, find host id (if 
there is one)''' + if self.user_id is None: + return None + c=context.cnx.cursor() + q="""SELECT id FROM host WHERE user_id = %(uid)d""" + c.execute(q,{'uid' : self.user_id }) + r=c.fetchone() + c.close() + if r: + return r[0] + else: + return None + + def getHostId(self): + #for compatibility + return self.host_id + + def getUserIdFromKerberos(self, krb_principal): + """Return the user ID associated with a particular Kerberos principal. + If no user with the given princpal if found, return None.""" + c = context.cnx.cursor() + q = """SELECT id FROM users WHERE krb_principal = %(krb_principal)s""" + c.execute(q,locals()) + r = c.fetchone() + c.close() + if r: + return r[0] + else: + return None + + def createUserFromKerberos(self, krb_principal): + """Create a new user, based on the Kerberos principal. Their + username will be everything before the "@" in the principal. + Return the ID of the newly created user.""" + atidx = krb_principal.find('@') + if atidx == -1: + raise koji.AuthError, 'invalid Kerberos principal: %s' % krb_principal + user_name = krb_principal[:atidx] + user_type = koji.USERTYPES['NORMAL'] + + c = context.cnx.cursor() + select = """SELECT nextval('users_id_seq')""" + c.execute(select, locals()) + user_id = c.fetchone()[0] + + insert = """INSERT INTO users (id, name, password, usertype, krb_principal) + VALUES (%(user_id)i, %(user_name)s, null, %(user_type)i, %(krb_principal)s)""" + c.execute(insert, locals()) + context.cnx.commit() + + return user_id + +def get_user_groups(user_id): + """Get user groups + + returns a dictionary where the keys are the group ids and the values + are the group names""" + c = context.cnx.cursor() + t_group = koji.USERTYPES['GROUP'] + q = """SELECT group_id,name + FROM user_groups JOIN users ON group_id = users.id + WHERE active = TRUE AND users.usertype=%(t_group)i + AND user_id=%(user_id)i""" + c.execute(q,locals()) + return dict(c.fetchall()) + +def get_user_perms(user_id): + c = context.cnx.cursor() + q = 
"""SELECT name + FROM user_perms JOIN permissions ON perm_id = permissions.id + WHERE active = TRUE AND user_id=%(user_id)s""" + c.execute(q,locals()) + #return a list of permissions by name + return [row[0] for row in c.fetchall()] + +def get_user_data(user_id): + c = context.cnx.cursor() + fields = ('name','status','usertype') + q = """SELECT %s FROM users WHERE id=%%(user_id)s""" % ','.join(fields) + c.execute(q,locals()) + row = c.fetchone() + if not row: + return None + return dict(zip(fields,row)) + +def login(*args,**opts): + return context.session.login(*args,**opts) + +def krbLogin(*args, **opts): + return context.session.krbLogin(*args, **opts) + +def logout(): + return context.session.logout() + +def subsession(): + return context.session.subsession() + +def logoutChild(session_id): + return context.session.logoutChild(session_id) + +def exclusiveSession(*args,**opts): + """Make this session exclusive""" + return context.session.makeExclusive(*args,**opts) + +def sharedSession(): + """Drop out of exclusive mode""" + return context.session.makeShared() + + +if __name__ == '__main__': + # XXX - testing defaults + import db + db.setDBopts( database = "test", user = "test") + print "Connecting to db" + context.cnx = db.connect() + print "starting session 1" + sess = Session(None,hostip='127.0.0.1') + print "Session 1: %s" % sess + print "logging in with session 1" + session_info = sess.login('host/1','foobar',{'hostip':'127.0.0.1'}) + #wrap values in lists + session_info = dict([ [k,[v]] for k,v in session_info.iteritems()]) + print "Session 1: %s" % sess + print "Session 1 info: %r" % session_info + print "Creating session 2" + s2 = Session(session_info,'127.0.0.1') + print "Session 2: %s " % s2 diff --git a/koji/context.py b/koji/context.py new file mode 100644 index 00000000..17d10bcc --- /dev/null +++ b/koji/context.py @@ -0,0 +1,109 @@ +#!/usr/bin/python +# Copyright (c) 2005-2007 Red Hat +# +# Koji is free software; you can redistribute it and/or +# 
modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this software; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +# This modules provides a thread-safe way of passing +# request context around in a global way +# - db connections +# - request data +# - auth data + +import thread + +class _data(object): + pass + +class ThreadLocal(object): + def __init__(self): + object.__setattr__(self, '_tdict', {}) + + # should probably be getattribute, but easier to debug this way + def __getattr__(self, key): + id = thread.get_ident() + tdict = object.__getattribute__(self, '_tdict') + if not tdict.has_key(id): + raise AttributeError(key) + data = tdict[id] + return object.__getattribute__(data, key) + + def __setattr__(self, key, value): + id = thread.get_ident() + tdict = object.__getattribute__(self, '_tdict') + if not tdict.has_key(id): + tdict[id] = _data() + data = tdict[id] + return object.__setattr__(data,key,value) + + def __delattr__(self, key): + id = thread.get_ident() + tdict = object.__getattribute__(self, '_tdict') + if not tdict.has_key(id): + raise AttributeError(key) + data = tdict[id] + ret = object.__delattr__(data, key) + if len(data.__dict__) == 0: + del tdict[id] + return ret + + def __str__(self): + id = thread.get_ident() + tdict = object.__getattribute__(self, '_tdict') + return "(current thread: %s) {" % id + \ + ", ".join([ "%s : %s" %(k,v.__dict__) for (k,v) in tdict.iteritems() ]) + \ + "}" + + def _threadclear(self): + id = thread.get_ident() 
+ tdict = object.__getattribute__(self, '_tdict') + if not tdict.has_key(id): + return + del tdict[id] + + +context = ThreadLocal() + + +if __name__ == '__main__': + + #testing + + #context.foo = 1 + #context.bar = 2 + print context + #del context.bar + print context + + import random + import time + def test(): + context.foo=random.random() + time.sleep(1.5+random.random()) + context._threadclear() + print context + + for x in xrange(1,10): + thread.start_new_thread(test,()) + + time.sleep(4) + print + print context + + context.foo = 1 + context.bar = 2 + print context.foo,context.bar + print context + context._threadclear() + print context diff --git a/koji/db.py b/koji/db.py new file mode 100644 index 00000000..5c55e8f7 --- /dev/null +++ b/koji/db.py @@ -0,0 +1,137 @@ +# python library + +# db utilities for koji +# Copyright (c) 2005-2007 Red Hat +# +# Koji is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this software; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +# +# Authors: +# Mike McLean + + +import sys +import pgdb +import time +from pgdb import _quoteparams +assert pgdb.threadsafety >= 1 +import context + +## Globals ## +_DBopts = None +# A persistent connection to the database. +# A new connection will be created whenever +# Apache forks a new worker, and that connection +# will be used to service all requests handled +# by that worker. 
+# This probably doesn't need to be a ThreadLocal +# since Apache is not using threading, +# but play it safe anyway. +_DBconn = context.ThreadLocal() + +class DBWrapper: + def __init__(self, cnx, debug=False): + self.cnx = cnx + self.debug = debug + + def __getattr__(self, key): + if not self.cnx: + raise StandardError, 'connection is closed' + return getattr(self.cnx, key) + + def cursor(self, *args, **kw): + if not self.cnx: + raise StandardError, 'connection is closed' + return CursorWrapper(self.cnx.cursor(*args, **kw),self.debug) + + def close(self): + # Rollback any uncommitted changes and clear the connection so + # this DBWrapper is no longer usable after close() + if not self.cnx: + raise StandardError, 'connection is closed' + self.cnx.rollback() + self.cnx = None + +class CursorWrapper: + def __init__(self, cursor, debug=False): + self.cursor = cursor + self.debug = debug + + def __getattr__(self, key): + return getattr(self.cursor, key) + + def _timed_call(self, method, args, kwargs): + if self.debug: + start = time.time() + ret = getattr(self.cursor,method)(*args,**kwargs) + if self.debug: + sys.stderr.write("%s operation completed in %.4f seconds\n" % + (method, time.time() - start)) + sys.stderr.flush() + return ret + + def fetchone(self,*args,**kwargs): + return self._timed_call('fetchone',args,kwargs) + + def fetchall(self,*args,**kwargs): + return self._timed_call('fetchall',args,kwargs) + + def execute(self, operation, parameters=()): + if self.debug: + sys.stderr.write(_quoteparams(operation,parameters)) + sys.stderr.write("\n") + sys.stderr.flush() + start = time.time() + ret = self.cursor.execute(operation, parameters) + if self.debug: + sys.stderr.write("Execute operation completed in %.4f seconds\n" % + (time.time() - start)) + sys.stderr.flush() + return ret + + +## Functions ## +def provideDBopts(**opts): + global _DBopts + if _DBopts is None: + _DBopts = opts + +def setDBopts(**opts): + global _DBopts + _DBopts = opts + +def getDBopts(): 
+ return _DBopts + +def connect(debug=False): + global _DBconn + if hasattr(_DBconn, 'conn'): + # Make sure the previous transaction has been + # closed. This is safe to call multiple times. + conn = _DBconn.conn + conn.rollback() + else: + opts = _DBopts + if opts is None: + opts = {} + conn = pgdb.connect(**opts) + # XXX test + # return conn + _DBconn.conn = conn + + return DBWrapper(conn, debug) + +if __name__ == "__main__": + setDBopts( database = "test", user = "test") + print "This is a Python library" diff --git a/koji/util.py b/koji/util.py new file mode 100644 index 00000000..8bb2a8ef --- /dev/null +++ b/koji/util.py @@ -0,0 +1,33 @@ +# Copyright (c) 2005-2007 Red Hat +# +# Koji is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this software; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +import time +import koji + +def _changelogDate(cldate): + return time.strftime('%a %b %d %Y', time.strptime(koji.formatTime(cldate), '%Y-%m-%d %H:%M:%S')) + +def formatChangelog(entries): + """Format a list of changelog entries (dicts) + into a string representation.""" + result = '' + for entry in entries: + result += """* %s %s +%s + +""" % (_changelogDate(entry['date']), entry['author'], entry['text']) + + return result diff --git a/tests/.cvsignore b/tests/.cvsignore new file mode 100644 index 00000000..b275b0df --- /dev/null +++ b/tests/.cvsignore @@ -0,0 +1,3 @@ +*.pyc +*.pyo +test.py diff --git a/tests/runtests.py b/tests/runtests.py new file mode 100755 index 00000000..699e9e4e --- /dev/null +++ b/tests/runtests.py @@ -0,0 +1,32 @@ +#!/usr/bin/python + +"""Wrapper script for running unit tests""" + +__version__ = "$Revision: 1.1 $" + +import sys +import os +import os.path +import unittest + +testDir = os.path.dirname(sys.argv[0]) + +sys.path.insert(0, os.path.abspath('%s/..' % testDir)) + +allTests = unittest.TestSuite() +for root, dirs, files in os.walk(testDir): + common_path = os.path.commonprefix([os.path.abspath(testDir), + os.path.abspath(root)]) + root_path = os.path.abspath(root).replace(common_path, '').lstrip('/').replace('/', '.') + + for test_file in [item for item in files + if item.startswith("test_") and item.endswith(".py")]: + if len(sys.argv) == 1 or test_file in sys.argv[1:]: + print "adding %s..." 
% test_file + test_file = test_file[:-3] + if root_path: + test_file = "%s.%s" % (root_path, test_file) + suite = unittest.defaultTestLoader.loadTestsFromName(test_file) + allTests.addTests(suite._tests) + +unittest.TextTestRunner(verbosity=2).run(allTests) diff --git a/tests/test___init__.py b/tests/test___init__.py new file mode 100644 index 00000000..93de88a3 --- /dev/null +++ b/tests/test___init__.py @@ -0,0 +1,67 @@ +#!/usr/bin/python + +"""Test the __init__.py module""" + +import koji +import unittest + +class INITTestCase(unittest.TestCase): + """Main test case container""" + + def test_parse_NVR(self): + """Test the parse_NVR method""" + + self.assertRaises(AttributeError, koji.parse_NVR, None) + self.assertRaises(AttributeError, koji.parse_NVR, 1) + self.assertRaises(AttributeError, koji.parse_NVR, {}) + self.assertRaises(AttributeError, koji.parse_NVR, []) + self.assertRaises(koji.GenericError, koji.parse_NVR, "") + self.assertRaises(koji.GenericError, koji.parse_NVR, "foo") + self.assertRaises(koji.GenericError, koji.parse_NVR, "foo-1") + self.assertRaises(koji.GenericError, koji.parse_NVR, "foo-1-") + self.assertRaises(koji.GenericError, koji.parse_NVR, "foo--1") + self.assertRaises(koji.GenericError, koji.parse_NVR, "--1") + ret = koji.parse_NVR("foo-1-2") + self.assertEqual(ret['name'], "foo") + self.assertEqual(ret['version'], "1") + self.assertEqual(ret['release'], "2") + self.assertEqual(ret['epoch'], "") + ret = koji.parse_NVR("12:foo-1-2") + self.assertEqual(ret['name'], "foo") + self.assertEqual(ret['version'], "1") + self.assertEqual(ret['release'], "2") + self.assertEqual(ret['epoch'], "12") + + def test_parse_NVRA(self): + """Test the parse_NVRA method""" + + self.assertRaises(AttributeError, koji.parse_NVRA, None) + self.assertRaises(AttributeError, koji.parse_NVRA, 1) + self.assertRaises(AttributeError, koji.parse_NVRA, {}) + self.assertRaises(AttributeError, koji.parse_NVRA, []) + self.assertRaises(koji.GenericError, koji.parse_NVRA, "") + 
self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo") + self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo-1") + self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo-1-") + self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo--1") + self.assertRaises(koji.GenericError, koji.parse_NVRA, "--1") + self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo-1-1") + self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo-1-1.") + self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo-1.-1") + ret = koji.parse_NVRA("foo-1-2.i386") + self.assertEqual(ret['name'], "foo") + self.assertEqual(ret['version'], "1") + self.assertEqual(ret['release'], "2") + self.assertEqual(ret['epoch'], "") + self.assertEqual(ret['arch'], "i386") + self.assertEqual(ret['src'], False) + ret = koji.parse_NVRA("12:foo-1-2.src") + self.assertEqual(ret['name'], "foo") + self.assertEqual(ret['version'], "1") + self.assertEqual(ret['release'], "2") + self.assertEqual(ret['epoch'], "12") + self.assertEqual(ret['arch'], "src") + self.assertEqual(ret['src'], True) + +if __name__ == '__main__': + unittest.main() diff --git a/util/Makefile b/util/Makefile new file mode 100644 index 00000000..9a055c1d --- /dev/null +++ b/util/Makefile @@ -0,0 +1,24 @@ +BINFILES = kojira + +_default: + @echo "nothing to make. 
try make install" + +clean: + rm -f *.o *.so *.pyc *~ + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + mkdir -p $(DESTDIR)/usr/sbin + install -m 755 $(BINFILES) $(DESTDIR)/usr/sbin + + mkdir -p $(DESTDIR)/etc/rc.d/init.d + install -m 755 kojira.init $(DESTDIR)/etc/rc.d/init.d/kojira + + mkdir -p $(DESTDIR)/etc/sysconfig + install -m 644 kojira.sysconfig $(DESTDIR)/etc/sysconfig/kojira + + install -m 644 kojira.conf $(DESTDIR)/etc/kojira.conf diff --git a/util/kojira b/util/kojira new file mode 100755 index 00000000..2dacb079 --- /dev/null +++ b/util/kojira @@ -0,0 +1,487 @@ +#!/usr/bin/python + +# Koji Repository Administrator (kojira) +# Copyright (c) 2005-2007 Red Hat +# +# Koji is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; +# version 2.1 of the License. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this software; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +# +# Authors: +# Mike McLean + +try: + import krbV +except ImportError: + pass +import sys +import os +import koji +from optparse import OptionParser +from ConfigParser import ConfigParser +import logging +import logging.handlers +import pprint +import signal +import time +import traceback + + + +def safe_rmtree(path, strict=True): + logger = logging.getLogger("koji.repo") + #safe remove: with -xdev the find cmd will not cross filesystems + # (though it will cross bind mounts from the same filesystem) + if not os.path.exists(path): + logger.debug("No such path: %s" % path) + return + #first rm -f non-directories + logger.debug('Removing files under %s' % path) + rv = os.system("find '%s' -xdev \\! -type d -print0 |xargs -0 rm -f" % path) + msg = 'file removal failed (code %r) for %s' % (rv,path) + if rv != 0: + logger.warn(msg) + if strict: + raise koji.GenericError, msg + else: + return rv + #them rmdir directories + #with -depth, we start at the bottom and work up + logger.debug('Removing directories under %s' % path) + rv = os.system("find '%s' -xdev -depth -type d -print0 |xargs -0 rmdir" % path) + msg = 'dir removal failed (code %r) for %s' % (rv,path) + if rv != 0: + logger.warn(msg) + if strict: + raise koji.GenericError, msg + return rv + +class ManagedRepo(object): + + def __init__(self, data): + self.logger = logging.getLogger("koji.repo") + self.current = True + self.repo_id = data['id'] + self.event_id = data['create_event'] + self.event_ts = data['create_ts'] + self.tag_id = data['tag_id'] + self.state = data['state'] + order = session.getFullInheritance(self.tag_id, event=self.event_id) + #order may contain same tag more than once + tags = {self.tag_id : 1} + for x in order: + tags[x['parent_id']] = 1 + self.taglist = tags.keys() + 
+ def expire(self): + """Mark the repo expired""" + if self.state == koji.REPO_EXPIRED: + return + elif self.state == koji.REPO_DELETED: + raise GenericError, "Repo already deleted" + self.logger.info("Expiring repo %s.." % self.repo_id) + session.repoExpire(self.repo_id) + self.state = koji.REPO_EXPIRED + + def expired(self): + return self.state == koji.REPO_EXPIRED + + def pending(self, timeout=180): + """Determine if repo generation appears to be in progress and not already obsolete""" + if self.state != koji.REPO_INIT: + return False + age = time.time() - self.event_ts + return self.isCurrent(ignore_state=True) and age < timeout + + def stale(self): + """Determine if repo seems stale + + By stale, we mean: + - state=INIT + - timestamp really, really old + """ + timeout = 36000 + #XXX - config + if self.state != koji.REPO_INIT: + return False + age = time.time() - self.event_ts + return age > timeout + + def tryDelete(self): + """Remove the repo from disk, if possible""" + age = time.time() - self.event_ts + if age < options.deleted_repo_lifetime: + return False + self.logger.debug("Attempting to delete repo %s.." 
% self.repo_id) + if self.state != koji.REPO_EXPIRED: + raise GenericError, "Repo not expired" + if session.repoDelete(self.repo_id) > 0: + #cannot delete, we are referenced by a buildroot + self.logger.debug("Cannot delete repo %s, still referenced" % self.repo_id) + return False + self.logger.info("Deleted repo %s" % self.repo_id) + self.state = koji.REPO_DELETED + tag_name = session.getTag(self.tag_id)['name'] + path = pathinfo.repo(self.repo_id, tag_name) + safe_rmtree(path, strict=False) + return True + + def ready(self): + return self.state == koji.REPO_READY + + def deleted(self): + return self.state == koji.REPO_DELETED + + def problem(self): + return self.state == koji.REPO_PROBLEM + + def isCurrent(self, ignore_state=False): + if not self.current: + # no point in checking again + return False + if not ignore_state and self.state != koji.REPO_READY: + #also no point in checking + return False + self.logger.debug("Checking for changes: %r" % self.taglist) + if session.tagChangedSinceEvent(self.event_id,self.taglist): + self.logger.debug("Tag data has changed since event %r" % self.event_id) + self.current = False + else: + self.logger.debug("No tag changes since event %r" % self.event_id) + return self.current + + +class RepoManager(object): + + def __init__(self): + self.repos = {} + self.tasks = {} + self.logger = logging.getLogger("koji.repo.manager") + + def printState(self): + for repo in self.repos.itervalues(): + self.logger.debug("repo %s: tag=%s, state=%s" + % (repo.repo_id, repo.tag_id, koji.REPO_STATES[repo.state])) + for tag_id, task_id in self.tasks.iteritems(): + self.logger.debug("task %s for tag %s" % (task_id, tag_id)) + + def readCurrentRepos(self): + self.logger.debug("Reading current repo data") + repodata = session.getActiveRepos() + self.logger.debug("Repo data: %r" % repodata) + for data in repodata: + repo_id = data['id'] + repo = self.repos.get(repo_id) + if repo: + #we're already tracking it + if repo.state != data['state']: + 
self.logger.info('State changed for repo %s: %s -> %s' + %(repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']])) + repo.state = data['state'] + else: + self.logger.info('Found repo %s, state=%s' + %(repo_id, koji.REPO_STATES[data['state']])) + self.repos[repo_id] = ManagedRepo(data) + + def pruneLocalRepos(self): + """Scan filesystem for repos and remove any deleted ones + + Also, warn about any oddities""" + self.logger.debug("Scanning filesystem for repos") + topdir = "%s/repos" % pathinfo.topdir + count = 0 + for tag in os.listdir(topdir): + tagdir = "%s/%s" % (topdir, tag) + if not os.path.isdir(tagdir): + continue + taginfo = session.getTag(tag) + if taginfo is None: + self.logger.warn("Unexpected directory (no such tag): %s" % tagdir) + continue + for repo_id in os.listdir(tagdir): + if count >= options.prune_batch_size: + #this keeps us from spending too much time on this at one time + return + repodir = "%s/%s" % (tagdir, repo_id) + if not os.path.isdir(repodir): + continue + try: + repo_id = int(repo_id) + except ValueError: + continue + if self.repos.has_key(repo_id): + #we're already managing it, no need to deal with it here + continue + rinfo = session.repoInfo(repo_id) + if rinfo is None: + try: + age = time.time() - os.stat(repodir).st_mtime + except OSError: + #just in case something deletes the repo out from under us + continue + if age > 36000: + self.logger.warn("Unexpected directory (no such repo): %s" % repodir) + continue + if rinfo['tag_name'] != taginfo['name']: + self.logger.warn("Tag name mismatch: %s" % repodir) + continue + if rinfo['state'] in (koji.REPO_DELETED, koji.REPO_PROBLEM): + age = time.time() - rinfo['create_ts'] + if age > options.deleted_repo_lifetime: + count += 1 + logger.info("Removing stray repo (state=%s): %s" % (koji.REPO_STATES[rinfo['state']], repodir)) + safe_rmtree(repodir, strict=False) + pass + + def updateRepos(self): + #check on tasks + for tag_id, task_id in self.tasks.items(): + tinfo = 
session.getTaskInfo(task_id) + tstate = koji.TASK_STATES[tinfo['state']] + if tstate == 'CLOSED': + self.logger.info("Finished: newRepo task %s for tag %s" % (task_id, tag_id)) + del self.tasks[tag_id] + elif tstate in ('CANCELED', 'FAILED'): + self.logger.info("Problem: newRepo task %s for tag %s is %s" % (task_id, tag_id, tstate)) + del self.tasks[tag_id] + #TODO [?] - implement a timeout for active tasks? + self.logger.debug("Current tasks: %r" % self.tasks) + if len(self.tasks) >= options.max_repo_tasks: + self.logger.info("Maximum number of repo tasks reached.") + return + self.logger.debug("Updating repos") + self.readCurrentRepos() + #check for stale repos + for repo in self.repos.values(): + if repo.stale(): + repo.expire() + #find out which tags require repos + tags = {} + for target in session.getBuildTargets(): + tag_id = target['build_tag'] + tags[tag_id] = 1 + #index repos by tag + tag_repos = {} + for repo in self.repos.values(): + tag_repos.setdefault(repo.tag_id, []).append(repo) + self.logger.debug("Needed tags: %r" % tags.keys()) + self.logger.debug("Current tags: %r" % tag_repos.keys()) + + #we need to determine: + # - which tags need a new repo + # - if any repos seem to be broken + for tag_id in tags.iterkeys(): + covered = False + for repo in tag_repos.get(tag_id,[]): + if repo.isCurrent(): + covered = True + break + elif repo.pending(): + #one on the way + covered = True + break + if covered: + continue + if self.tasks.has_key(tag_id): + #repo creation in progress + #TODO - implement a timeout + continue + #tag still appears to be uncovered + task_id = session.newRepo(tag_id) + self.logger.info("Created newRepo task %s for tag %s" % (task_id, tag_id)) + self.tasks[tag_id] = task_id + #some cleanup + for tag_id, repolist in tag_repos.items(): + if not tags.has_key(tag_id): + #repos for these tags are no longer required + for repo in repolist: + if repo.ready(): + repo.expire() + for repo in repolist: + if repo.expired(): + #try to delete + 
repo.tryDelete() + + +def main(): + repomgr = RepoManager() + repomgr.readCurrentRepos() + repomgr.pruneLocalRepos() + logger.info("Entering main loop") + while True: + try: + repomgr.updateRepos() + repomgr.printState() + repomgr.pruneLocalRepos() + except KeyboardInterrupt: + logger.warn("User exit") + break + except koji.AuthExpired: + logger.warn("Session expired") + break + except SystemExit: + logger.warn("Shutting down") + break + except: + # log the exception and continue + logger.error(''.join(traceback.format_exception(*sys.exc_info()))) + try: + time.sleep(5) + except KeyboardInterrupt: + logger.warn("User exit") + break + try: + session.logout() + finally: + sys.exit() + +def _exit_signal_handler(signum, frame): + logger.error('Exiting on signal') + session.logout() + sys.exit(1) + +def get_options(): + """process options from command line and config file""" + # parse command line args + parser = OptionParser("usage: %prog [opts]") + parser.add_option("-c", "--config", dest="configFile", + help="use alternate configuration file", metavar="FILE", + default="/etc/kojira.conf") + parser.add_option("--user", help="specify user") + parser.add_option("--password", help="specify password") + parser.add_option("--principal", help="Kerberos principal") + parser.add_option("--keytab", help="Kerberos keytab") + parser.add_option("-f", "--fg", dest="daemon", + action="store_false", default=True, + help="run in foreground") + parser.add_option("-d", "--debug", action="store_true", + help="show debug output") + parser.add_option("-v", "--verbose", action="store_true", + help="show verbose output") + parser.add_option("--with-src", action="store_true", + help="include srpms in repos") + parser.add_option("--force-lock", action="store_true", default=False, + help="force lock for exclusive session") + parser.add_option("--debug-xmlrpc", action="store_true", default=False, + help="show xmlrpc debug output") + parser.add_option("--skip-main", action="store_true", 
default=False, + help="don't actually run main") + parser.add_option("--show-config", action="store_true", default=False, + help="Show config and exit") + parser.add_option("-s", "--server", help="URL of XMLRPC server") + parser.add_option("--topdir", help="Specify topdir") + parser.add_option("--logfile", help="Specify logfile") + (options, args) = parser.parse_args() + + config = ConfigParser() + config.read(options.configFile) + section = 'kojira' + for x in config.sections(): + if x != section: + quit('invalid section found in config file: %s' % x) + defaults = {'with_src': False, + 'verbose': False, + 'debug': False, + 'topdir': '/mnt/koji', + 'server': None, + 'logfile': '/var/log/kojira.log', + 'principal': None, + 'keytab': None, + 'prune_batch_size': 4, + 'max_repo_tasks' : 10, + 'deleted_repo_lifetime': 7*24*3600, + } + if config.has_section(section): + int_opts = ('prune_batch_size', 'deleted_repo_lifetime', 'max_repo_tasks') + str_opts = ('topdir','server','user','password','logfile', 'principal', 'keytab') + bool_opts = ('with_src','verbose','debug') + for name in config.options(section): + if name in int_opts: + defaults[name] = config.getint(section, name) + elif name in str_opts: + defaults[name] = config.get(section, name) + elif name in bool_opts: + defaults[name] = config.getboolean(section, name) + else: + quit("unknown config option: %s" % name) + for name, value in defaults.items(): + if getattr(options, name, None) is None: + setattr(options, name, value) + if options.logfile in ('','None','none'): + options.logfile = None + return options + +def quit(msg=None, code=1): + if msg: + logging.getLogger("koji.repo").error(msg) + sys.stderr.write('%s\n' % msg) + sys.stderr.flush() + sys.exit(code) + +if __name__ == "__main__": + + options = get_options() + topdir = getattr(options,'topdir',None) + pathinfo = koji.PathInfo(topdir) + if options.show_config: + pprint.pprint(options.__dict__) + sys.exit() + if options.logfile: + if not 
os.path.exists(options.logfile): + try: + logfile = open(options.logfile, "w") + logfile.close() + except: + sys.stderr.write("Cannot create logfile: %s\n" % options.logfile) + sys.exit(1) + if not os.access(options.logfile,os.W_OK): + sys.stderr.write("Cannot write to logfile: %s\n" % options.logfile) + sys.exit(1) + koji.add_file_logger("koji", "/var/log/kojira.log") + koji.add_sys_logger("koji") + #note we're setting logging for koji.* + logger = logging.getLogger("koji") + if options.debug: + logger.setLevel(logging.DEBUG) + elif options.verbose: + logger.setLevel(logging.INFO) + else: + logger.setLevel(logging.WARNING) + session_opts = {} + for k in ('user', 'password', 'debug_xmlrpc', 'debug'): + session_opts[k] = getattr(options,k) + session = koji.ClientSession(options.server,session_opts) + if options.user: + #authenticate using user/password + session.login() + elif sys.modules.has_key('krbV') and options.principal and options.keytab: + session.krb_login(options.principal, options.keytab) + #get an exclusive session + try: + session.exclusiveSession(force=options.force_lock) + except koji.AuthLockError: + quit("Error: Unable to get lock. 
Trying using --force-lock") + if not session.logged_in: + quit("Error: Unknown login error") + if not session.logged_in: + print "Error: unable to log in" + sys.exit(1) + if options.skip_main: + sys.exit() + elif options.daemon: + koji.daemonize() + else: + koji.add_stderr_logger("koji") + main() + + diff --git a/util/kojira.conf b/util/kojira.conf new file mode 100644 index 00000000..4633926c --- /dev/null +++ b/util/kojira.conf @@ -0,0 +1,22 @@ +[kojira] +; For user/pass authentication +; user=kojira +; password=kojira + +; For Kerberos authentication +; the principal to connect with +principal=koji/repo@EXAMPLE.COM +; The location of the keytab for the principal above +keytab=/etc/kojira.keytab + +; The URL for the koji hub server +server=http://hub.example.com/kojihub + +; The directory containing the repos/ directory +topdir=/mnt/koji + +; Logfile +logfile=/var/log/kojira.log + +; Include srpms in repos? (not needed for normal operation) +with_src=no diff --git a/util/kojira.init b/util/kojira.init new file mode 100644 index 00000000..d5cdf390 --- /dev/null +++ b/util/kojira.init @@ -0,0 +1,81 @@ +#! /bin/sh +# +# kojira Start/Stop kojira +# +# chkconfig: 345 99 99 +# description: koji repo administrator +# processname: kojira + +# This is an interactive program, we need the current locale + +# Source function library. +. /etc/init.d/functions + +# Check that we're a priviledged user +[ `id -u` = 0 ] || exit 0 + +[ -f /etc/sysconfig/kojira ] && . /etc/sysconfig/kojira + +prog="kojira" + +# Check that networking is up. +if [ "$NETWORKING" = "no" ] +then + exit 0 +fi + +[ -f /usr/sbin/kojira ] || exit 0 + +RETVAL=0 + +start() { + echo -n $"Starting $prog: " + cd / + ARGS="" + [ "$FORCE_LOCK" == "Y" ] && ARGS="$ARGS --force-lock" + [ "$KOJIRA_DEBUG" == "Y" ] && ARGS="$ARGS --debug" + [ "$KOJIRA_VERBOSE" == "Y" ] && ARGS="$ARGS --verbose" + daemon /usr/sbin/kojira $ARGS + RETVAL=$? 
+ echo + [ $RETVAL -eq 0 ] && touch /var/lock/subsys/kojira + return $RETVAL +} + +stop() { + echo -n $"Stopping $prog: " + killproc kojira + RETVAL=$? + echo + [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/kojira + return $RETVAL +} + +restart() { + stop + start +} + +# See how we were called. +case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status $prog + ;; + restart|reload) + restart + ;; + condrestart) + [ -f /var/lock/subsys/kojira ] && restart || : + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|reload|condrestart}" + exit 1 +esac + +exit $? diff --git a/util/kojira.sysconfig b/util/kojira.sysconfig new file mode 100644 index 00000000..6b8c54c9 --- /dev/null +++ b/util/kojira.sysconfig @@ -0,0 +1,3 @@ +FORCE_LOCK=Y +KOJIRA_DEBUG=N +KOJIRA_VERBOSE=Y diff --git a/www/Makefile b/www/Makefile new file mode 100644 index 00000000..1f76244f --- /dev/null +++ b/www/Makefile @@ -0,0 +1,20 @@ +SUBDIRS = kojiweb conf lib static + +_default: + @echo "nothing to make. try make install" + +clean: + rm -f *.o *.so *.pyc *~ + for d in $(SUBDIRS); do make -s -C $$d clean; done + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/var/www/koji-web + + for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR) \ + -C $$d install; [ $$? = 0 ] || exit 1; done diff --git a/www/conf/Makefile b/www/conf/Makefile new file mode 100644 index 00000000..34f34ead --- /dev/null +++ b/www/conf/Makefile @@ -0,0 +1,16 @@ +_default: + @echo "nothing to make. 
try make install" + +clean: + rm -f *.o *.so *.pyc *~ + for d in $(SUBDIRS); do make -s -C $$d clean; done + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/etc/httpd/conf.d + install -m 644 kojiweb.conf $(DESTDIR)/etc/httpd/conf.d/kojiweb.conf diff --git a/www/conf/kojiweb.conf b/www/conf/kojiweb.conf new file mode 100644 index 00000000..72f005b8 --- /dev/null +++ b/www/conf/kojiweb.conf @@ -0,0 +1,45 @@ +Alias /koji "/var/www/koji-web/scripts/" + + + # Config for the publisher handler + SetHandler mod_python + PythonHandler mod_python.publisher + + # General settings + PythonDebug On + PythonOption KojiHubURL http://hub.example.com/kojihub + PythonOption KojiWebURL http://www.example.com/koji + PythonOption KojiPackagesURL http://server.example.com/mnt/koji/packages + PythonOption WebPrincipal koji/web@EXAMPLE.COM + PythonOption WebKeytab /etc/httpd.keytab + PythonOption WebCCache /var/tmp/kojiweb.ccache + PythonOption LoginTimeout 72 + # This must be changed before deployment + PythonOption Secret CHANGE_ME + PythonPath "sys.path + ['/var/www/koji-web/lib']" + PythonCleanupHandler kojiweb.handlers::cleanup + PythonAutoReload Off + + +# Authentication settings + + AuthType Kerberos + AuthName "Koji Web UI" + KrbMethodNegotiate on + KrbMethodK5Passwd off + KrbServiceName HTTP + KrbAuthRealm EXAMPLE.COM + Krb5Keytab /etc/httpd.keytab + KrbSaveCredentials off + Require valid-user + ErrorDocument 401 /koji-static/errors/unauthorized.html + + +Alias /koji-static/ "/var/www/koji-web/static/" + + + Options None + AllowOverride None + Order allow,deny + Allow from all + diff --git a/www/docs/negotiate/index.html b/www/docs/negotiate/index.html new file mode 100644 index 00000000..ed6122e7 --- /dev/null +++ b/www/docs/negotiate/index.html @@ -0,0 +1,78 @@ + + + Configuring Firefox (and Mozilla) for Negotiate Authentication + + +

Configuring Firefox (and Mozilla) for Negotiate Authentication

+

+ Before Firefox and Mozilla can authenticate to a server using "Negotiate" + authentication, a couple of configuration changes must be made. +

+

 Type about:config into the location bar to bring + up the configuration page. Type negotiate into the Filter: box to restrict + the listing to the configuration options we're interested in. 
+ Change network.negotiate-auth.trusted-uris to the domain you want to authenticate against, + e.g. .example.com. You can leave network.negotiate-auth.delegation-uris + blank, as it enables Kerberos ticket passing, which is not required. If you do not see those two config + options listed, your version of Firefox or Mozilla may be too old to support Negotiate authentication, and + you should consider upgrading. +
 FC5 Update: Firefox and Mozilla on FC5 are attempting to load a library by its unversioned name, which is + not installed by default. A fix has been checked in upstream, but in the meantime, the workaround is to set + network.negotiate-auth.gsslib to libgssapi_krb5.so.2. 
+ FC5 Update Update: If you are using the most recent Firefox or Mozilla, this workaround is + no longer necessary. +

+

+ Now, make sure you have Kerberos tickets. Typing kinit in a shell should allow you to + retrieve Kerberos tickets. klist will show you what tickets you have. +
+

+

+ Now, if you visit a Kerberos-authenticated website in the .example.com domain, you should be logged in + automatically, without having to type in your password. +

+

+

Troubleshooting

+ If you have followed the configuration steps and Negotiate authentication is not working, you can + turn on verbose logging of the authentication process, and potentially find the cause of the problem. + Exit Firefox or Mozilla. In a shell, type the following commands: +
+export NSPR_LOG_MODULES=negotiateauth:5
+export NSPR_LOG_FILE=/tmp/moz.log
+      
+ Then restart Firefox or Mozilla from that shell, and visit the website you were unable to authenticate + to earlier. Information will be logged to /tmp/moz.log, which may give a clue to the problem. + For example: +
+-1208550944[90039d0]: entering nsNegotiateAuth::GetNextToken()
+-1208550944[90039d0]: gss_init_sec_context() failed: Miscellaneous failure
+No credentials cache found
+
+      
+ means that you do not have Kerberos tickets, and need to run kinit. +
+
+ If you are able to kinit successfully from your machine but you are unable to authenticate, and you see + something like this in your log: +
+-1208994096[8d683d8]: entering nsAuthGSSAPI::GetNextToken()
+-1208994096[8d683d8]: gss_init_sec_context() failed: Miscellaneous failure
+Server not found in Kerberos database
+      
+ it generally indicates a Kerberos configuration problem. Make sure you have the following in the + [domain_realm] section of /etc/krb5.conf: +
+ .example.com = EXAMPLE.COM
+ example.com = EXAMPLE.COM
+      
 If nothing is showing up in the log, it's possible that you're behind a proxy, and that proxy is stripping off + the HTTP headers required for Negotiate authentication. As a workaround, you can try to connect to the + server via https instead, which will allow the request to pass through unmodified. Then proceed to + debug using the log, as described above. 

+ + diff --git a/www/kojiweb/Makefile b/www/kojiweb/Makefile new file mode 100644 index 00000000..4c6f4c0e --- /dev/null +++ b/www/kojiweb/Makefile @@ -0,0 +1,24 @@ +SUBDIRS = includes + +SERVERDIR = /var/www/koji-web/scripts +FILES = $(wildcard *.py *.chtml) + +_default: + @echo "nothing to make. try make install" + +clean: + rm -f *.o *.so *.pyc *~ + for d in $(SUBDIRS); do make -s -C $$d clean; done + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/$(SERVERDIR) + install -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR) + + for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR)/$(SERVERDIR) \ + -C $$d install; [ $$? = 0 ] || exit 1; done diff --git a/www/kojiweb/buildinfo.chtml b/www/kojiweb/buildinfo.chtml new file mode 100644 index 00000000..f1d67528 --- /dev/null +++ b/www/kojiweb/buildinfo.chtml @@ -0,0 +1,103 @@ +#import koji +#import koji.util +#from kojiweb import util + +#include "includes/header.chtml" + +

Information for build $koji.buildLabel($build)

+ + + + + + + + + + + + + + + + + + + + + + + + #if $build.state == $koji.BUILD_STATES.BUILDING + #if $estCompletion + + + + #end if + #else + + + + #end if + + #set $stateName = $util.stateName($build.state) + + + + #if $task + + + + #end if + + + + + + + + + #if $changelog + + + + + #end if +
ID$build.id
Package Name$build.package_name
Version$build.version
Release$build.release
Epoch$build.epoch
Built by$build.owner_name
Started$util.formatTimeLong($build.creation_time)
Est. Completion$util.formatTimeLong($estCompletion)
Completed$util.formatTimeLong($build.completion_time)
State$stateName + #if $build.state == $koji.BUILD_STATES.BUILDING + #if $currentUser and ('admin' in $perms or $build.owner_id == $currentUser.id) + (cancel) + #end if + #end if +
Task$koji.taskLabel($task)
Tags + #if $len($tags) > 0 + + #for $tag in $tags + + + + #end for +
$tag.name
+ #else + No tags + #end if +
RPMs + #if $len($rpms) > 0 + + #for $rpm in $rpms + + #set $rpmfile = '%(name)s-%(version)s-%(release)s.%(arch)s.rpm' % $rpm + + + #end for +
$rpmfile (info) (download)
+ #else + No RPMs + #end if +
Changelog +
+#echo $util.escapeHTML($koji.util.formatChangelog($changelog))
+
+
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/buildrootinfo.chtml b/www/kojiweb/buildrootinfo.chtml new file mode 100644 index 00000000..c2ece53f --- /dev/null +++ b/www/kojiweb/buildrootinfo.chtml @@ -0,0 +1,50 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Information for buildroot $buildroot.tag_name-$buildroot.id-$buildroot.repo_id

+ + + + + + + + + + + + + + + + #set $clean = $buildroot.dirtyness and 'yes' or 'no' + + + + + + + + + + + + + + + + + + + + + + + + + + +
Host$buildroot.host_name
Arch$buildroot.arch
ID$buildroot.id
State$util.imageTag($util.brStateName($buildroot.state))
Clean?$util.imageTag($clean)
Created$util.formatTimeLong($buildroot.create_event_time)
Retired$util.formatTimeLong($buildroot.retire_event_time)
Repo ID$buildroot.repo_id
Repo Tag$buildroot.tag_name
Repo State$util.imageTag($util.repoStateName($buildroot.repo_state))
Repo Created$util.formatTimeLong($buildroot.repo_create_event_time)
Component RPMs
Built RPMs
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/builds.chtml b/www/kojiweb/builds.chtml new file mode 100644 index 00000000..6b1e0e57 --- /dev/null +++ b/www/kojiweb/builds.chtml @@ -0,0 +1,133 @@ +#import koji +#from kojiweb import util + +#def toggleLink($comp, $label, $link) + #if $comp + $label#slurp + #else + $label#slurp + #end if +#end def + +#include "includes/header.chtml" + +

#if $state != None then $util.stateName($state).capitalize() else ''# Builds#if $prefix then ' starting with "%s"' % $prefix else ''##if $user then ' by %s' % ($user.id, $user.name) else ''##if $tag then ' in tag %s' % ($tag.id, $tag.name) else ''#

+ + + #if $tag + + + + #end if + + + + + + + + + + + + + #if $tag + + #end if + + + + + #if $len($builds) > 0 + #for $build in $builds + + + + #if $tag + + #end if + + + #set $stateName = $util.stateName($build.state) + + + #end for + #else + + + + #end if + + + +
+ #if $inherited + Hide inherited builds + #else + Show inherited builds + #end if +
+ State: + +
+ #for $char in $chars + #if $prefix == $char + $char + #else + $char + #end if + | + #end for + #if $prefix + all + #else + all + #end if +
+ #if $len($buildPages) > 1 +
+ Page: + +
+ #end if + #if $buildStart > 0 + <<< + #end if + #if $totalBuilds != 0 + Builds #echo $buildStart + 1 # through #echo $buildStart + $buildCount # of $totalBuilds + #end if + #if $buildStart + $buildCount < $totalBuilds + >>> + #end if +
ID $util.sortImage($self, 'build_id')NVR $util.sortImage($self, 'nvr')Tag $util.sortImage($self, 'tag_name')Built by $util.sortImage($self, 'owner_name')Finished $util.sortImage($self, 'completion_time')State $util.sortImage($self, 'state')
$build.build_id$koji.buildLabel($build)$build.tag_name$build.owner_name$util.formatTime($build.completion_time)$util.stateImage($build.state)
No builds
+ #if $len($buildPages) > 1 +
+ Page: + +
+ #end if + #if $buildStart > 0 + <<< + #end if + #if $totalBuilds != 0 + Builds #echo $buildStart + 1 # through #echo $buildStart + $buildCount # of $totalBuilds + #end if + #if $buildStart + $buildCount < $totalBuilds + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/buildsbystatus.chtml b/www/kojiweb/buildsbystatus.chtml new file mode 100644 index 00000000..a5a18f85 --- /dev/null +++ b/www/kojiweb/buildsbystatus.chtml @@ -0,0 +1,55 @@ +#from kojiweb import util + +#def printOption(value, label=None) +#if not $label +#set $label = $value +#end if + +#end def + +#include "includes/header.chtml" + +

Succeeded/Failed/Canceled Builds#if $days != -1 then ' in the last %i days' % $days else ''#

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ Show last + days +
+
TypeBuilds 
Succeeded$numSucceeded
Failed$numFailed
Canceled$numCanceled
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/buildsbytarget.chtml b/www/kojiweb/buildsbytarget.chtml new file mode 100644 index 00000000..3605dc40 --- /dev/null +++ b/www/kojiweb/buildsbytarget.chtml @@ -0,0 +1,99 @@ +#from kojiweb import util + +#def printOption(value, label=None) +#if not $label +#set $label = $value +#end if + +#end def + +#include "includes/header.chtml" + +

Builds by Target#if $days != -1 then ' in the last %i days' % $days else ''#

+ + + + + + + + + + + + + #if $len($targets) > 0 + #for $target in $targets + + + + + + #end for + #else + + + + #end if + + + +
+
+ Show last + days +
+
+ #if $len($targetPages) > 1 +
+ Page: + +
+ #end if + #if $targetStart > 0 + <<< + #end if + #if $totalTargets != 0 + Build Targets #echo $targetStart + 1 # through #echo $targetStart + $targetCount # of $totalTargets + #end if + #if $targetStart + $targetCount < $totalTargets + >>> + #end if +
Name $util.sortImage($self, 'name')Builds $util.sortImage($self, 'builds') 
$target.name$target.builds
No builds
+ #if $len($targetPages) > 1 +
+ Page: + +
+ #end if + #if $targetStart > 0 + <<< + #end if + #if $totalTargets != 0 + Build Targets #echo $targetStart + 1 # through #echo $targetStart + $targetCount # of $totalTargets + #end if + #if $targetStart + $targetCount < $totalTargets + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/buildsbyuser.chtml b/www/kojiweb/buildsbyuser.chtml new file mode 100644 index 00000000..f1394f9f --- /dev/null +++ b/www/kojiweb/buildsbyuser.chtml @@ -0,0 +1,73 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Builds by User

+ + + + + + + + + + #if $len($userBuilds) > 0 + #for $userBuild in $userBuilds + + + + + + #end for + #else + + + + #end if + + + +
+ #if $len($userBuildPages) > 1 +
+ Page: + +
+ #end if + #if $userBuildStart > 0 + <<< + #end if + #if $totalUserBuilds != 0 + Users #echo $userBuildStart + 1 # through #echo $userBuildStart + $userBuildCount # of $totalUserBuilds + #end if + #if $userBuildStart + $userBuildCount < $totalUserBuilds + >>> + #end if +
Name $util.sortImage($self, 'name')Builds $util.sortImage($self, 'builds') 
$userBuild.name$userBuild.builds
No users
+ #if $len($userBuildPages) > 1 +
+ Page: + +
+ #end if + #if $userBuildStart > 0 + <<< + #end if + #if $totalUserBuilds != 0 + Users #echo $userBuildStart + 1 # through #echo $userBuildStart + $userBuildCount # of $totalUserBuilds + #end if + #if $userBuildStart + $userBuildCount < $totalUserBuilds + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/buildtargetedit.chtml b/www/kojiweb/buildtargetedit.chtml new file mode 100644 index 00000000..9a62cf84 --- /dev/null +++ b/www/kojiweb/buildtargetedit.chtml @@ -0,0 +1,61 @@ +#from kojiweb import util + +#include "includes/header.chtml" + + #if $target +

Edit target $target.name

+ #else +

Create build target

+ #end if + +
+ #if $target + + #end if + + + + + + #if $target + + + + #end if + + + + + + + + + + + + +
Name + +
ID$target.id
Build Tag + +
Destination Tag + +
+ #if $target + + #else + + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/buildtargetinfo.chtml b/www/kojiweb/buildtargetinfo.chtml new file mode 100644 index 00000000..e1275510 --- /dev/null +++ b/www/kojiweb/buildtargetinfo.chtml @@ -0,0 +1,30 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Information for target $target.name

+ + + + + + + + + + + + + + + #if 'admin' in $perms + + + + + + + #end if +
Name$target.name
ID$target.id
Build Tag$buildTag.name
Destination Tag$destTag.name
Edit
Delete
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/buildtargets.chtml b/www/kojiweb/buildtargets.chtml new file mode 100644 index 00000000..cb4041bc --- /dev/null +++ b/www/kojiweb/buildtargets.chtml @@ -0,0 +1,76 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Build Targets

+ + + + + + + + + #if $len($targets) > 0 + #for $target in $targets + + + + + #end for + #else + + + + #end if + + + +
+ #if $len($targetPages) > 1 + + Page: + + + #end if + #if $targetStart > 0 + <<< + #end if + #if $totalTargets != 0 + Targets #echo $targetStart + 1 # through #echo $targetStart + $targetCount # of $totalTargets + #end if + #if $targetStart + $targetCount < $totalTargets + >>> + #end if +
ID $util.sortImage($self, 'id')Name $util.sortImage($self, 'name')
$target.id$target.name
No build targets
+ #if $len($targetPages) > 1 +
+ Page: + +
+ #end if + #if $targetStart > 0 + <<< + #end if + #if $totalTargets != 0 + Targets #echo $targetStart + 1 # through #echo $targetStart + $targetCount # of $totalTargets + #end if + #if $targetStart + $targetCount < $totalTargets + >>> + #end if +
+ + #if 'admin' in $perms +
+ Create new Build Target + #end if + +#include "includes/footer.chtml" diff --git a/www/kojiweb/channelinfo.chtml b/www/kojiweb/channelinfo.chtml new file mode 100644 index 00000000..75ac09cf --- /dev/null +++ b/www/kojiweb/channelinfo.chtml @@ -0,0 +1,28 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Information for channel $channel.name

+ + + + + + + + + + + + +
Name$channel.name
ID$channel.id
Hosts + #if $len($hosts) > 0 + #for $host in $hosts + $host.name
+ #end for + #else + No hosts + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/fileinfo.chtml b/www/kojiweb/fileinfo.chtml new file mode 100644 index 00000000..dcc1aea1 --- /dev/null +++ b/www/kojiweb/fileinfo.chtml @@ -0,0 +1,30 @@ +#from kojiweb import util + +#include "includes/header.chtml" +

Information for file $file.name

+ + + + + + + + + + + + + + + + + #set $epoch = ($rpm.epoch != None and $str($rpm.epoch) + ':' or '') + + +
Name$file.name
MD5 Sum$file.md5
Size$file.size
Flags + #for flag in $util.formatFileFlags($file.flags) + $flag
+ #end for +
RPM$rpm.name-$epoch$rpm.version-$rpm.release.${rpm.arch}.rpm
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/hostinfo.chtml b/www/kojiweb/hostinfo.chtml new file mode 100644 index 00000000..e1a198ad --- /dev/null +++ b/www/kojiweb/hostinfo.chtml @@ -0,0 +1,75 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Information for host $host.name

+ + + + + + + + + + + + + + + + + + + #set $enabled = $host.enabled and 'yes' or 'no' + + + + + #set $ready = $host.ready and 'yes' or 'no' + + + + + + + + + + + + + +
Name$host.name
ID$host.id
Arches$host.arches
Capacity$host.capacity
Task Load$host.task_load
Enabled? + $util.imageTag($enabled) + #if 'admin' in $perms + #if $host.enabled + (disable) + #else + (enable) + #end if + #end if +
Ready?$util.imageTag($ready)
Last Update$util.formatTime($lastUpdate)
Channels + #for $channel in $channels + $channel.name
+ #end for +
Active Buildroots + #if $len($buildroots) > 0 + + + + + #for $buildroot in $buildroots + + + + + + #end for +
BuildrootCreatedState
$buildroot.tag_name-$buildroot.id-$buildroot.repo_id$util.formatTime($buildroot.create_event_time)$util.imageTag($util.brStateName($buildroot.state))
+ #else + No buildroots + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/hosts.chtml b/www/kojiweb/hosts.chtml new file mode 100644 index 00000000..5e58726b --- /dev/null +++ b/www/kojiweb/hosts.chtml @@ -0,0 +1,79 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Hosts

+ + + + + + + + + + + + + #if $len($hosts) > 0 + #for $host in $hosts + + + + + + + + + #end for + #else + + + + #end if + + + +
+ #if $len($hostPages) > 1 +
+ Page: + +
+ #end if + #if $hostStart > 0 + <<< + #end if + #if $totalHosts != 0 + Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts + #end if + #if $hostStart + $hostCount < $totalHosts + >>> + #end if +
ID $util.sortImage($self, 'id')Name $util.sortImage($self, 'name')ArchesEnabled? $util.sortImage($self, 'enabled')Ready? $util.sortImage($self, 'ready')Last Update $util.sortImage($self, 'last_update')
$host.id$host.name$host.arches#if $host.enabled then $util.imageTag('yes') else $util.imageTag('no')##if $host.ready then $util.imageTag('yes') else $util.imageTag('no')#$util.formatTime($host.last_update)
No hosts
+ #if $len($hostPages) > 1 +
+ Page: + +
+ #end if + #if $hostStart > 0 + <<< + #end if + #if $totalHosts != 0 + Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts + #end if + #if $hostStart + $hostCount < $totalHosts + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/includes/Makefile b/www/kojiweb/includes/Makefile new file mode 100644 index 00000000..d3e15100 --- /dev/null +++ b/www/kojiweb/includes/Makefile @@ -0,0 +1,18 @@ +SERVERDIR = /includes +FILES = $(wildcard *.chtml) + +_default: + @echo "nothing to make. try make install" + +clean: + rm -f *.o *.so *.pyc *~ + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/$(SERVERDIR) + install -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR) diff --git a/www/kojiweb/includes/footer.chtml b/www/kojiweb/includes/footer.chtml new file mode 100644 index 00000000..f2686bd4 --- /dev/null +++ b/www/kojiweb/includes/footer.chtml @@ -0,0 +1,11 @@ + + + + + + + + + diff --git a/www/kojiweb/includes/header.chtml b/www/kojiweb/includes/header.chtml new file mode 100644 index 00000000..5441ed7b --- /dev/null +++ b/www/kojiweb/includes/header.chtml @@ -0,0 +1,71 @@ +#import koji +#import random + + +#def greeting() +#set $greetings = ('hello', 'hi', 'yo', "what's up", "g'day", 'back to work', + 'bonjour', + 'hallo', + 'ciao', + 'hola', + 'olá', + 'dobrý den', + 'zdravstvuite', + 'góðan daginn', + 'hej', + 'grüezi', + 'céad míle fáilte', + 'hylô', + '你好', + 'こんにちは', + 'नमस्कार', + '안녕하세요') +#echo $random.choice($greetings)##slurp +#end def + + + + + $title | Koji + + + + + + +
+
+ + + + + + + + + $koji.formatTimeLong($currentDate) | + #if $currentUser + $greeting(), $currentUser.name | logout + #else + login + #end if + + +
diff --git a/www/kojiweb/index.chtml b/www/kojiweb/index.chtml new file mode 100644 index 00000000..1cb08844 --- /dev/null +++ b/www/kojiweb/index.chtml @@ -0,0 +1,188 @@ +#import koji +#from kojiweb import util + +#include "includes/header.chtml" + + + +
#if $user then 'Your ' else ''#Recent Builds
+ + + + + + + + + + #for $build in $builds + + #set $stateName = $util.stateName($build.state) + + + + + #end for + #if $totalBuilds == 0 + + + + #end if +
+ #if $len($buildPages) > 1 +
+ Page: + +
+ #end if + #if $buildStart > 0 + <<< + #end if + #if $totalBuilds != 0 + Build #echo $buildStart + 1 # through #echo $buildStart + $buildCount # of $totalBuilds + #end if + #if $buildStart + $buildCount < $totalBuilds + >>> + #end if +
NVR $util.sortImage($self, 'nvr', 'buildOrder')Finished $util.sortImage($self, 'completion_time', 'buildOrder')State $util.sortImage($self, 'state', 'buildOrder')
$build.nvr$util.formatTime($build.completion_time)$util.stateImage($build.state)
No builds
+ +
+ +
#if $user then 'Your ' else ''#Recent Tasks
+ + + + + + + + + + + + #for $task in $tasks + + #set $state = $util.taskState($task.state) + + + + + + + #end for + #if $totalTasks == 0 + + + + #end if +
+ #if $len($taskPages) > 1 +
+ Page: + +
+ #end if + #if $taskStart > 0 + <<< + #end if + #if $totalTasks != 0 + Tasks #echo $taskStart + 1 # through #echo $taskStart + $taskCount # of $totalTasks + #end if + #if $taskStart + $taskCount < $totalTasks + >>> + #end if +
ID $util.sortImage($self, 'id', 'taskOrder')Type $util.sortImage($self, 'method', 'taskOrder')Arch $util.sortImage($self, 'arch', 'taskOrder')Finished $util.sortImage($self, 'completion_time', 'taskOrder')State $util.sortImage($self, 'state', 'taskOrder')
$task.id$koji.taskLabel($task)$task.arch$util.formatTime($task.completion_time)$util.imageTag($state)
No tasks
+ + #if $user +
+ +
Your Packages
+ + + + + + + + + + #for $package in $packages + + + + #set $included = $package.blocked and 'no' or 'yes' + + + #end for + #if $totalPackages == 0 + + + + #end if +
+ #if $len($packagePages) > 1 +
+ Page: + +
+ #end if + #if $packageStart > 0 + <<< + #end if + #if $totalPackages != 0 + Package #echo $packageStart + 1 # through #echo $packageStart + $packageCount # of $totalPackages + #end if + #if $packageStart + $packageCount < $totalPackages + >>> + #end if +
Name $util.sortImage($self, 'package_name', 'packageOrder')Tag $util.sortImage($self, 'tag_name', 'packageOrder')Included? $util.sortImage($self, 'blocked', 'packageOrder')
$package.package_name$package.tag_name$util.imageTag($included)
No packages
+ +
+ +
Your Notifications
+ + + + + + + + + + + + + #for $notif in $notifs + + + + + + + + + #end for + #if $len($notifs) == 0 + + + + #end if +
PackageTagTypeEmail
#if $notif.package then $notif.package.name else 'all'##if $notif.tag then $notif.tag.name else 'all'##if $notif.success_only then 'success only' else 'all'#$notif.emaileditdelete
No notifications
+ +
+ Add a Notification + #end if + +#include "includes/footer.chtml" diff --git a/www/kojiweb/index.py b/www/kojiweb/index.py new file mode 100644 index 00000000..1e111426 --- /dev/null +++ b/www/kojiweb/index.py @@ -0,0 +1,1554 @@ +import os +import os.path +import re +import sys +import mod_python +import mod_python.Cookie +import Cheetah.Template +import datetime +import time +import koji +import kojiweb.util + +# Convenience definition of a commonly-used sort function +_sortbyname = kojiweb.util.sortByKeyFunc('name') + +def _setUserCookie(req, user): + options = req.get_options() + cookie = mod_python.Cookie.SignedCookie('user', user, + secret=options['Secret'], + path=os.path.dirname(req.uri), + expires=(time.time() + (int(options['LoginTimeout']) * 60 * 60))) + mod_python.Cookie.add_cookie(req, cookie) + +def _clearUserCookie(req): + cookie = mod_python.Cookie.Cookie('user', '', + path=os.path.dirname(req.uri), + expires=0) + mod_python.Cookie.add_cookie(req, cookie) + +def _getUserCookie(req): + options = req.get_options() + cookies = mod_python.Cookie.get_cookies(req, + mod_python.Cookie.SignedCookie, + secret=options['Secret']) + if cookies.has_key('user') and \ + (type(cookies['user']) is mod_python.Cookie.SignedCookie): + return cookies['user'].value + else: + return None + +def _krbLogin(req, session, principal): + options = req.get_options() + wprinc = options['WebPrincipal'] + keytab = options['WebKeytab'] + ccache = options['WebCCache'] + return session.krb_login(principal=wprinc, keytab=keytab, + ccache=ccache, proxyuser=principal) + +def _assertLogin(req): + if not (hasattr(req, 'currentPrincipal') and + hasattr(req, 'currentUser')): + raise StandardError, '_getServer() must be called before _assertLogin()' + elif req.currentPrincipal and req.currentUser: + if not _krbLogin(req, req._session, req.currentPrincipal): + raise koji.AuthError, 'could not login using principal: %s' % req.currentPrincipal + else: + mod_python.util.redirect(req, 'login') + 
assert False
+
+def _initValues(req, title='Build System Info', pageID='summary'):
+    # Build the common template-variable dict shared by every page and
+    # stash it on the request so _genHTML() can find it later.
+    values = {}
+    values['title'] = title
+    values['pageID'] = pageID
+    values['currentDate'] = str(datetime.datetime.now())
+
+    req._values = values
+
+    return values
+
+def _genHTML(req, fileName):
+    # Render the named Cheetah template against the values collected by
+    # _initValues().  chdir() first so relative #include paths inside the
+    # template resolve next to the handler script.
+    os.chdir(os.path.dirname(req.filename))
+
+    # _getServer() normally sets currentUser; default to None if it was
+    # not called for this request
+    if hasattr(req, 'currentUser'):
+        req._values['currentUser'] = req.currentUser
+    else:
+        req._values['currentUser'] = None
+
+    return str(Cheetah.Template.Template(file=fileName, searchList=[req._values]))
+
+def _getServer(req):
+    # Open a ClientSession to the configured hub and resolve the
+    # currently-logged-in user (if any) from the signed cookie.
+    serverURL = req.get_options()['KojiHubURL']
+    session = koji.ClientSession(serverURL)
+
+    req.currentPrincipal = _getUserCookie(req)
+    if req.currentPrincipal:
+        req.currentUser = session.getUser(req.currentPrincipal)
+        if not req.currentUser:
+            raise koji.AuthError, 'could not get user for principal: %s' % req.currentPrincipal
+        # re-issue the cookie so its expiration slides forward on each
+        # authenticated request
+        _setUserCookie(req, req.currentPrincipal)
+    else:
+        req.currentUser = None
+
+    req._session = session
+    return session
+
+def _redirectBack(req, page):
+    # Redirect to an explicit page if given, else the HTTP Referer,
+    # else the index page.
+    if page:
+        mod_python.util.redirect(req, page)
+    elif req.headers_in.get('Referer'):
+        mod_python.util.redirect(req, req.headers_in.get('Referer'))
+    else:
+        mod_python.util.redirect(req, 'index')
+
+def login(req, page=None):
+    # req.user is expected to be populated by an external mod_python
+    # auth module before this handler runs
+    session = _getServer(req)
+
+    principal = req.user
+    if not principal:
+        raise koji.AuthError, 'configuration error: an external module should have performed authentication before presenting this page'
+
+    # login via Kerberos to verify credentials and create the user if it doesn't exist
+    if not _krbLogin(req, session, principal):
+        raise koji.AuthError, 'could not login using principal: %s' % principal
+
+    _setUserCookie(req, principal)
+
+    _redirectBack(req, page)
+
+def logout(req, page=None):
+    # Dropping the signed cookie is sufficient to log out; a fresh hub
+    # session is created per request by _getServer()
+    _clearUserCookie(req)
+
+    _redirectBack(req, page)
+
+def index(req, packageOrder='package_name', packageStart=None, buildOrder='-completion_time', buildStart=None, taskOrder='-completion_time', taskStart=None):
+    values 
= _initValues(req) + server = _getServer(req) + + user = req.currentUser + + builds = kojiweb.util.paginateMethod(server, values, 'listBuilds', kw={'userID': (user and user['id'] or None)}, + start=buildStart, dataName='builds', prefix='build', order=buildOrder, pageSize=10) + + taskOpts = {'parent': None, 'decode': True} + if user: + taskOpts['owner'] = user['id'] + tasks = kojiweb.util.paginateMethod(server, values, 'listTasks', kw={'opts': taskOpts}, + start=taskStart, dataName='tasks', prefix='task', order=taskOrder, pageSize=10) + + if user: + packages = kojiweb.util.paginateResults(server, values, 'listPackages', kw={'userID': user['id']}, + start=packageStart, dataName='packages', prefix='package', order=packageOrder, pageSize=10) + + notifs = server.getBuildNotifications(user['id']) + notifs.sort(kojiweb.util.sortByKeyFunc('id')) + # XXX Make this a multicall + for notif in notifs: + notif['package'] = None + if notif['package_id']: + notif['package'] = server.getPackage(notif['package_id']) + + notif['tag'] = None + if notif['tag_id']: + notif['tag'] = server.getTag(notif['tag_id']) + values['notifs'] = notifs + + values['user'] = user + + return _genHTML(req, 'index.chtml') + +def notificationedit(req, notificationID): + server = _getServer(req) + _assertLogin(req) + + notificationID = int(notificationID) + notification = server.getBuildNotification(notificationID) + if notification == None: + raise koji.GenericError, 'no notification with ID: %i' % notificationID + + form = req.form + + if form.has_key('save'): + package_id = form['package'] + if package_id == 'all': + package_id = None + else: + package_id = int(package_id) + + tag_id = form['tag'] + if tag_id == 'all': + tag_id = None + else: + tag_id = int(tag_id) + + if form.has_key('success_only'): + success_only = True + else: + success_only = False + + email = form['email'].value + if not email: + raise koji.GenericError, 'an email address for the notification must be provided' + + 
server.updateNotification(notification['id'], package_id, tag_id, success_only, email) + + mod_python.util.redirect(req, 'index') + elif form.has_key('cancel'): + mod_python.util.redirect(req, 'index') + else: + values = _initValues(req, 'Edit Notification') + + values['notif'] = notification + packages = server.listPackages() + packages.sort(kojiweb.util.sortByKeyFunc('package_name')) + values['packages'] = packages + tags = server.listTags(queryOpts={'order': 'name'}) + values['tags'] = tags + + return _genHTML(req, 'notificationedit.chtml') + +def notificationcreate(req): + server = _getServer(req) + _assertLogin(req) + + form = req.form + + if form.has_key('add'): + user = req.currentUser + if not user: + raise koji.GenericError, 'not logged-in' + + package_id = form['package'] + if package_id == 'all': + package_id = None + else: + package_id = int(package_id) + + tag_id = form['tag'] + if tag_id == 'all': + tag_id = None + else: + tag_id = int(tag_id) + + if form.has_key('success_only'): + success_only = True + else: + success_only = False + + email = form['email'].value + if not email: + raise koji.GenericError, 'an email address for the notification must be provided' + + server.createNotification(user['id'], package_id, tag_id, success_only, email) + + mod_python.util.redirect(req, 'index') + elif form.has_key('cancel'): + mod_python.util.redirect(req, 'index') + else: + values = _initValues(req, 'Edit Notification') + + values['notif'] = None + packages = server.listPackages() + packages.sort(kojiweb.util.sortByKeyFunc('package_name')) + values['packages'] = packages + tags = server.listTags(queryOpts={'order': 'name'}) + values['tags'] = tags + + return _genHTML(req, 'notificationedit.chtml') + +def notificationdelete(req, notificationID): + server = _getServer(req) + _assertLogin(req) + + notificationID = int(notificationID) + notification = server.getBuildNotification(notificationID) + if not notification: + raise koji.GenericError, 'no notification 
with ID: %i' % notificationID + + server.deleteNotification(notification['id']) + + mod_python.util.redirect(req, 'index') + +def hello(req): + return _getServer(req).hello() + +def showSession(req): + return _getServer(req).showSession() + +def tasks(req, owner=None, state='active', method='all', hostID=None, start=None, order='-completion_time'): + values = _initValues(req, 'Tasks', 'tasks') + server = _getServer(req) + + opts = {'decode': True} + if owner: + if owner.isdigit(): + owner = int(owner) + ownerObj = server.getUser(owner, strict=True) + opts['owner'] = ownerObj['id'] + values['owner'] = ownerObj['name'] + values['ownerObj'] = ownerObj + else: + values['owner'] = None + values['ownerObj'] = None + + values['users'] = server.listUsers(queryOpts={'order': 'name'}) + + if state == 'active' and method == 'all' and not hostID: + # If we're only showing active tasks, and not filtering by host or method, only query the top-level tasks as well, + # and then retrieve the task children so we can do the nice tree display. 
+ treeDisplay = True + else: + treeDisplay = False + values['treeDisplay'] = treeDisplay + + if method != 'all': + opts['method'] = method + values['method'] = method + + if state == 'active': + opts['state'] = [koji.TASK_STATES['FREE'], koji.TASK_STATES['OPEN'], koji.TASK_STATES['ASSIGNED']] + if treeDisplay: + opts['parent'] = None + elif state == 'toplevel': + # Show all top-level tasks, no tree display + opts['parent'] = None + elif state == 'all': + pass + else: + # Assume they've passed in a state name + opts['state'] = [koji.TASK_STATES[state.upper()]] + values['state'] = state + + if hostID: + hostID = int(hostID) + host = server.getHost(hostID, strict=True) + opts['host_id'] = host['id'] + values['host'] = host + values['hostID'] = host['id'] + else: + values['host'] = None + values['hostID'] = None + + loggedInUser = req.currentUser + values['loggedInUser'] = loggedInUser + + values['order'] = order + + tasks = kojiweb.util.paginateMethod(server, values, 'listTasks', kw={'opts': opts}, + start=start, dataName='tasks', prefix='task', order=order) + + if treeDisplay: + server.multicall = True + for task in tasks: + server.getTaskDescendents(task['id'], request=True) + descendentList = server.multiCall() + for task, [descendents] in zip(tasks, descendentList): + task['descendents'] = descendents + + return _genHTML(req, 'tasks.chtml') + +def taskinfo(req, taskID): + server = _getServer(req) + values = _initValues(req, 'Task Info', 'tasks') + + taskID = int(taskID) + task = server.getTaskInfo(taskID, request=True) + if not task: + raise koji.GenericError, 'invalid task ID: %s' % taskID + + values['task'] = task + params = task['request'] + values['params'] = params + + if task['channel_id']: + channel = server.getChannel(task['channel_id']) + values['channelName'] = channel['name'] + else: + values['channelName'] = None + if task['host_id']: + host = server.getHost(task['host_id']) + values['hostName'] = host['name'] + else: + values['hostName'] = None + if 
task['owner']: + owner = server.getUser(task['owner']) + values['owner'] = owner + else: + values['owner'] = None + if task['parent']: + parent = server.getTaskInfo(task['parent'], request=True) + values['parent'] = parent + else: + values['parent'] = None + + descendents = server.getTaskDescendents(task['id'], request=True) + values['descendents'] = descendents + + builds = server.listBuilds(taskID=task['id']) + if builds: + values['taskBuild'] = builds[0] + else: + values['taskBuild'] = None + + buildroots = server.listBuildroots(taskID=task['id']) + values['buildroots'] = buildroots + + if task['method'] == 'buildArch': + buildTag = server.getTag(params[1]) + values['buildTag'] = buildTag + elif task['method'] == 'tagBuild': + destTag = server.getTag(params[0]) + build = server.getBuild(params[1]) + values['destTag'] = destTag + values['build'] = build + elif task['method'] == 'newRepo': + tag = server.getTag(params[0]) + values['tag'] = tag + elif task['method'] == 'tagNotification': + destTag = None + if params[2]: + destTag = server.getTag(params[2]) + srcTag = None + if params[3]: + srcTag = server.getTag(params[3]) + build = server.getBuild(params[4]) + user = server.getUser(params[5]) + values['destTag'] = destTag + values['srcTag'] = srcTag + values['build'] = build + values['user'] = user + elif task['method'] == 'dependantTask': + deps = [server.getTaskInfo(depID, request=True) for depID in params[0]] + values['deps'] = deps + + if task['state'] in (koji.TASK_STATES['CLOSED'], koji.TASK_STATES['FAILED']): + try: + result = server.getTaskResult(task['id']) + values['result'] = result + values['excClass'] = None + except: + excClass, exc = sys.exc_info()[:2] + values['result'] = exc + values['excClass'] = excClass + # clear the exception, since we're just using + # it for display purposes + sys.exc_clear() + else: + values['result'] = None + values['excClass'] = None + + output = server.listTaskOutput(task['id']) + output.sort(_sortByExtAndName) + 
values['output'] = output + if req.currentUser: + values['perms'] = server.getUserPerms(req.currentUser['id']) + else: + values['perms'] = [] + + return _genHTML(req, 'taskinfo.chtml') + +def resubmittask(req, taskID): + server = _getServer(req) + _assertLogin(req) + + taskID = int(taskID) + newTaskID = server.resubmitTask(taskID) + mod_python.util.redirect(req, 'taskinfo?taskID=%i' % newTaskID) + +def canceltask(req, taskID): + server = _getServer(req) + _assertLogin(req) + + taskID = int(taskID) + server.cancelTask(taskID) + mod_python.util.redirect(req, 'taskinfo?taskID=%i' % taskID) + +def _sortByExtAndName(a, b): + """Sort two filenames, first by extension, and then by name.""" + aRoot, aExt = os.path.splitext(a) + bRoot, bExt = os.path.splitext(b) + return cmp(aExt, bExt) or cmp(aRoot, bRoot) + +def getfile(req, taskID, name): + server = _getServer(req) + taskID = int(taskID) + + if name.endswith('.rpm'): + req.content_type = 'application/x-rpm' + req.headers_out['Content-Disposition'] = 'attachment; filename=%s' % name + elif name.endswith('.log'): + req.content_type = 'text/plain' + + return server.downloadTaskOutput(taskID, name) + +def tags(req, start=None, order=None, childID=None): + values = _initValues(req, 'Tags', 'tags') + server = _getServer(req) + + if order == None: + order = 'name' + values['order'] = order + + tags = kojiweb.util.paginateMethod(server, values, 'listTags', kw=None, + start=start, dataName='tags', prefix='tag', order=order) + + if req.currentUser: + values['perms'] = server.getUserPerms(req.currentUser['id']) + else: + values['perms'] = [] + + values['childID'] = childID + + return _genHTML(req, 'tags.chtml') + +def packages(req, tagID=None, userID=None, order='package_name', start=None, prefix=None, inherited='1'): + values = _initValues(req, 'Packages', 'packages') + server = _getServer(req) + tag = None + if tagID != None: + tagID = int(tagID) + tag = server.getTag(tagID) + values['tagID'] = tagID + values['tag'] = tag + user 
= None + if userID != None: + userID = int(userID) + user = server.getUser(userID) + values['userID'] = userID + values['user'] = user + values['order'] = order + if prefix: + prefix = prefix.lower() + values['prefix'] = prefix + inherited = int(inherited) + values['inherited'] = inherited + + packages = kojiweb.util.paginateResults(server, values, 'listPackages', + kw={'tagID': tagID, 'userID': userID, 'prefix': prefix, 'inherited': bool(inherited)}, + start=start, dataName='packages', prefix='package', order=order) + + values['chars'] = [chr(char) for char in range(48, 58) + range(97, 123)] + + return _genHTML(req, 'packages.chtml') + +def packageinfo(req, packageID, tagOrder='name', tagStart=None, buildOrder='-completion_time', buildStart=None): + values = _initValues(req, 'Package Info', 'packages') + server = _getServer(req) + + if packageID.isdigit(): + packageID = int(packageID) + package = server.getPackage(packageID) + if package == None: + raise koji.GenericError, 'invalid package ID: %s' % packageID + values['package'] = package + values['packageID'] = package['id'] + + tags = kojiweb.util.paginateMethod(server, values, 'listTags', kw={'package': package['id']}, + start=tagStart, dataName='tags', prefix='tag', order=tagOrder) + builds = kojiweb.util.paginateMethod(server, values, 'listBuilds', kw={'packageID': package['id']}, + start=buildStart, dataName='builds', prefix='build', order=buildOrder) + + return _genHTML(req, 'packageinfo.chtml') + +def taginfo(req, tagID, all='0', packageOrder='package_name', packageStart=None, buildOrder='-completion_time', buildStart=None, childID=None): + values = _initValues(req, 'Tag Info', 'tags') + server = _getServer(req) + + if tagID.isdigit(): + tagID = int(tagID) + tag = server.getTag(tagID, strict=True) + + all = int(all) + + numPackages = server.count('listPackages', tagID=tag['id'], inherited=True) + numBuilds = server.count('listTagged', tag=tag['id'], inherit=True) + values['numPackages'] = numPackages + 
values['numBuilds'] = numBuilds + + inheritance = server.getFullInheritance(tag['id']) + tagsByChild = {} + for parent in inheritance: + child_id = parent['child_id'] + if not tagsByChild.has_key(child_id): + tagsByChild[child_id] = [] + tagsByChild[child_id].append(child_id) + + srcTargets = server.getBuildTargets(buildTagID=tag['id']) + srcTargets.sort(_sortbyname) + destTargets = server.getBuildTargets(destTagID=tag['id']) + destTargets.sort(_sortbyname) + + values['tag'] = tag + values['tagID'] = tag['id'] + values['inheritance'] = inheritance + values['tagsByChild'] = tagsByChild + values['srcTargets'] = srcTargets + values['destTargets'] = destTargets + values['all'] = all + values['repo'] = server.getRepo(tag['id'], state=koji.REPO_READY) + + child = None + if childID != None: + child = server.getTag(int(childID), strict=True) + values['child'] = child + + if req.currentUser: + values['perms'] = server.getUserPerms(req.currentUser['id']) + else: + values['perms'] = [] + permList = server.getAllPerms() + allPerms = dict([(perm['id'], perm['name']) for perm in permList]) + values['allPerms'] = allPerms + + return _genHTML(req, 'taginfo.chtml') + +def tagcreate(req): + server = _getServer(req) + _assertLogin(req) + + form = req.form + + if form.has_key('add'): + name = form['name'].value + arches = form['arches'].value + if form.has_key('locked'): + locked = True + else: + locked = False + permission = form['permission'].value + if permission == 'none': + permission = None + else: + permission = int(permission) + + server.createTag(name) + tag = server.getTag(name) + + if tag == None: + raise koji.GenericError, 'error creating tag "%s"' % name + + server.editTag(tag['id'], name, arches, locked, permission) + + mod_python.util.redirect(req, 'taginfo?tagID=%i' % tag['id']) + elif form.has_key('cancel'): + mod_python.util.redirect(req, 'tags') + else: + values = _initValues(req, 'Add Tag', 'tags') + + values['tag'] = None + values['permissions'] = 
server.getAllPerms() + + return _genHTML(req, 'tagedit.chtml') + +def tagedit(req, tagID): + server = _getServer(req) + _assertLogin(req) + + tagID = int(tagID) + tag = server.getTag(tagID) + if tag == None: + raise koji.GenericError, 'no tag with ID: %i' % tagID + + form = req.form + + if form.has_key('save'): + name = form['name'].value + arches = form['arches'].value + locked = bool(form.has_key('locked')) + permission = form['permission'].value + if permission == 'none': + permission = None + else: + permission = int(permission) + + server.editTag(tag['id'], name, arches, locked, permission) + + mod_python.util.redirect(req, 'taginfo?tagID=%i' % tag['id']) + elif form.has_key('cancel'): + mod_python.util.redirect(req, 'taginfo?tagID=%i' % tag['id']) + else: + values = _initValues(req, 'Edit Tag', 'tags') + + values['tag'] = tag + values['permissions'] = server.getAllPerms() + + return _genHTML(req, 'tagedit.chtml') + +def tagdelete(req, tagID): + server = _getServer(req) + _assertLogin(req) + + tagID = int(tagID) + tag = server.getTag(tagID) + if tag == None: + raise koji.GenericError, 'no tag with ID: %i' % tagID + + server.deleteTag(tag['id']) + + mod_python.util.redirect(req, 'tags') + +def tagparent(req, tagID, parentID, action): + server = _getServer(req) + _assertLogin(req) + + tag = server.getTag(int(tagID), strict=True) + parent = server.getTag(int(parentID), strict=True) + + if action in ('add', 'edit'): + form = req.form + + if form.has_key('add') or form.has_key('save'): + newDatum = {} + newDatum['parent_id'] = parent['id'] + newDatum['priority'] = int(form['priority']) + maxdepth = form['maxdepth'] + maxdepth = len(maxdepth) > 0 and int(maxdepth) or None + newDatum['maxdepth'] = maxdepth + newDatum['intransitive'] = bool(form.has_key('intransitive')) + newDatum['noconfig'] = bool(form.has_key('noconfig')) + newDatum['pkg_filter'] = form['pkg_filter'].value + + data = server.getInheritanceData(tag['id']) + data.append(newDatum) + + 
server.setInheritanceData(tag['id'], data) + elif form.has_key('cancel'): + pass + else: + values = _initValues(req, action.capitalize() + ' Parent Tag', 'tags') + values['tag'] = tag + values['parent'] = parent + + inheritanceData = server.getInheritanceData(tag['id']) + values['numParents'] = len(inheritanceData) + inheritanceData = [datum for datum in inheritanceData \ + if datum['parent_id'] == parent['id']] + if len(inheritanceData) == 0: + values['inheritanceData'] = None + elif len(inheritanceData) == 1: + values['inheritanceData'] = inheritanceData[0] + else: + raise koji.GenericError, 'tag %i has tag %i listed as a parent more than once' % (tag['id'], parent['id']) + + return _genHTML(req, 'tagparent.chtml') + elif action == 'remove': + data = server.getInheritanceData(tag['id']) + for datum in data: + if datum['parent_id'] == parent['id']: + datum['delete link'] = True + break + else: + raise koji.GenericError, 'tag %i is not a parent of tag %i' % (parent['id'], tag['id']) + + server.setInheritanceData(tag['id'], data) + else: + raise koji.GenericError, 'unknown action: %s' % action + + mod_python.util.redirect(req, 'taginfo?tagID=%i' % tag['id']) + +def buildinfo(req, buildID): + values = _initValues(req, 'Build Info', 'builds') + server = _getServer(req) + + buildID = int(buildID) + + build = server.getBuild(buildID) + tags = server.listTags(build['id']) + tags.sort(_sortbyname) + rpms = server.listBuildRPMs(build['id']) + rpms.sort(_sortbyname) + + if build['task_id']: + task = server.getTaskInfo(build['task_id'], request=True) + else: + task = None + + values['build'] = build + values['tags'] = tags + values['rpms'] = rpms + values['task'] = task + if req.currentUser: + values['perms'] = server.getUserPerms(req.currentUser['id']) + else: + values['perms'] = [] + values['changelog'] = server.getChangelogEntries(build['id']) + if build['state'] == koji.BUILD_STATES['BUILDING']: + avgDuration = server.getAverageBuildDuration(build['package_id']) + if 
avgDuration != None: + avgDelta = datetime.timedelta(seconds=avgDuration) + startTime = datetime.datetime.fromtimestamp( + time.mktime(time.strptime(koji.formatTime(build['creation_time']), '%Y-%m-%d %H:%M:%S')) + ) + values['estCompletion'] = startTime + avgDelta + else: + values['estCompletion'] = None + + values['downloadBase'] = req.get_options()['KojiPackagesURL'] + + return _genHTML(req, 'buildinfo.chtml') + +def builds(req, userID=None, tagID=None, state=None, order='-completion_time', start=None, prefix=None, inherited='1'): + values = _initValues(req, 'Builds', 'builds') + server = _getServer(req) + + user = None + if userID != None: + userID = int(userID) + user = server.getUser(userID) + values['userID'] = userID + values['user'] = user + + tag = None + if tagID != None: + tagID = int(tagID) + tag = server.getTag(tagID) + values['tagID'] = tagID + values['tag'] = tag + + if state == 'all': + state = None + elif state != None: + state = int(state) + values['state'] = state + + if prefix: + prefix = prefix.lower() + values['prefix'] = prefix + + values['order'] = order + inherited = int(inherited) + values['inherited'] = inherited + + if tagID != None: + # don't need to consider 'state' here, since only completed builds would be tagged + builds = kojiweb.util.paginateResults(server, values, 'listTagged', kw={'tag': tagID, 'inherit': bool(inherited), 'prefix': prefix}, + start=start, dataName='builds', prefix='build', order=order) + else: + builds = kojiweb.util.paginateMethod(server, values, 'listBuilds', kw={'userID': userID, 'state': state, 'prefix': prefix}, + start=start, dataName='builds', prefix='build', order=order) + + values['chars'] = [chr(char) for char in range(48, 58) + range(97, 123)] + + return _genHTML(req, 'builds.chtml') + +def users(req, order='name', start=None): + values = _initValues(req, 'Users', 'users') + server = _getServer(req) + + values['order'] = order + + users = kojiweb.util.paginateMethod(server, values, 'listUsers', + 
start=start, dataName='users', prefix='user', order=order) + + return _genHTML(req, 'users.chtml') + +def userinfo(req, userID, packageOrder='package_name', packageStart=None, buildOrder='-completion_time', buildStart=None): + values = _initValues(req, 'User Info', 'users') + server = _getServer(req) + + userID = int(userID) + user = server.getUser(userID) + + values['user'] = user + values['userID'] = user['id'] + + packages = kojiweb.util.paginateResults(server, values, 'listPackages', kw={'userID': user['id']}, + start=packageStart, dataName='packages', prefix='package', order=packageOrder, pageSize=10) + + builds = kojiweb.util.paginateMethod(server, values, 'listBuilds', kw={'userID': user['id']}, + start=buildStart, dataName='builds', prefix='build', order=buildOrder, pageSize=10) + + return _genHTML(req, 'userinfo.chtml') + +def rpminfo(req, rpmID, fileOrder='name', fileStart=None): + values = _initValues(req, 'RPM Info', 'builds') + server = _getServer(req) + + rpmID = int(rpmID) + rpm = server.getRPM(rpmID) + build = server.getBuild(rpm['build_id']) + builtInRoot = None + if rpm['buildroot_id'] != None: + builtInRoot = server.getBuildroot(rpm['buildroot_id']) + requires = server.getRPMDeps(rpm['id'], koji.DEP_REQUIRE) + requires.sort(_sortbyname) + provides = server.getRPMDeps(rpm['id'], koji.DEP_PROVIDE) + provides.sort(_sortbyname) + obsoletes = server.getRPMDeps(rpm['id'], koji.DEP_OBSOLETE) + obsoletes.sort(_sortbyname) + conflicts = server.getRPMDeps(rpm['id'], koji.DEP_CONFLICT) + conflicts.sort(_sortbyname) + buildroots = server.listBuildroots(rpmID=rpm['id']) + buildroots.sort(kojiweb.util.sortByKeyFunc('-create_event_time')) + + values['rpmID'] = rpmID + values['rpm'] = rpm + values['build'] = build + values['builtInRoot'] = builtInRoot + values['requires'] = requires + values['provides'] = provides + values['obsoletes'] = obsoletes + values['conflicts'] = conflicts + values['buildroots'] = buildroots + + files = 
kojiweb.util.paginateMethod(server, values, 'listRPMFiles', args=[rpm['id']], + start=fileStart, dataName='files', prefix='file', order=fileOrder) + + return _genHTML(req, 'rpminfo.chtml') + +def fileinfo(req, rpmID, filename): + values = _initValues(req, 'File Info', 'builds') + server = _getServer(req) + + rpmID = int(rpmID) + rpm = server.getRPM(rpmID) + if not rpm: + raise koji.GenericError, 'invalid RPM ID: %i' % rpmID + file = server.getRPMFile(rpmID, filename) + if not file: + raise koji.GenericError, 'no file %s in RPM %i' % (filename, rpmID) + + values['rpm'] = rpm + values['file'] = file + + return _genHTML(req, 'fileinfo.chtml') + +def cancelbuild(req, buildID): + server = _getServer(req) + _assertLogin(req) + + buildID = int(buildID) + build = server.getBuild(buildID) + if build == None: + raise koji.GenericError, 'unknown build ID: %i' % buildID + + result = server.cancelBuild(build['id']) + if not result: + raise koji.GenericError, 'unable to cancel build' + + mod_python.util.redirect(req, 'buildinfo?buildID=%i' % build['id']) + +def hosts(req, start=None, order='name'): + values = _initValues(req, 'Hosts', 'hosts') + server = _getServer(req) + + values['order'] = order + + hosts = server.listHosts() + + server.multicall = True + for host in hosts: + server.getLastHostUpdate(host['id']) + updates = server.multiCall() + for host, [lastUpdate] in zip(hosts, updates): + host['last_update'] = lastUpdate + + # Paginate after retrieving last update info so we can sort on it + kojiweb.util.paginateList(values, hosts, start, 'hosts', 'host', order) + + return _genHTML(req, 'hosts.chtml') + +def hostinfo(req, hostID=None, userID=None): + values = _initValues(req, 'Host Info', 'hosts') + server = _getServer(req) + + if hostID: + if hostID.isdigit(): + hostID = int(hostID) + host = server.getHost(hostID) + if host == None: + raise koji.GenericError, 'invalid host ID: %s' % hostID + elif userID: + userID = int(userID) + hosts = server.listHosts(userID=userID) + 
host = None + if hosts: + host = hosts[0] + if host == None: + raise koji.GenericError, 'invalid host ID: %s' % userID + else: + raise koji.GenericError, 'hostID or userID must be provided' + + channels = server.listChannels(host['id']) + channels.sort(_sortbyname) + buildroots = server.listBuildroots(hostID=host['id'], + state=[state[1] for state in koji.BR_STATES.items() if state[0] != 'EXPIRED']) + buildroots.sort(kojiweb.util.sortByKeyFunc('-create_event_time')) + + values['host'] = host + values['channels'] = channels + values['buildroots'] = buildroots + values['lastUpdate'] = server.getLastHostUpdate(host['id']) + if req.currentUser: + values['perms'] = server.getUserPerms(req.currentUser['id']) + else: + values['perms'] = [] + + return _genHTML(req, 'hostinfo.chtml') + +def disablehost(req, hostID): + server = _getServer(req) + _assertLogin(req) + + hostID = int(hostID) + host = server.getHost(hostID, strict=True) + server.disableHost(host['name']) + + mod_python.util.redirect(req, 'hostinfo?hostID=%i' % host['id']) + +def enablehost(req, hostID): + server = _getServer(req) + _assertLogin(req) + + hostID = int(hostID) + host = server.getHost(hostID, strict=True) + server.enableHost(host['name']) + + mod_python.util.redirect(req, 'hostinfo?hostID=%i' % host['id']) + +def channelinfo(req, channelID): + values = _initValues(req, 'Channel Info', 'hosts') + server = _getServer(req) + + channelID = int(channelID) + channel = server.getChannel(channelID) + if channel == None: + raise koji.GenericError, 'invalid channel ID: %i' % channelID + + hosts = server.listHosts(channelID=channelID) + hosts.sort(_sortbyname) + + values['channel'] = channel + values['hosts'] = hosts + + return _genHTML(req, 'channelinfo.chtml') + +def buildrootinfo(req, buildrootID, builtStart=None, builtOrder=None, componentStart=None, componentOrder=None): + values = _initValues(req, 'Buildroot Info', 'hosts') + server = _getServer(req) + + buildrootID = int(buildrootID) + buildroot = 
server.getBuildroot(buildrootID) + if buildroot == None: + raise koji.GenericError, 'unknown buildroot ID: %i' % buildrootID + + values['buildroot'] = buildroot + + return _genHTML(req, 'buildrootinfo.chtml') + +def rpmlist(req, buildrootID, type, start=None, order='nvr'): + values = _initValues(req, 'RPM List', 'hosts') + server = _getServer(req) + + buildrootID = int(buildrootID) + buildroot = server.getBuildroot(buildrootID) + if buildroot == None: + raise koji.GenericError, 'unknown buildroot ID: %i' % buildrootID + + rpms = None + if type == 'component': + rpms = kojiweb.util.paginateMethod(server, values, 'listRPMs', kw={'componentBuildrootID': buildroot['id']}, + start=start, dataName='rpms', prefix='rpm', order=order) + elif type == 'built': + rpms = kojiweb.util.paginateMethod(server, values, 'listRPMs', kw={'buildrootID': buildroot['id']}, + start=start, dataName='rpms', prefix='rpm', order=order) + + values['buildroot'] = buildroot + values['type'] = type + + values['order'] = order + + return _genHTML(req, 'rpmlist.chtml') + +def buildtargets(req, start=None, order='name'): + values = _initValues(req, 'Build Targets', 'buildtargets') + server = _getServer(req) + + targets = kojiweb.util.paginateMethod(server, values, 'getBuildTargets', + start=start, dataName='targets', prefix='target', order=order) + + values['order'] = order + if req.currentUser: + values['perms'] = server.getUserPerms(req.currentUser['id']) + else: + values['perms'] = [] + + return _genHTML(req, 'buildtargets.chtml') + +def buildtargetinfo(req, targetID=None, name=None): + values = _initValues(req, 'Build Target Info', 'buildtargets') + server = _getServer(req) + + target = None + if targetID != None: + targetID = int(targetID) + target = server.getBuildTarget(targetID) + elif name != None: + target = server.getBuildTarget(name) + + if target == None: + raise koji.GenericError, 'invalid build target: %s' % (targetID or name) + + buildTag = server.getTag(target['build_tag']) + destTag 
= server.getTag(target['dest_tag']) + + values['target'] = target + values['buildTag'] = buildTag + values['destTag'] = destTag + if req.currentUser: + values['perms'] = server.getUserPerms(req.currentUser['id']) + else: + values['perms'] = [] + + return _genHTML(req, 'buildtargetinfo.chtml') + +def buildtargetedit(req, targetID): + server = _getServer(req) + _assertLogin(req) + + targetID = int(targetID) + + target = server.getBuildTarget(targetID) + if target == None: + raise koji.GenericError, 'invalid build target: %s' % targetID + + form = req.form + + if form.has_key('save'): + name = form['name'].value + buildTagID = int(form['buildTag']) + buildTag = server.getTag(buildTagID) + if buildTag == None: + raise koji.GenericError, 'invalid tag ID: %i' % buildTagID + + destTagID = int(form['destTag']) + destTag = server.getTag(destTagID) + if destTag == None: + raise koji.GenericError, 'invalid tag ID: %i' % destTagID + + server.editBuildTarget(target['id'], name, buildTag['id'], destTag['id']) + + mod_python.util.redirect(req, 'buildtargetinfo?targetID=%i' % target['id']) + elif form.has_key('cancel'): + mod_python.util.redirect(req, 'buildtargetinfo?targetID=%i' % target['id']) + else: + values = _initValues(req, 'Edit Build Target', 'buildtargets') + tags = server.listTags() + tags.sort(_sortbyname) + + values['target'] = target + values['tags'] = tags + + return _genHTML(req, 'buildtargetedit.chtml') + +def buildtargetcreate(req): + server = _getServer(req) + _assertLogin(req) + + form = req.form + + if form.has_key('add'): + # Use the str .value field of the StringField object, + # since xmlrpclib doesn't know how to marshal the StringFields + # returned by mod_python + name = form['name'].value + buildTagID = int(form['buildTag']) + destTagID = int(form['destTag']) + + server.createBuildTarget(name, buildTagID, destTagID) + target = server.getBuildTarget(name) + + if target == None: + raise koji.GenericError, 'error creating build target "%s"' % name + + 
mod_python.util.redirect(req, 'buildtargetinfo?targetID=%i' % target['id']) + elif form.has_key('cancel'): + mod_python.util.redirect(req, 'buildtargets') + else: + values = _initValues(req, 'Add Build Target', 'builtargets') + + tags = server.listTags() + tags.sort(_sortbyname) + + values['target'] = None + values['tags'] = tags + + return _genHTML(req, 'buildtargetedit.chtml') + +def buildtargetdelete(req, targetID): + server = _getServer(req) + _assertLogin(req) + + targetID = int(targetID) + + target = server.getBuildTarget(targetID) + if target == None: + raise koji.GenericError, 'invalid build target: %i' % targetID + + server.deleteBuildTarget(target['id']) + + mod_python.util.redirect(req, 'buildtargets') + +def reports(req): + values = _initValues(req, 'Reports', 'reports') + return _genHTML(req, 'reports.chtml') + +def buildsbyuser(req, start=None, order='-builds'): + values = _initValues(req, 'Builds by User', 'reports') + server = _getServer(req) + + maxBuilds = 1 + users = server.listUsers() + + server.multicall = True + for user in users: + server.listBuilds(userID=user['id'], queryOpts={'countOnly': True}) + buildCounts = server.multiCall() + + for user, [numBuilds] in zip(users, buildCounts): + user['builds'] = numBuilds + if numBuilds > maxBuilds: + maxBuilds = numBuilds + + values['order'] = order + + graphWidth = 400.0 + values['graphWidth'] = graphWidth + values['maxBuilds'] = maxBuilds + values['increment'] = graphWidth / maxBuilds + kojiweb.util.paginateList(values, users, start, 'userBuilds', 'userBuild', order) + + return _genHTML(req, 'buildsbyuser.chtml') + +def rpmsbyhost(req, start=None, order=None, hostArch=None, rpmArch=None): + values = _initValues(req, 'RPMs by Host', 'reports') + server = _getServer(req) + + maxRPMs = 1 + hostArchFilter = hostArch + if hostArchFilter == 'ix86': + hostArchFilter = ['i386', 'i486', 'i586', 'i686'] + hosts = server.listHosts(arches=hostArchFilter) + rpmArchFilter = rpmArch + if rpmArchFilter == 'ix86': 
+ rpmArchFilter = ['i386', 'i486', 'i586', 'i686'] + + server.multicall = True + for host in hosts: + server.listRPMs(hostID=host['id'], arches=rpmArchFilter, queryOpts={'countOnly': True}) + rpmCounts = server.multiCall() + + for host, [numRPMs] in zip(hosts, rpmCounts): + host['rpms'] = numRPMs + if numRPMs > maxRPMs: + maxRPMs = numRPMs + + values['hostArch'] = hostArch + hostArchList = ['ix86', 'x86_64', 'ia64', 'ppc', 'ppc64', 's390', 's390x'] + values['hostArchList'] = hostArchList + values['rpmArch'] = rpmArch + values['rpmArchList'] = hostArchList + ['noarch', 'src'] + + if order == None: + order = '-rpms' + values['order'] = order + + graphWidth = 400.0 + values['graphWidth'] = graphWidth + values['maxRPMs'] = maxRPMs + values['increment'] = graphWidth / maxRPMs + kojiweb.util.paginateList(values, hosts, start, 'hosts', 'host', order) + + return _genHTML(req, 'rpmsbyhost.chtml') + +def packagesbyuser(req, start=None, order=None): + values = _initValues(req, 'Packages by User', 'reports') + server = _getServer(req) + + maxPackages = 1 + users = server.listUsers() + + server.multicall = True + for user in users: + server.count('listPackages', userID=user['id']) + packageCounts = server.multiCall() + + for user, [numPackages] in zip(users, packageCounts): + user['packages'] = numPackages + if numPackages > maxPackages: + maxPackages = numPackages + + if order == None: + order = '-packages' + values['order'] = order + + graphWidth = 400.0 + values['graphWidth'] = graphWidth + values['maxPackages'] = maxPackages + values['increment'] = graphWidth / maxPackages + kojiweb.util.paginateList(values, users, start, 'users', 'user', order) + + return _genHTML(req, 'packagesbyuser.chtml') + +def tasksbyhost(req, start=None, order='-tasks', hostArch=None): + values = _initValues(req, 'Tasks by Host', 'reports') + server = _getServer(req) + + maxTasks = 1 + + hostArchFilter = hostArch + if hostArchFilter == 'ix86': + hostArchFilter = ['i386', 'i486', 'i586', 'i686'] + + 
hosts = server.listHosts(arches=hostArchFilter) + + server.multicall = True + for host in hosts: + server.listTasks(opts={'host_id': host['id']}, queryOpts={'countOnly': True}) + taskCounts = server.multiCall() + + for host, [numTasks] in zip(hosts, taskCounts): + host['tasks'] = numTasks + if numTasks > maxTasks: + maxTasks = numTasks + + values['hostArch'] = hostArch + hostArchList = ['ix86', 'x86_64', 'ia64', 'ppc', 'ppc64', 's390', 's390x'] + values['hostArchList'] = hostArchList + + values['order'] = order + + graphWidth = 400.0 + values['graphWidth'] = graphWidth + values['maxTasks'] = maxTasks + values['increment'] = graphWidth / maxTasks + kojiweb.util.paginateList(values, hosts, start, 'hosts', 'host', order) + + return _genHTML(req, 'tasksbyhost.chtml') + +def tasksbyuser(req, start=None, order='-tasks'): + values = _initValues(req, 'Tasks by User', 'reports') + server = _getServer(req) + + maxTasks = 1 + + users = server.listUsers() + + server.multicall = True + for user in users: + server.listTasks(opts={'owner': user['id']}, queryOpts={'countOnly': True}) + taskCounts = server.multiCall() + + for user, [numTasks] in zip(users, taskCounts): + user['tasks'] = numTasks + if numTasks > maxTasks: + maxTasks = numTasks + + values['order'] = order + + graphWidth = 400.0 + values['graphWidth'] = graphWidth + values['maxTasks'] = maxTasks + values['increment'] = graphWidth / maxTasks + kojiweb.util.paginateList(values, users, start, 'users', 'user', order) + + return _genHTML(req, 'tasksbyuser.chtml') + +def buildsbystatus(req, days='7'): + values = _initValues(req, 'Builds by Status', 'reports') + server = _getServer(req) + + days = int(days) + if days != -1: + seconds = 60 * 60 * 24 * days + dateAfter = time.time() - seconds + else: + dateAfter = None + values['days'] = days + + server.multicall = True + # use taskID=-1 to filter out builds with a null task_id (imported rather than built in koji) + server.listBuilds(completeAfter=dateAfter, 
state=koji.BUILD_STATES['COMPLETE'], taskID=-1, queryOpts={'countOnly': True}) + server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['FAILED'], taskID=-1, queryOpts={'countOnly': True}) + server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['CANCELED'], taskID=-1, queryOpts={'countOnly': True}) + [[numSucceeded], [numFailed], [numCanceled]] = server.multiCall() + + values['numSucceeded'] = numSucceeded + values['numFailed'] = numFailed + values['numCanceled'] = numCanceled + + maxBuilds = 1 + for value in (numSucceeded, numFailed, numCanceled): + if value > maxBuilds: + maxBuilds = value + + graphWidth = 400.0 + values['graphWidth'] = graphWidth + values['maxBuilds'] = maxBuilds + values['increment'] = graphWidth / maxBuilds + + return _genHTML(req, 'buildsbystatus.chtml') + +def buildsbytarget(req, days='7', start=None, order='-builds'): + values = _initValues(req, 'Builds by Target', 'reports') + server = _getServer(req) + + days = int(days) + if days != -1: + seconds = 60 * 60 * 24 * days + dateAfter = time.time() - seconds + else: + dateAfter = None + values['days'] = days + + targets = {} + maxBuilds = 1 + + tasks = server.listTasks(opts={'method': 'build', 'completeAfter': dateAfter, 'decode': True}) + + for task in tasks: + targetName = task['request'][1] + target = targets.get(targetName) + if not target: + target = {'name': targetName} + targets[targetName] = target + builds = target.get('builds', 0) + 1 + target['builds'] = builds + if builds > maxBuilds: + maxBuilds = builds + + kojiweb.util.paginateList(values, targets.values(), start, 'targets', 'target', order) + + values['order'] = order + + graphWidth = 400.0 + values['graphWidth'] = graphWidth + values['maxBuilds'] = maxBuilds + values['increment'] = graphWidth / maxBuilds + + return _genHTML(req, 'buildsbytarget.chtml') + +def recentbuilds(req, user=None, tag=None, userID=None, tagID=None): + values = _initValues(req, 'Recent Build RSS') + server = _getServer(req) + + 
tagObj = None + if tag != None: + tagObj = server.getTag(tag) + elif tagID != None: + tagID = int(tagID) + tagObj = server.getTag(tagID) + + userObj = None + if user != None: + userObj = server.getUser(user) + elif userID != None: + userID = int(userID) + userObj = server.getUser(userID) + + if tagObj != None: + builds = server.listTagged(tagObj['id'], inherit=True) + builds.sort(kojiweb.util.sortByKeyFunc('-completion_time', noneGreatest=True)) + builds = builds[:20] + elif userObj != None: + builds = server.listBuilds(userID=userObj['id'], queryOpts={'order': '-completion_time', + 'limit': 20}) + else: + builds = server.listBuilds(queryOpts={'order': '-completion_time', 'limit': 20}) + + server.multicall = True + for build in builds: + if build['task_id']: + server.getTaskInfo(build['task_id'], request=True) + else: + server.echo(None) + tasks = server.multiCall() + + server.multicall = True + queryOpts = {'limit': 3} + for build in builds: + if build['state'] == koji.BUILD_STATES['COMPLETE']: + server.getChangelogEntries(build['build_id'], queryOpts=queryOpts) + else: + server.echo(None) + clogs = server.multiCall() + + for i in range(len(builds)): + task = tasks[i][0] + if isinstance(task, list): + # this is the output of server.echo(None) above + task = None + builds[i]['task'] = task + builds[i]['changelog'] = clogs[i][0] + + values['tag'] = tagObj + values['user'] = userObj + values['builds'] = builds + values['weburl'] = req.get_options()['KojiWebURL'] + + req.content_type = 'text/xml' + return _genHTML(req, 'recentbuilds.chtml') + +_infoURLs = {'package': 'packageinfo?packageID=%(id)i', + 'build': 'buildinfo?buildID=%(id)i', + 'tag': 'taginfo?tagID=%(id)i', + 'target': 'buildtargetinfo?targetID=%(id)i', + 'user': 'userinfo?userID=%(id)i', + 'host': 'hostinfo?hostID=%(id)i', + 'rpm': 'rpminfo?rpmID=%(id)i', + 'file': 'fileinfo?rpmID=%(id)i&filename=%(name)s'} + +def search(req, start=None, order='name'): + values = _initValues(req, 'Search', 'search') + 
server = _getServer(req) + + form = req.form + if form.has_key('terms') and form['terms']: + terms = form['terms'].value + type = form['type'].value + match = form['match'].value + values['terms'] = terms + values['type'] = type + values['match'] = match + + if match == 'regexp': + try: + re.compile(terms) + except: + raise koji.GenericError, 'invalid regular expression: %s' % terms + + infoURL = _infoURLs.get(type) + if not infoURL: + raise koji.GenericError, 'unknown search type: %s' % type + values['infoURL'] = infoURL + values['order'] = order + + results = kojiweb.util.paginateMethod(server, values, 'search', args=(terms, type, match), + start=start, dataName='results', prefix='result', order=order) + if not start and len(results) == 1: + # if we found exactly one result, skip the result list and redirect to the info page + # (you're feeling lucky) + mod_python.util.redirect(req, infoURL % results[0]) + else: + return _genHTML(req, 'searchresults.chtml') + else: + return _genHTML(req, 'search.chtml') + +def watchlogs(req, taskID): + html = """ + + + + + + Logs for task %i | Koji + + +
+Loading logs...
+    
+ + +""" % int(taskID) + return html diff --git a/www/kojiweb/notificationedit.chtml b/www/kojiweb/notificationedit.chtml new file mode 100644 index 00000000..e68b2ce1 --- /dev/null +++ b/www/kojiweb/notificationedit.chtml @@ -0,0 +1,59 @@ +#from kojiweb import util + +#include "includes/header.chtml" + + #if $notif +

Edit notification

+ #else +

Create notification

+ #end if + +
+ #if $notif + + #end if + + + + + + + + + + + + + + + + + + + + +
Package + +
Tag + +
Success Only? +
Email
+ #if $notif + + #else + + #end if +
+
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/packageinfo.chtml b/www/kojiweb/packageinfo.chtml new file mode 100644 index 00000000..2563282b --- /dev/null +++ b/www/kojiweb/packageinfo.chtml @@ -0,0 +1,113 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Information for package $package.name

+ + + + + + + + + + + + + + + + + + + +
Name$package.name
ID$package.id
Builds + #if $len($builds) > 0 + + + + + + + + + + + #for $build in $builds + + + + + #set $stateName = $util.stateName($build.state) + + + #end for +
+ #if $len($buildPages) > 1 +
+ Page: + +
+ #end if + #if $buildStart > 0 + <<< + #end if + #echo $buildStart + 1 # through #echo $buildStart + $buildCount # of $totalBuilds + #if $buildStart + $buildCount < $totalBuilds + >>> + #end if +
NVR $util.sortImage($self, 'nvr', 'buildOrder')Built by $util.sortImage($self, 'owner_name', 'buildOrder')Finished $util.sortImage($self, 'completion_time', 'buildOrder')State $util.sortImage($self, 'state', 'buildOrder')
$build.nvr$build.owner_name$util.formatTime($build.completion_time)$util.stateImage($build.state)
+ #else + No builds + #end if +
Tags + #if $len($tags) > 0 + + + + + + + + + + + #for $tag in $tags + + + + #set $included = $tag.blocked and 'no' or 'yes' + + + + #end for +
+ #if $len($tagPages) > 1 +
+ Page: + +
+ #end if + #if $tagStart > 0 + <<< + #end if + #echo $tagStart + 1 # through #echo $tagStart + $tagCount # of $totalTags + #if $tagStart + $tagCount < $totalTags + >>> + #end if +
Name $util.sortImage($self, 'name', 'tagOrder')Owner $util.sortImage($self, 'owner_name', 'tagOrder')Included? $util.sortImage($self, 'blocked', 'tagOrder')Extra Arches $util.sortImage($self, 'extra_arches', 'tagOrder')
$tag.name$tag.owner_name$util.imageTag($included)$tag.extra_arches
+ #else + No tags + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/packages.chtml b/www/kojiweb/packages.chtml new file mode 100644 index 00000000..9f79fee9 --- /dev/null +++ b/www/kojiweb/packages.chtml @@ -0,0 +1,110 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Packages#if $prefix then ' starting with "%s"' % $prefix else ''##if $tag then ' in tag %s' % ($tag.id, $tag.name) else ''##if $user then ' owned by %s' % ($user.id, $user.name) else ''#

+ + + #if $tag + + + + #end if + + + + + + + + + + #if $tag or $user + + + + #end if + + #if $len($packages) > 0 + #for $package in $packages + + + + #if $tag or $user + + + + #end if + + #end for + #else + + + + #end if + + + +
+ #if $inherited + Hide inherited packages + #else + Show inherited packages + #end if +
+ #for $char in $chars + #if $prefix == $char + $char + #else + $char + #end if + | + #end for + #if $prefix + all + #else + all + #end if +
+ #if $len($packagePages) > 1 +
+ Page: + +
+ #end if + #if $packageStart > 0 + <<< + #end if + #if $totalPackages != 0 + Packages #echo $packageStart + 1 # through #echo $packageStart + $packageCount # of $totalPackages + #end if + #if $packageStart + $packageCount < $totalPackages + >>> + #end if +
ID $util.sortImage($self, 'package_id')Name $util.sortImage($self, 'package_name')Tag $util.sortImage($self, 'tag_name')Owner $util.sortImage($self, 'owner_name')Included? $util.sortImage($self, 'blocked')
$package.package_id$package.package_name$package.tag_name$package.owner_name#if $package.blocked then $util.imageTag('no') else $util.imageTag('yes')#
No packages
+ #if $len($packagePages) > 1 +
+ Page: + +
+ #end if + #if $packageStart > 0 + <<< + #end if + #if $totalPackages != 0 + Packages #echo $packageStart + 1 # through #echo $packageStart + $packageCount # of $totalPackages + #end if + #if $packageStart + $packageCount < $totalPackages + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/packagesbyuser.chtml b/www/kojiweb/packagesbyuser.chtml new file mode 100644 index 00000000..ec6241d4 --- /dev/null +++ b/www/kojiweb/packagesbyuser.chtml @@ -0,0 +1,73 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Packages by User

+ + + + + + + + + + #if $len($users) > 0 + #for $user in $users + + + + + + #end for + #else + + + + #end if + + + +
+ #if $len($userPages) > 1 +
+ Page: + +
+ #end if + #if $userStart > 0 + <<< + #end if + #if $totalUsers != 0 + Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers + #end if + #if $userStart + $userCount < $totalUsers + >>> + #end if +
Name $util.sortImage($self, 'name')Packages $util.sortImage($self, 'packages') 
$user.name$user.packages
No users
+ #if $len($userPages) > 1 +
+ Page: + +
+ #end if + #if $userStart > 0 + <<< + #end if + #if $totalUsers != 0 + Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers + #end if + #if $userStart + $userCount < $totalUsers + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/recentbuilds.chtml b/www/kojiweb/recentbuilds.chtml new file mode 100644 index 00000000..0271a9bf --- /dev/null +++ b/www/kojiweb/recentbuilds.chtml @@ -0,0 +1,48 @@ +#import koji +#import koji.util +#from kojiweb import util + +#def linkURL() + #set $query = [] + #if $tag + #silent $query.append('tagID=%i' % $tag.id) + #end if + #if $user + #silent $query.append('userID=%i' % $user.id) + #end if + #if $query + #echo '%s/%s?%s' % ($weburl, 'builds', '&'.join($query)) + #else + #echo '%s/%s' % ($weburl, 'builds') + #end if +#end def + + + + koji: most recent builds#if $tag then ' into tag ' + $tag.name else ''##if $user then ' by user ' + $user.name else ''# + $linkURL() + + A list of the most recent builds + #if $tag + into tag $tag.name + #end if + #if $user + by user $user.name + #end if + in the Koji Build System. The list is sorted in reverse chronological order by build completion time. + + $util.formatTimeRSS($currentDate) + #for $build in $builds + + $koji.BUILD_STATES[$build.state].lower(): $koji.buildLabel($build)#if $build.task then ', target: ' + $build.task.request[1] else ''# + $weburl/buildinfo?buildID=$build.build_id + #if $build.completion_time + $util.formatTimeRSS($build.completion_time) + #end if + #if $build.state == $koji.BUILD_STATES['COMPLETE'] and $build.changelog + <pre>$util.escapeHTML($koji.util.formatChangelog($build.changelog))</pre> + #end if + + #end for + + diff --git a/www/kojiweb/reports.chtml b/www/kojiweb/reports.chtml new file mode 100644 index 00000000..67184f9c --- /dev/null +++ b/www/kojiweb/reports.chtml @@ -0,0 +1,17 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Reports

+ + + +#include "includes/footer.chtml" diff --git a/www/kojiweb/rpminfo.chtml b/www/kojiweb/rpminfo.chtml new file mode 100644 index 00000000..b50a1f0a --- /dev/null +++ b/www/kojiweb/rpminfo.chtml @@ -0,0 +1,170 @@ +#from kojiweb import util +#import time + +#include "includes/header.chtml" + #set $epoch = ($rpm.epoch != None and $str($rpm.epoch) + ':' or '') +

Information for RPM $rpm.name-$epoch$rpm.version-$rpm.release.${rpm.arch}.rpm

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #if $builtInRoot + + + + #end if + + + + + + + + + + + + + + + + + + + + + + + + +
ID$rpm.id
Name$rpm.name
Version$rpm.version
Release$rpm.release
Epoch$rpm.epoch
Arch$rpm.arch
Size$rpm.size
Payload Hash$rpm.payloadhash
Build Time$time.strftime('%Y-%m-%d %H:%M:%S', $time.gmtime($rpm.buildtime)) GMT
Buildroot$builtInRoot.tag_name-$builtInRoot.id-$builtInRoot.repo_id
Provides + #if $len($provides) > 0 + + #for $dep in $provides + + + + #end for +
$util.formatDep($dep.name, $dep.version, $dep.flags)
+ #else + No Provides + #end if +
Requires + #if $len($requires) > 0 + + #for $dep in $requires + + + + #end for +
$util.formatDep($dep.name, $dep.version, $dep.flags)
+ #else + No Requires + #end if +
Obsoletes + #if $len($obsoletes) > 0 + + #for $dep in $obsoletes + + + + #end for +
$util.formatDep($dep.name, $dep.version, $dep.flags)
+ #else + No Obsoletes + #end if +
Conflicts + #if $len($conflicts) > 0 + + #for $dep in $conflicts + + + + #end for +
$util.formatDep($dep.name, $dep.version, $dep.flags)
+ #else + No Conflicts + #end if +
Files + #if $len($files) > 0 + + + + + + + + + #for $file in $files + + + + #end for +
+ #if $len($filePages) > 1 +
+ Page: + +
+ #end if + #if $fileStart > 0 + <<< + #end if + #echo $fileStart + 1 # through #echo $fileStart + $fileCount # of $totalFiles + #if $fileStart + $fileCount < $totalFiles + >>> + #end if +
Name $util.sortImage($self, 'name', 'fileOrder')Size $util.sortImage($self, 'size', 'fileOrder')
$file.name$file.size
+ #else + No files + #end if +
Component of + #if $len($buildroots) > 0 + + + + + #for $buildroot in $buildroots + + + + #set $update = $buildroot.is_update and 'yes' or 'no' + + + #end for +
BuildrootStateUpdate?
$buildroot.tag_name-$buildroot.id-$buildroot.repo_id$util.imageTag($util.brStateName($buildroot.state))$util.imageTag($update)
+ #else + No Buildroots + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/rpmlist.chtml b/www/kojiweb/rpmlist.chtml new file mode 100644 index 00000000..76493658 --- /dev/null +++ b/www/kojiweb/rpmlist.chtml @@ -0,0 +1,82 @@ +#from kojiweb import util + +#include "includes/header.chtml" + + #if $type == 'component' +

Component RPMs of buildroot $buildroot.tag_name-$buildroot.id-$buildroot.repo_id

+ #else +

RPMs built in buildroot $buildroot.tag_name-$buildroot.id-$buildroot.repo_id

+ #end if + + + + + + + + #if $type == 'component' + + #end if + + #if $len($rpms) > 0 + #for $rpm in $rpms + + #set $epoch = ($rpm.epoch != None and $str($rpm.epoch) + ':' or '') + + #if $type == 'component' + #set $update = $rpm.is_update and 'yes' or 'no' + + #end if + + #end for + #else + + + + #end if + + + +
+ #if $len($rpmPages) > 1 +
+ Page: + +
+ #end if + #if $rpmStart > 0 + <<< + #end if + #if $totalRpms != 0 + RPMs #echo $rpmStart + 1 # through #echo $rpmStart + $rpmCount # of $totalRpms + #end if + #if $rpmStart + $rpmCount < $totalRpms + >>> + #end if +
NVR $util.sortImage($self, 'nvr')Update? $util.sortImage($self, 'is_update')
$rpm.name-$epoch$rpm.version-$rpm.release.${rpm.arch}.rpm$util.imageTag($update)
No RPMs
+ #if $len($rpmPages) > 1 +
+ Page: + +
+ #end if + #if $rpmStart > 0 + <<< + #end if + #if $totalRpms != 0 + RPMs #echo $rpmStart + 1 # through #echo $rpmStart + $rpmCount # of $totalRpms + #end if + #if $rpmStart + $rpmCount < $totalRpms + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/rpmsbyhost.chtml b/www/kojiweb/rpmsbyhost.chtml new file mode 100644 index 00000000..c3018130 --- /dev/null +++ b/www/kojiweb/rpmsbyhost.chtml @@ -0,0 +1,105 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

#if $rpmArch then $rpmArch + ' ' else ''#RPMs by Host#if $hostArch then ' (%s)' % $hostArch else ''#

+ + + + + + + + + + + + + + + + #if $len($hosts) > 0 + #for $host in $hosts + + + + + + #end for + #else + + + + #end if + + + +
+ Host arch: #for $arch in $hostArchList + #if $arch == $hostArch + $arch | + #else + $arch | + #end if + #end for + #if $hostArch + all + #else + all + #end if +
+ RPM arch: #for $arch in $rpmArchList + #if $arch == $rpmArch + $arch | + #else + $arch | + #end if + #end for + #if $rpmArch + all + #else + all + #end if +
+ #if $len($hostPages) > 1 +
+ Page: + +
+ #end if + #if $hostStart > 0 + <<< + #end if + #if $totalHosts != 0 + Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts + #end if + #if $hostStart + $hostCount < $totalHosts + >>> + #end if +
Name $util.sortImage($self, 'name')RPMs $util.sortImage($self, 'rpms') 
$host.name$host.rpms
No hosts
+ #if $len($hostPages) > 1 +
+ Page: + +
+ #end if + #if $hostStart > 0 + <<< + #end if + #if $totalHosts != 0 + Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts + #end if + #if $hostStart + $hostCount < $totalHosts + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/search.chtml b/www/kojiweb/search.chtml new file mode 100644 index 00000000..6e129a6a --- /dev/null +++ b/www/kojiweb/search.chtml @@ -0,0 +1,39 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Search

+ +
+ + + + + + + + + + + + + + +
Search + +
  + glob + regexp +
 
+
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/searchresults.chtml b/www/kojiweb/searchresults.chtml new file mode 100644 index 00000000..c9d0ac44 --- /dev/null +++ b/www/kojiweb/searchresults.chtml @@ -0,0 +1,72 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Search Results for "$terms"

+ + + + + + + + + + #if $len($results) > 0 + #for $result in $results + + + + + #end for + #else + + + + #end if + + + +
+ #if $len($resultPages) > 1 +
+ Page: + +
+ #end if + #if $resultStart > 0 + <<< + #end if + #if $totalResults != 0 + Results #echo $resultStart + 1 # through #echo $resultStart + $resultCount # of $totalResults + #end if + #if $resultStart + $resultCount < $totalResults + >>> + #end if +
ID $util.sortImage($self, 'id')Name $util.sortImage($self, 'name')
$result.id$result.name
No search results
+ #if $len($resultPages) > 1 +
+ Page: + +
+ #end if + #if $resultStart > 0 + <<< + #end if + #if $totalResults != 0 + Results #echo $resultStart + 1 # through #echo $resultStart + $resultCount # of $totalResults + #end if + #if $resultStart + $resultCount < $totalResults + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/tagedit.chtml b/www/kojiweb/tagedit.chtml new file mode 100644 index 00000000..e78bc0e6 --- /dev/null +++ b/www/kojiweb/tagedit.chtml @@ -0,0 +1,54 @@ +#from kojiweb import util + +#include "includes/header.chtml" + + #if $tag +

Edit tag $tag.name

+ #else +

Create tag

+ #end if + +
+ + + + + + + + + + + + + + + + + + + + +
Name + + #if $tag + + #end if +
Arches
Locked +
Permission + +
+ #if $tag + + #else + + #end if +
+
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/taginfo.chtml b/www/kojiweb/taginfo.chtml new file mode 100644 index 00000000..9cdd3f3b --- /dev/null +++ b/www/kojiweb/taginfo.chtml @@ -0,0 +1,136 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Information for tag $tag.name

+ + + #if $child and 'admin' in $perms + + + + #end if + + + + + + + + + + + + + + + + + + + + #if $maxDepth >= $TRUNC_DEPTH + + + + #end if + #if 'admin' in $perms + + + + #end if + + + + + + + + + + + + + + + + + + + + #if 'admin' in $perms + + + + + + + #end if +
Add $tag.name as parent of $child.name
Name$tag.name
ID$tag.id
Arches$tag.arches
Locked#if $tag.locked then 'yes' else 'no'#
Permission#if $tag.perm_id then $allPerms[$tag.perm_id] else 'none'#
Inheritance + $tag.name + #set $numParents = $len($inheritance) + #set $iter = 0 + #set $maxDepth = 0 + #set $TRUNC_DEPTH = 7 +
    + #for $parent in $inheritance + #set $iter += 1 + #set $nextDepth = ($iter < $numParents and $inheritance[$iter].currdepth or 1) + #set $depth = $parent.currdepth + #if $depth > $maxDepth + #set $maxDepth = $depth + #end if + #if $depth == $TRUNC_DEPTH and not $all +
  • ...
  • +
  • + #else +
  • + #end if + #silent $tagsByChild[$parent.child_id].pop() + + + $parent.name + #if $depth == 1 and 'admin' in $perms + (edit) (remove) + #end if + + + #if $nextDepth > $depth +
      + #else + + #end if + #while $nextDepth < $depth +
    +
  • + #set $depth -= 1 + #end while + #end for +
+
+ #if $all + Show abbreviated tree + #else + Show full tree + #end if +
Add parent
Repo created#if $repo then $util.formatTimeRSS($repo.creation_time) else ''#
Packages$numPackages
Builds$numBuilds
Targets building from this tag + #if $len($srcTargets) + #for $target in $srcTargets + $target.name
+ #end for + #else + No build targets + #end if +
Targets building to this tag + #if $len($destTargets) + #for $target in $destTargets + $target.name
+ #end for + #else + No build targets + #end if +
Edit tag
Delete tag
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/tagparent.chtml b/www/kojiweb/tagparent.chtml new file mode 100644 index 00000000..cbef455f --- /dev/null +++ b/www/kojiweb/tagparent.chtml @@ -0,0 +1,71 @@ +#from kojiweb import util + +#include "includes/header.chtml" + + #if $inheritanceData +

Edit Parent

+ #else +

Add Parent

+ #end if + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Tag Name + $tag.name + +
Parent Tag Name + $parent.name + +
Priority + +
Max Depth + +
Intransitive + +
Packages Only + +
Package Filter + +
+ #if $inheritanceData + + #else + + #end if +
+
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/tags.chtml b/www/kojiweb/tags.chtml new file mode 100644 index 00000000..c94e4b8b --- /dev/null +++ b/www/kojiweb/tags.chtml @@ -0,0 +1,76 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Tags

+ + + + + + + + + #if $len($tags) > 0 + #for $tag in $tags + + + + + #end for + #else + + + + #end if + + + +
+ #if $len($tagPages) > 1 +
+ Page: + +
+ #end if + #if $tagStart > 0 + <<< + #end if + #if $totalTags != 0 + Tags #echo $tagStart + 1 # through #echo $tagStart + $tagCount # of $totalTags + #end if + #if $tagStart + $tagCount < $totalTags + >>> + #end if +
ID $util.sortImage($self, 'id')Name $util.sortImage($self, 'name')
$tag.id$tag.name
No tags
+ #if $len($tagPages) > 1 +
+ Page: + +
+ #end if + #if $tagStart > 0 + <<< + #end if + #if $totalTags != 0 + Tags #echo $tagStart + 1 # through #echo $tagStart + $tagCount # of $totalTags + #end if + #if $tagStart + $tagCount < $totalTags + >>> + #end if +
+ + #if 'admin' in $perms +
+ Create new Tag + #end if + +#include "includes/footer.chtml" diff --git a/www/kojiweb/taskinfo.chtml b/www/kojiweb/taskinfo.chtml new file mode 100644 index 00000000..f1b55d83 --- /dev/null +++ b/www/kojiweb/taskinfo.chtml @@ -0,0 +1,289 @@ +#import koji +#from kojiweb import util + +#def printValue($key, $value, $sep=', ') + #if $key == 'brootid' +$value + #else +#if $isinstance($value, list) then $sep.join([$str($val) for $val in $value]) else $value# + #end if +#end def + +#def printMap($vals, $prefix='') + #for $key, $value in $vals.items() + #if $key != '__starstar' + $prefix$key = $printValue($key, $value)
+ #end if + #end for +#end def + +#def printOpts($opts) + #if $opts + Options:
+ $printMap($opts, '  ') + #end if +#end def + +#def printChildren($taskID, $childMap) + #set $iter = 0 + #set $children = $childMap[$str($taskID)] + #if $children +
    + #for $child in $children + #set $iter += 1 + #if $iter < $len($children) +
  • + #else +
  • + #end if + #set $childState = $util.taskState($child.state) + + + $koji.taskLabel($child) + + + $printChildren($child.id, $childMap) +
  • + #end for +
+ #end if +#end def + +#include "includes/header.chtml" + +

Information for task $task.id

+ + + + + + + + + + + #set $state = $util.taskState($task.state) + + + + #if $taskBuild + + + + #end if + + + + + + + + + + + + + + + + + + + + + + #if $buildroots + + + + + #end if + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Method$task.method
Parameters + #if $task.method == 'buildFromCVS' + CVS URL: $params[0]
+ Build Target: $params[1] + #elif $task.method == 'buildSRPMFromCVS' + CVS URL: $params[0] + #elif $task.method == 'multiArchBuild' + SRPM: $params[0]
+ Build Target: $params[1]
+ $printOpts($params[2]) + #elif $task.method == 'buildArch' + SRPM: $params[0]
+ Build Tag: $buildTag.name
+ Arch: $params[2]
+ Keep SRPM? #if $params[3] then 'yes' else 'no'#
+ #if $len($params) > 4 + $printOpts($params[4]) + #end if + #elif $task.method == 'tagBuild' + Destination Tag: $destTag.name
+ Build: $koji.buildLabel($build) + #elif $task.method == 'buildNotification' + #set $build = $params[1] + #set $buildTarget = $params[2] + Recipients: $printValue('', $params[0])
+ Build: $koji.buildLabel($build)
+ Build Target: $buildTarget.name
+ Web URL: $params[3] + #elif $task.method == 'tagNotification' + Recipients: $printValue('', $params[0])
+ Successful?: #if $params[1] then 'yes' else 'no'#
+ Tagged Into: $destTag.name
+ #if $srcTag + Moved From: $srcTag.name
+ #end if + Build: $koji.buildLabel($build)
+ Tagged By: $user.name
+ Ignore Success?: #if $params[6] then 'yes' else 'no'#
+ #if $params[7] + Failure Message: $params[7] + #end if + #elif $task.method == 'build' + Source: $params[0]
+ Build Target: $params[1]
+ $printOpts($params[2]) + #elif $task.method == 'runroot' + Tag: $params[0]
+ Arch: $params[1]
+ Command: $printValue('', $params[2], ' ')
+ #if $len($params) > 3 + $printOpts($params[3]) + #end if + #elif $task.method == 'newRepo' + Tag: $tag.name + #elif $task.method == 'prepRepo' + Tag: $params[0].name + #elif $task.method == 'createrepo' + Repo ID: $params[0]
+ Arch: $params[1]
+ #set $oldrepo = $params[2] + #if $oldrepo + Old Repo ID: $oldrepo.id
+ Old Repo Creation: $koji.formatTimeLong($oldrepo.creation_time) + #end if + #elif $task.method == 'dependantTask' + Dependant Tasks:
+ #for $dep in $deps +   $koji.taskLabel($dep)
+ #end for + Subtasks:
+ #for $subtask in $params[1] +   Method: $subtask[0]
+   Parameters: #echo ', '.join([$str($subparam) for $subparam in $subtask[1]])#
+ #if $len($subtask) > 2 and $subtask[2] +   Options:
+ $printMap($subtask[2], '    ') + #end if +
+ #end for + #elif $task.method == 'chainbuild' + Build Groups:
+ #set $groupNum = 0 + #for $urls in $params[0] + #set $groupNum += 1 +   $groupNum: #echo ', '.join($urls)#
+ #end for + Build Target: $params[1]
+ $printOpts($params[2]) + #elif $task.method == 'waitrepo' + Build Target: $params[0]
+ #if $params[1] + Newer Than: $params[1]
+ #end if + #if $params[2] + NVRs: $printValue('', $params[2]) + #end if + #else + $params + #end if +
State$state + #if $currentUser and ('admin' in $perms or $task.owner == $currentUser.id) + #if $task.state in ($koji.TASK_STATES.FREE, $koji.TASK_STATES.OPEN, $koji.TASK_STATES.ASSIGNED) + (cancel) + #elif $task.state in ($koji.TASK_STATES.CANCELED, $koji.TASK_STATES.FAILED) and (not $parent) + (resubmit) + #end if + #end if +
Build$koji.buildLabel($taskBuild)
Created$util.formatTimeLong($task.create_time)
Completed$util.formatTimeLong($task.completion_time)
Owner + #if $owner + #if $owner.usertype == $koji.USERTYPES['HOST'] + $owner.name + #else + $owner.name + #end if + #end if +
Channel + #if $task.channel_id + $channelName + #end if +
Host + #if $task.host_id + $hostName + #end if +
Arch$task.arch
Buildroot#if $len($buildroots) > 1 then 's' else ''# + #for $buildroot in $buildroots + /var/lib/mock/$buildroot.tag_name-$buildroot.id-$buildroot.repo_id
+ #end for +
Parent + #if $parent + $koji.taskLabel($parent) + #end if +
Descendent Tasks + #if $len($descendents[$str($task.id)]) > 0 + $task.method + #end if + $printChildren($task.id, $descendents) +
Waiting?#if $task.waiting then 'yes' else 'no'#
Awaited?#if $task.awaited then 'yes' else 'no'#
Priority$task.priority
Weight$task.weight
Result + #if $excClass + #if $hasattr($result, 'faultString') +
+$result.faultString.strip()
+          
+ #else + ${excClass.__name__}: $result + #end if + #elif $isinstance($result, dict) + $printMap($result) + #else + $printValue('', $result) + #end if +
Output + #for $filename in $output + $filename
+ #end for + #if $task.state not in ($koji.TASK_STATES.CLOSED, $koji.TASK_STATES.CANCELED, $koji.TASK_STATES.FAILED) and \ + $task.method in ('buildSRPMFromCVS', 'buildArch', 'runroot') + Watch logs + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/tasks.chtml b/www/kojiweb/tasks.chtml new file mode 100644 index 00000000..3c6b2df2 --- /dev/null +++ b/www/kojiweb/tasks.chtml @@ -0,0 +1,206 @@ +#import koji +#from kojiweb import util + +#def printChildren($taskID, $childMap) + #set $iter = 0 + #set $children = $childMap[$str($taskID)] + #if $children +
    + #for $child in $children + #set $iter += 1 + #if $iter < $len($children) +
  • + #else +
  • + #end if + #set $childState = $util.taskState($child.state) + + + $koji.taskLabel($child) + + + $printChildren($child.id, $childMap) +
  • + #end for +
+ #end if +#end def + +#def headerPrefix($state) + #if $state == 'active' +Active + #elif $state == 'toplevel' +Top-level + #elif $state == 'all' +All + #else +#echo $state.capitalize() + #end if +#end def + +#include "includes/header.chtml" + +

$headerPrefix($state) #if $method != 'all' then $method else ''# Tasks#if $ownerObj then ' owned by %s' % ($ownerObj.id, $ownerObj.name) else ''##if $host then ' on host %s' % ($host.id, $host.name) else ''#

+ + + + + + + + + + + + + + + + + #if $len($tasks) > 0 + #for $task in $tasks + + #set $taskState = $util.taskState($task.state) + + + #if $treeDisplay then ' ' else ''#$koji.taskLabel($task) + #if $treeDisplay + $printChildren($task.id, $task.descendents) + #end if + + + + + + + #end for + #else + + + + #end if + + + +
+
+ State: + #if $state == 'active' + active + #else + active + #end if + | + #if $state == 'toplevel' + toplevel + #else + toplevel + #end if + | + #if $state == 'all' + all + #else + all + #end if + | + +
+ Owner: + #if $ownerObj + everyone + #if $loggedInUser + #if $ownerObj.id == $loggedInUser.id + | me + #else + | me + #end if + #end if + #else + everyone + #if $loggedInUser + | me + #end if + #end if + | + +
+ Method: + +
+
+ #if $len($taskPages) > 1 +
+ Page: + +
+ #end if + #if $taskStart > 0 + <<< + #end if + #if $totalTasks != 0 + Tasks #echo $taskStart + 1 # through #echo $taskStart + $taskCount # of $totalTasks + #end if + #if $taskStart + $taskCount < $totalTasks + >>> + #end if +
ID $util.sortImage($self, 'id')Type $util.sortImage($self, 'method')Owner $util.sortImage($self, 'owner')Arch $util.sortImage($self, 'arch')Finished $util.sortImage($self, 'completion_time')State $util.sortImage($self, 'state')
$task.id + #if $task.owner_type == $koji.USERTYPES['HOST'] + $task.owner_name + #else + $task.owner_name + #end if + $task.arch$util.formatTime($task.completion_time)$util.imageTag($taskState)
No tasks
+ #if $len($taskPages) > 1 +
+ Page: + +
+ #end if + #if $taskStart > 0 + <<< + #end if + #if $totalTasks != 0 + Tasks #echo $taskStart + 1 # through #echo $taskStart + $taskCount # of $totalTasks + #end if + #if $taskStart + $taskCount < $totalTasks + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/tasksbyhost.chtml b/www/kojiweb/tasksbyhost.chtml new file mode 100644 index 00000000..5f2979b4 --- /dev/null +++ b/www/kojiweb/tasksbyhost.chtml @@ -0,0 +1,89 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Tasks by Host#if $hostArch then ' (%s)' % $hostArch else ''#

+ + + + + + + + + + + + + #if $len($hosts) > 0 + #for $host in $hosts + + + + + + #end for + #else + + + + #end if + + + +
+ Host arch: #for $arch in $hostArchList + #if $arch == $hostArch + $arch | + #else + $arch | + #end if + #end for + #if $hostArch + all + #else + all + #end if +
+ #if $len($hostPages) > 1 +
+ Page: + +
+ #end if + #if $hostStart > 0 + <<< + #end if + #if $totalHosts != 0 + Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts + #end if + #if $hostStart + $hostCount < $totalHosts + >>> + #end if +
Name $util.sortImage($self, 'name')Tasks $util.sortImage($self, 'tasks') 
$host.name$host.tasks
No hosts
+ #if $len($hostPages) > 1 +
+ Page: + +
+ #end if + #if $hostStart > 0 + <<< + #end if + #if $totalHosts != 0 + Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts + #end if + #if $hostStart + $hostCount < $totalHosts + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/tasksbyuser.chtml b/www/kojiweb/tasksbyuser.chtml new file mode 100644 index 00000000..6c29ced5 --- /dev/null +++ b/www/kojiweb/tasksbyuser.chtml @@ -0,0 +1,73 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Tasks by User

+ + + + + + + + + + #if $len($users) > 0 + #for $user in $users + + + + + + #end for + #else + + + + #end if + + + +
+ #if $len($userPages) > 1 +
+ Page: + +
+ #end if + #if $userStart > 0 + <<< + #end if + #if $totalUsers != 0 + Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers + #end if + #if $userStart + $userCount < $totalUsers + >>> + #end if +
Name $util.sortImage($self, 'name')Tasks $util.sortImage($self, 'tasks') 
$user.name$user.tasks
No users
+ #if $len($userPages) > 1 +
+ Page: + +
+ #end if + #if $userStart > 0 + <<< + #end if + #if $totalUsers != 0 + Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers + #end if + #if $userStart + $userCount < $totalUsers + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/userinfo.chtml b/www/kojiweb/userinfo.chtml new file mode 100644 index 00000000..5e6860b1 --- /dev/null +++ b/www/kojiweb/userinfo.chtml @@ -0,0 +1,108 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Information for user $user.name

+ + + + + + + + + + + + + + + + + + + +
Name$user.name
ID$user.id
Tasksview
Packages + #if $len($packages) > 0 + + + + + + + + + + #for $package in $packages + + + + + + #end for +
+ #if $len($packagePages) > 1 +
+ Page: + +
+ #end if + #if $packageStart > 0 + <<< + #end if + #echo $packageStart + 1 # through #echo $packageStart + $packageCount # of $totalPackages + #if $packageStart + $packageCount < $totalPackages + >>> + #end if +
Name $util.sortImage($self, 'package_name', 'packageOrder')Tag $util.sortImage($self, 'tag_name', 'packageOrder')Included? $util.sortImage($self, 'blocked', 'packageOrder')
$package.package_name$package.tag_name#if $package.blocked then $util.imageTag('no') else $util.imageTag('yes')#
+ #else + No packages + #end if +
Builds + #if $len($builds) > 0 + + + + + + + + + + #for $build in $builds + + #set $stateName = $util.stateName($build.state) + + + + + #end for +
+ #if $len($buildPages) > 1 +
+ Page: + +
+ #end if + #if $buildStart > 0 + <<< + #end if + #echo $buildStart + 1 # through #echo $buildStart + $buildCount # of $totalBuilds + #if $buildStart + $buildCount < $totalBuilds + >>> + #end if +
NVR $util.sortImage($self, 'nvr', 'buildOrder')Finished $util.sortImage($self, 'completion_time', 'buildOrder')State $util.sortImage($self, 'state', 'buildOrder')
$build.nvr$util.formatTime($build.completion_time)$util.stateImage($build.state)
+ #else + No builds + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/kojiweb/users.chtml b/www/kojiweb/users.chtml new file mode 100644 index 00000000..93e4709d --- /dev/null +++ b/www/kojiweb/users.chtml @@ -0,0 +1,77 @@ +#from kojiweb import util + +#include "includes/header.chtml" + +

Users

+ + + + + + + + + + + + #if $len($users) > 0 + #for $user in $users + + + + + + + + #end for + #else + + + + #end if + + + +
+ #if $len($userPages) > 1 +
+ Page: + +
+ #end if + #if $userStart > 0 + <<< + #end if + #if $totalUsers != 0 + Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers + #end if + #if $userStart + $userCount < $totalUsers + >>> + #end if +
ID $util.sortImage($self, 'id')Name $util.sortImage($self, 'name')PackagesBuildsTasks
$user.id$user.nameviewviewview
No users
+ #if $len($userPages) > 1 +
+ Page: + +
+ #end if + #if $userStart > 0 + <<< + #end if + #if $totalUsers != 0 + Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers + #end if + #if $userStart + $userCount < $totalUsers + >>> + #end if +
+ +#include "includes/footer.chtml" diff --git a/www/lib/Makefile b/www/lib/Makefile new file mode 100644 index 00000000..5fb11b51 --- /dev/null +++ b/www/lib/Makefile @@ -0,0 +1,20 @@ +SUBDIRS = kojiweb + +SERVERDIR = /var/www/koji-web/lib + +_default: + @echo "nothing to make. try make install" + +clean: + rm -f *.o *.so *.pyc *~ + for d in $(SUBDIRS); do make -s -C $$d clean; done + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR)/$(SERVERDIR) \ + -C $$d install; [ $$? = 0 ] || exit 1; done diff --git a/www/lib/kojiweb/Makefile b/www/lib/kojiweb/Makefile new file mode 100644 index 00000000..33d3ab5f --- /dev/null +++ b/www/lib/kojiweb/Makefile @@ -0,0 +1,30 @@ +PYTHON=python +PACKAGE = $(shell basename `pwd`) +PYFILES = $(wildcard *.py) +PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)') +PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix') +PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER) +PKGDIR = $(PYLIBDIR)/site-packages/$(PACKAGE) + +SERVERDIR = /kojiweb +FILES = $(wildcard *.py *.chtml) + +_default: + @echo "nothing to make. 
try make install" + +clean: + rm -f *.o *.so *.pyc *~ + for d in $(SUBDIRS); do make -s -C $$d clean; done + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/$(SERVERDIR) + for p in $(PYFILES) ; do \ + install -m 644 $$p $(DESTDIR)/$(SERVERDIR)/$$p; \ + done + $(PYTHON) -c "import compileall; compileall.compile_dir('$(DESTDIR)/$(SERVERDIR)', 1, '$(PYDIR)', 1)" diff --git a/www/lib/kojiweb/__init__.py b/www/lib/kojiweb/__init__.py new file mode 100644 index 00000000..ac095f59 --- /dev/null +++ b/www/lib/kojiweb/__init__.py @@ -0,0 +1 @@ +# identify this directory as a python module diff --git a/www/lib/kojiweb/handlers.py b/www/lib/kojiweb/handlers.py new file mode 100644 index 00000000..aaaf6b6b --- /dev/null +++ b/www/lib/kojiweb/handlers.py @@ -0,0 +1,7 @@ +def cleanup(req): + """Perform any cleanup actions required at the end of a request. + At the moment, this logs out the webserver <-> koji session.""" + if hasattr(req, '_session') and req._session.logged_in: + req._session.logout() + + return 0 diff --git a/www/lib/kojiweb/util.py b/www/lib/kojiweb/util.py new file mode 100644 index 00000000..551c5492 --- /dev/null +++ b/www/lib/kojiweb/util.py @@ -0,0 +1,276 @@ +import time +import koji + +def toggleOrder(template, sortKey, orderVar='order'): + """ + If orderVar equals 'sortKey', return '-sortKey', else + return 'sortKey'. + """ + if template.getVar(orderVar) == sortKey: + return '-' + sortKey + else: + return sortKey + +def sortImage(template, sortKey, orderVar='order'): + """ + Return an html img tag suitable for inclusion in the sortKey of a sortable table, + if the sortValue is "sortKey" or "-sortKey". + """ + orderVal = template.getVar(orderVar) + if orderVal == sortKey: + return '' + elif orderVal == '-' + sortKey: + return '' + else: + return '' + +def passthrough(template, *vars): + """ + Construct a string suitable for use as URL + parameters. 
For each variable name in *vars, + if the template has a corresponding non-None value, + append that name-value pair to the string. The name-value + pairs will be separated by ampersands (&), and prefixed by + an ampersand if there are any name-value pairs. If there + are no name-value pairs, an empty string will be returned. + """ + result = [] + for var in vars: + value = template.getVar(var, default=None) + if value != None: + result.append('%s=%s' % (var, value)) + if result: + return '&' + '&'.join(result) + else: + return '' + +def sortByKeyFunc(key, noneGreatest=False): + """Return a function to sort a list of maps by the given key. + If the key starts with '-', sort in reverse order. If noneGreatest + is True, None will sort higher than all other values (instead of lower). + """ + if noneGreatest: + # Normally None evaluates to be less than every other value + # Invert the comparison so it always evaluates to greater + cmpFunc = lambda a, b: (a is None or b is None) and -(cmp(a, b)) or cmp(a, b) + else: + cmpFunc = cmp + + if key.startswith('-'): + key = key[1:] + sortFunc = lambda a, b: cmpFunc(b[key], a[key]) + else: + sortFunc = lambda a, b: cmpFunc(a[key], b[key]) + + return sortFunc + +def paginateList(values, data, start, dataName, prefix=None, order=None, noneGreatest=False, pageSize=50): + """ + Slice the 'data' list into one page worth. Start at offset + 'start' and limit the total number of pages to pageSize + (defaults to 50). 'dataName' is the name under which the + list will be added to the value map, and prefix is the name + under which a number of list-related metadata variables will + be added to the value map. 
+ """ + if order != None: + data.sort(sortByKeyFunc(order, noneGreatest)) + + totalRows = len(data) + + if start: + start = int(start) + if not start or start < 0: + start = 0 + + data = data[start:(start + pageSize)] + count = len(data) + + _populateValues(values, dataName, prefix, data, totalRows, start, count, pageSize, order) + + return data + +def paginateMethod(server, values, methodName, args=None, kw=None, + start=None, dataName=None, prefix=None, order=None, pageSize=50): + """Paginate the results of the method with the given name when called with the given args and kws. + The method must support the queryOpts keyword parameter, and pagination is done in the database.""" + if args is None: + args = [] + if kw is None: + kw = {} + if start: + start = int(start) + if not start or start < 0: + start = 0 + if not dataName: + raise StandardError, 'dataName must be specified' + + kw['queryOpts'] = {'countOnly': True} + totalRows = getattr(server, methodName)(*args, **kw) + + kw['queryOpts'] = {'order': order, + 'offset': start, + 'limit': pageSize} + data = getattr(server, methodName)(*args, **kw) + count = len(data) + + _populateValues(values, dataName, prefix, data, totalRows, start, count, pageSize, order) + + return data + +def paginateResults(server, values, methodName, args=None, kw=None, + start=None, dataName=None, prefix=None, order=None, pageSize=50): + """Paginate the results of the method with the given name when called with the given args and kws. + This method should only be used when then method does not support the queryOpts command (because + the logic used to generate the result list prevents filtering/ordering from being done in the database). 
+ The method must return a list of maps.""" + if args is None: + args = [] + if kw is None: + kw = {} + if start: + start = int(start) + if not start or start < 0: + start = 0 + if not dataName: + raise StandardError, 'dataName must be specified' + + totalRows = server.count(methodName, *args, **kw) + + kw['filterOpts'] = {'order': order, + 'offset': start, + 'limit': pageSize} + data = server.filterResults(methodName, *args, **kw) + count = len(data) + + _populateValues(values, dataName, prefix, data, totalRows, start, count, pageSize, order) + + return data + +def _populateValues(values, dataName, prefix, data, totalRows, start, count, pageSize, order): + """Populate the values list with the data about the list provided.""" + values[dataName] = data + # Don't use capitalize() to title() here, they mess up + # mixed-case name + values['total' + dataName[0].upper() + dataName[1:]] = totalRows + # Possibly prepend a prefix to the numeric parameters, to avoid namespace collisions + # when there is more than one list on the same page + values[(prefix and prefix + 'Start' or 'start')] = start + values[(prefix and prefix + 'Count' or 'count')] = count + values[(prefix and prefix + 'Range' or 'range')] = pageSize + values[(prefix and prefix + 'Order' or 'order')] = order + currentPage = start / pageSize + values[(prefix and prefix + 'CurrentPage' or 'currentPage')] = currentPage + totalPages = totalRows / pageSize + if totalRows % pageSize > 0: + totalPages += 1 + pages = [page for page in range(0, totalPages) if (abs(page - currentPage) < 100 or ((page + 1) % 100 == 0))] + values[(prefix and prefix + 'Pages') or 'pages'] = pages + +def stateName(stateID): + """Convert a numeric build state into a readable name.""" + return koji.BUILD_STATES[stateID].lower() + +def imageTag(name): + """Return an img tag that loads an icon with the given name""" + return '%s' \ + % (name, name, name) + +def stateImage(stateID): + """Return an IMG tag that loads an icon appropriate for + 
the given state""" + name = stateName(stateID) + return imageTag(name) + +def brStateName(stateID): + """Convert a numeric buildroot state into a readable name.""" + return koji.BR_STATES[stateID].lower() + +def repoStateName(stateID): + """Convert a numeric repository state into a readable name.""" + if stateID == koji.REPO_INIT: + return 'initializing' + elif stateID == koji.REPO_READY: + return 'ready' + elif stateID == koji.REPO_EXPIRED: + return 'expired' + elif stateID == koji.REPO_DELETED: + return 'deleted' + else: + return 'unknown' + +def taskState(stateID): + """Convert a numeric task state into a readable name""" + return koji.TASK_STATES[stateID].lower() + +formatTime = koji.formatTime +formatTimeRSS = koji.formatTimeLong +formatTimeLong = koji.formatTimeLong + +def formatDep(name, version, flags): + """Format dependency information into + a human-readable format. Copied from + rpmUtils/miscutils.py:formatRequires()""" + s = name + + if flags: + if flags & (koji.RPMSENSE_LESS | koji.RPMSENSE_GREATER | + koji.RPMSENSE_EQUAL): + s = s + " " + if flags & koji.RPMSENSE_LESS: + s = s + "<" + if flags & koji.RPMSENSE_GREATER: + s = s + ">" + if flags & koji.RPMSENSE_EQUAL: + s = s + "=" + if version: + s = "%s %s" %(s, version) + return s + +def rowToggle(template): + """If the value of template._rowNum is even, return 'row-even'; + if it is odd, return 'row-odd'. Increment the value before checking it. + If the template does not have that value, set it to 0.""" + if not hasattr(template, '_rowNum'): + template._rowNum = 0 + template._rowNum += 1 + if template._rowNum % 2: + return 'row-odd' + else: + return 'row-even' + +_fileFlags = {1: 'configuration', + 2: 'documentation', + 4: 'icon', + 8: 'missing ok', + 16: "don't replace", + 64: 'ghost', + 128: 'license', + 256: 'readme', + 512: 'exclude', + 1024: 'unpatched', + 2048: 'public key'} + +def formatFileFlags(flags): + """Format rpm fileflags for display. 
Returns + a list of human-readable strings specifying the + flags set in "flags".""" + results = [] + for flag, desc in _fileFlags.items(): + if flags & flag: + results.append(desc) + return results + +def escapeHTML(value): + """Replace special characters to the text can be displayed in + an HTML page correctly. + < : < + > : > + & : & + """ + if not value: + return value + + return value.replace('&', '&').\ + replace('<', '<').\ + replace('>', '>') diff --git a/www/static/Makefile b/www/static/Makefile new file mode 100644 index 00000000..9b4e2c96 --- /dev/null +++ b/www/static/Makefile @@ -0,0 +1,24 @@ +SUBDIRS = images errors js + +SERVERDIR = /var/www/koji-web/static +FILES = $(wildcard *.css) + +_default: + @echo "nothing to make. try make install" + +clean: + rm -f *.o *.so *.pyc *~ + for d in $(SUBDIRS); do make -s -C $$d clean; done + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/$(SERVERDIR) + install -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR) + + for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR)/$(SERVERDIR) \ + -C $$d install; [ $$? = 0 ] || exit 1; done diff --git a/www/static/debug.css b/www/static/debug.css new file mode 100644 index 00000000..84cbaf41 --- /dev/null +++ b/www/static/debug.css @@ -0,0 +1,9 @@ +/* for debugging purposes */ + +@import url(koji.css); + +* { + border: 1px solid black !IMPORTANT; + margin: 1px !IMPORTANT; + padding: 1px !IMPORTANT; +} diff --git a/www/static/errors/Makefile b/www/static/errors/Makefile new file mode 100644 index 00000000..fad54932 --- /dev/null +++ b/www/static/errors/Makefile @@ -0,0 +1,18 @@ +SERVERDIR = /errors +FILES = $(wildcard *.html) + +_default: + @echo "nothing to make. 
try make install" + +clean: + rm -f *.o *.so *.pyc *~ + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/$(SERVERDIR) + install -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR) diff --git a/www/static/errors/unauthorized.html b/www/static/errors/unauthorized.html new file mode 100644 index 00000000..4915d6a1 --- /dev/null +++ b/www/static/errors/unauthorized.html @@ -0,0 +1,37 @@ + + + + Authentication Failed | Koji + + + + + + + + +
+
+ + + + +
+

Kerberos Authentication Failed

+ The Koji Web UI was unable to verify your Kerberos credentials. Please make sure that you have valid + Kerberos tickets (obtainable via kinit), and that you have + configured your browser correctly. +
+ + + +
+
+ + + diff --git a/www/static/images/1px.gif b/www/static/images/1px.gif new file mode 100644 index 00000000..41ee92f2 Binary files /dev/null and b/www/static/images/1px.gif differ diff --git a/www/static/images/Makefile b/www/static/images/Makefile new file mode 100644 index 00000000..cb9e3596 --- /dev/null +++ b/www/static/images/Makefile @@ -0,0 +1,18 @@ +SERVERDIR = /images +FILES = $(wildcard *.gif *.png) + +_default: + @echo "nothing to make. try make install" + +clean: + rm -f *.o *.so *.pyc *~ + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/$(SERVERDIR) + install -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR) diff --git a/www/static/images/assigned.png b/www/static/images/assigned.png new file mode 100644 index 00000000..9ad3715f Binary files /dev/null and b/www/static/images/assigned.png differ diff --git a/www/static/images/bkgrnd_greydots.png b/www/static/images/bkgrnd_greydots.png new file mode 100644 index 00000000..d5e79e84 Binary files /dev/null and b/www/static/images/bkgrnd_greydots.png differ diff --git a/www/static/images/building.png b/www/static/images/building.png new file mode 100644 index 00000000..1b4710b2 Binary files /dev/null and b/www/static/images/building.png differ diff --git a/www/static/images/canceled.png b/www/static/images/canceled.png new file mode 100644 index 00000000..acf20368 Binary files /dev/null and b/www/static/images/canceled.png differ diff --git a/www/static/images/closed.png b/www/static/images/closed.png new file mode 100644 index 00000000..8e7a9d75 Binary files /dev/null and b/www/static/images/closed.png differ diff --git a/www/static/images/complete.png b/www/static/images/complete.png new file mode 100644 index 00000000..8e7a9d75 Binary files /dev/null and b/www/static/images/complete.png differ diff --git a/www/static/images/deleted.png b/www/static/images/deleted.png new file mode 100644 index 00000000..bbdf2247 Binary 
files /dev/null and b/www/static/images/deleted.png differ diff --git a/www/static/images/expired.png b/www/static/images/expired.png new file mode 100644 index 00000000..dc7ed603 Binary files /dev/null and b/www/static/images/expired.png differ diff --git a/www/static/images/failed.png b/www/static/images/failed.png new file mode 100644 index 00000000..b4b29bfb Binary files /dev/null and b/www/static/images/failed.png differ diff --git a/www/static/images/free.png b/www/static/images/free.png new file mode 100644 index 00000000..fa147fe4 Binary files /dev/null and b/www/static/images/free.png differ diff --git a/www/static/images/gray-triangle-down.gif b/www/static/images/gray-triangle-down.gif new file mode 100644 index 00000000..e5d3dacd Binary files /dev/null and b/www/static/images/gray-triangle-down.gif differ diff --git a/www/static/images/gray-triangle-up.gif b/www/static/images/gray-triangle-up.gif new file mode 100644 index 00000000..09149a42 Binary files /dev/null and b/www/static/images/gray-triangle-up.gif differ diff --git a/www/static/images/init.png b/www/static/images/init.png new file mode 100644 index 00000000..8177e6bd Binary files /dev/null and b/www/static/images/init.png differ diff --git a/www/static/images/initializing.png b/www/static/images/initializing.png new file mode 100644 index 00000000..8177e6bd Binary files /dev/null and b/www/static/images/initializing.png differ diff --git a/www/static/images/koji.png b/www/static/images/koji.png new file mode 100644 index 00000000..2a7b508c Binary files /dev/null and b/www/static/images/koji.png differ diff --git a/www/static/images/no.png b/www/static/images/no.png new file mode 100644 index 00000000..b4b29bfb Binary files /dev/null and b/www/static/images/no.png differ diff --git a/www/static/images/open.png b/www/static/images/open.png new file mode 100644 index 00000000..c1e6e96d Binary files /dev/null and b/www/static/images/open.png differ diff --git a/www/static/images/ready.png 
b/www/static/images/ready.png new file mode 100644 index 00000000..8e7a9d75 Binary files /dev/null and b/www/static/images/ready.png differ diff --git a/www/static/images/unknown.png b/www/static/images/unknown.png new file mode 100644 index 00000000..5b83f4f7 Binary files /dev/null and b/www/static/images/unknown.png differ diff --git a/www/static/images/waiting.png b/www/static/images/waiting.png new file mode 100644 index 00000000..fa147fe4 Binary files /dev/null and b/www/static/images/waiting.png differ diff --git a/www/static/images/yes.png b/www/static/images/yes.png new file mode 100644 index 00000000..8e7a9d75 Binary files /dev/null and b/www/static/images/yes.png differ diff --git a/www/static/js/Makefile b/www/static/js/Makefile new file mode 100644 index 00000000..617087e6 --- /dev/null +++ b/www/static/js/Makefile @@ -0,0 +1,21 @@ +SERVERDIR = /js +FILES = $(wildcard *.js) + +_default: + @echo "nothing to make. try make install" + +clean: + rm -f *.o *.so *.pyc *~ + for d in $(SUBDIRS); do make -s -C $$d clean; done + +install: + @if [ "$(DESTDIR)" = "" ]; then \ + echo " "; \ + echo "ERROR: A destdir is required"; \ + exit 1; \ + fi + + mkdir -p $(DESTDIR)/$(SERVERDIR) + install -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR) + + cp -r jsolait $(DESTDIR)/$(SERVERDIR) diff --git a/www/static/js/jsolait/copying.txt b/www/static/js/jsolait/copying.txt new file mode 100644 index 00000000..b1e3f5a2 --- /dev/null +++ b/www/static/js/jsolait/copying.txt @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] 
+ + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. 
+ + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. 
It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. 
This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. 
You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) 
+ +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/www/static/js/jsolait/init.js b/www/static/js/jsolait/init.js new file mode 100644 index 00000000..6b340da2 --- /dev/null +++ b/www/static/js/jsolait/init.js @@ -0,0 +1,739 @@ +/* + Copyright (c) 2003-2005 Jan-Klaas Kollhof + + This file is part of the JavaScript o lait library(jsolait). 
+ + jsolait is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + This software is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with this software; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +/** + Evaluates script in a global scope. + @param [0] The code to evaluate. +*/ +globalEval=function(){ + return eval(arguments[0]); +} + + +/** + Creates a new class object which inherits from superClass. + @param className="anonymous" The name of the new class. + If the created class is a public member of a module then + the className is automatically set. + @param superClass=Object The class to inherit from (super class). + @param classScope A function which is executed for class construction. + As 1st parameter it will get the new class' protptype for + overrideing or extending the super class. As 2nd parameter it will get + the super class' wrapper for calling inherited methods. +*/ +Class = function(className, superClass, classScope){ + if(arguments.length == 2){ + classScope = superClass; + if(typeof className != "string"){ + superClass = className; + className = "anonymous"; + }else{ + superClass = Object; + } + }else if(arguments.length == 1){ + classScope = className; + superClass = Object; + className = "anonymous"; + } + + //this is the constructor for the new objects created from the new class. + //if and only if it is NOT used for prototyping/subclassing the init method of the newly created object will be called. 
+ var NewClass = function(calledBy){ + if(calledBy !== Class){ + return this.init.apply(this, arguments); + } + } + //This will create a new prototype object of the new class. + NewClass.createPrototype = function(){ + return new NewClass(Class); + } + //setting class properties for the new class. + NewClass.superClass = superClass; + NewClass.className=className; + NewClass.toString = function(){ + return "[class %s]".format(NewClass.className); + } + if(superClass.createPrototype!=null){//see if the super class can create prototypes. (creating an object without calling init()) + NewClass.prototype = superClass.createPrototype(); + }else{//just create an object of the super class + NewClass.prototype = new superClass(); + } + //reset the constructor for new objects to the actual constructor. + NewClass.prototype.constructor = NewClass; + + if(superClass == Object){//all other objects already have a nice toString method. + NewClass.prototype.toString = function(){ + return "[object %s]".format(this.constructor.className); + } + } + //make sure the new class has an init method if it does not exist yet in the super class + if(NewClass.prototype.init==null){ + NewClass.prototype.init=function(){ + } + } + + //create a supr function to be used to call methods of the super class + var supr = function(self){ + //set up super class functionality so a call to super(this) will return an object with all super class methods + //the methods can be called like super(this).foo and the this object will be bound to that method + var wrapper = {}; + var superProto = superClass.prototype; + for(var n in superProto){ + if(typeof superProto[n] == "function"){ + wrapper[n] = function(){ + var f = arguments.callee; + return superProto[f._name].apply(self, arguments); + } + wrapper[n]._name = n; + } + } + return wrapper; + } + + //execute the scope of the class + classScope(NewClass.prototype, supr); + + return NewClass; +} +Class.toString = function(){ + return "[object Class]"; +} 
+Class.createPrototype=function(){ + throw "Can't use Class as a super class."; +} + +/** + Creates a new module and registers it. + @param name The name of the module. + @param version The version of a module. + @param moduleScope A function which is executed for module creation. + As 1st parameter it will get the module variable. +*/ +Module = function(name, version, moduleScope){ + var mod = new Object(); + mod.version = version; + mod.name = name; + mod.toString=function(){ + return "[module '%s' version: %s]".format(mod.name, mod.version); + } + + /** + Base class for all module-Exceptions. + */ + mod.Exception=Class("Exception", function(publ){ + /** + Initializes a new Exception. + @param msg The error message for the user. + @param trace=null The error causing this Exception if available. + */ + publ.init=function(msg, trace){ + this.name = this.constructor.className; + this.message = msg; + this.trace = trace; + } + + publ.toString=function(){ + var s = "%s %s\n\n".format(this.name, this.module); + s += this.message; + return s; + } + /** + Returns the complete trace of the exception. + @return The error trace. + */ + publ.toTraceString=function(){ + var s = "%s %s:\n ".format(this.name, this.module ); + s+="%s\n\n".format(this.message); + if(this.trace){ + if(this.trace.toTraceString){ + s+= this.trace.toTraceString(); + }else{ + s+= this.trace; + } + } + return s; + } + ///The name of the Exception(className). + publ.name; + ///The error message. + publ.message; + ///The module the Exception belongs to. + publ.module = mod; + ///The error which caused the Exception or null. + publ.trace; + }) + + //execute the scope of the module + moduleScope(mod); + + //todo: set classNames for anonymous classes. 
+ for(var n in mod){ + if(mod[n].className == "anonymous"){ + mod[n].className = n; + } + } + + if(name != "jsolait"){ + jsolait.registerModule(mod); + } + return mod; +} +Module.toString = function(){ + return "[object Module]"; +} +Module.createPrototype=function(){ + throw "Can't use Module as a super class."; +} + + +//docstart + +/** + The root module for jsolait. + It provides some global functionality for loading modules, + some String enhancements. +*/ +Module("jsolait", "0.1.0", function(mod){ + ///The global jsolait object. + jsolait=mod; + + ///base url for user modules. + mod.baseURL="."; + ///The URL where jsolait is installed. + mod.libURL ="./jsolait"; + ///Collection of all loaded modules.(module cache) + mod.modules = new Array(); + ///The URLs of there the modules, part of jsolait. + mod.moduleURLs = {urllib:"%(libURL)s/lib/urllib.js", + xml:"%(libURL)s/lib/xml.js", + crypto:"%(libURL)s/lib/crypto.js", + codecs:"%(libURL)s/lib/codecs.js", + jsonrpc:"%(libURL)s/lib/jsonrpc.js", + lang:"%(libURL)s/lib/lang.js", + iter:"%(libURL)s/lib/iter.js", + xmlrpc:"%(libURL)s/lib/xmlrpc.js"}; + + mod.init=function(){ + //make jsolait work with WScript + var ws = null; + try{//see if WScript is available + ws = WScript; + }catch(e){ + } + if(ws != null){ + initWS(); + } + } + + ///initializes jsolait for using it with WScript + var initWS = function(){ + print=function(msg){ + WScript.echo(msg); + } + alert=function(msg){ + print(msg); + } + var args = WScript.arguments; + try{ + + //get script to execute + if(args(0) == "--test"){ + var fileURL = args(1); + var doTest = true; + }else{ + var fileURL = args(0); + var doTest = false; + } + var baseURL = fileURL.replace(/\\/g, "/"); + baseURL = baseURL.split("/"); + baseURL = baseURL.slice(0, baseURL.length-1); + //set base for user module loading + mod.baseURL = baseURL.join("/"); + }catch(e){ + throw new mod.Exception("Missing script filename to be run.", e); + } + + //location of jsolait/init.js + urlInit = 
WScript.ScriptFullName; + + urlInit = urlInit.replace(/\\/g, "/"); + urlInit = urlInit.split("/"); + urlInit = urlInit.slice(0, urlInit.length-1); + mod.libURL = "file://" + urlInit.join("/"); + + try{ + mod.loadScript(fileURL); + }catch(e){ + WScript.stdErr.write("%s(1,1) jsolait runtime error:\n%s\n".format(args(0).replace("file://",""), e.toTraceString())); + } + + if(doTest){ + var modName = fileURL.split("\\"); + modName = modName.pop(); + /*if(mod.libURL.toLowerCase() == mod.baseURL.slice(0, mod.libURL.length).toLowerCase()){ + var modName = fileURL.slice(mod.libURL.length + 5); + */ + modName = modName.slice(0, modName.length -3); + modName.replace(/\//g, "."); + print("importing module: %s".format(modName)); + var m = importModule(modName); + print("%s imported\ntesting...\n".format(m)); + m.test(); + print("\nfinished testing.".format(modName)); + //} + } + + } + + + /** + Imports a module given its name(someModule.someSubModule). + A module's file location is determined by treating each module name as a directory. + Only the last one points to a file. + If the module's URL is not known to jsolait then it will be searched for in jsolait.baseURL which is "." by default. + @param name The name of the module to load. + @return The module object. + */ + mod.importModule = function(name){ + + if (mod.modules[name]){ //module already loaded + return mod.modules[name]; + }else{ + var src,modURL; + //check if jsolait already knows the url of the module(moduleURLs contains urls to modules) + if(mod.moduleURLs[name]){ + modURL = mod.moduleURLs[name].format(mod); + }else{//assume it's a user module and located at baseURL + modURL = "%s/%s.js".format(mod.baseURL, name.split(".").join("/")); + } + try{//to load module from location calculated above + src = getFile(modURL); + }catch(e){//module could not be found at the location. 
+ throw new mod.ModuleImportFailed(name, modURL, e); + } + + try{//interpret the script + globalEval(src); + }catch(e){ + throw new mod.ModuleImportFailed(name, modURL, e); + } + //the module should have registered itself + return mod.modules[name]; + } + } + //make it global + importModule = mod.importModule; + + /** + Loads and interprets a script file. + @param url The url of the script to load. + */ + mod.loadScript=function(url){ + var src = getFile(url); + try{//to interpret the source + globalEval(src); + }catch(e){ + throw new mod.EvalFailed(url, e); + } + } + /** + Registers a new module. + Registered modules can be imported with importModule(...). + @param module The module to register. + */ + mod.registerModule = function(module){ + this.modules[module.name] = module; + } + + /** + Creates an HTTP request object for retreiving files. + @return HTTP request object. + */ + var getHTTP=function() { + var obj; + try{ //to get the mozilla httprequest object + obj = new XMLHttpRequest(); + }catch(e){ + try{ //to get MS HTTP request object + obj=new ActiveXObject("Msxml2.XMLHTTP.4.0"); + }catch(e){ + try{ //to get MS HTTP request object + obj=new ActiveXObject("Msxml2.XMLHTTP"); + }catch(e){ + try{// to get the old MS HTTP request object + obj = new ActiveXObject("microsoft.XMLHTTP"); + }catch(e){ + throw new mod.Exception("Unable to get an HTTP request object."); + } + } + } + } + return obj; + } + /** + Retrieves a file given its URL. + @param url The url to load. + @param headers=[] The headers to use. + @return The content of the file. + */ + var getFile=function(url, headers) { + //if callback is defined then the operation is done async + headers = (headers != null) ? 
headers : []; + //setup the request + try{ + var xmlhttp= getHTTP(); + xmlhttp.open("GET", url, false); + for(var i=0;i< headers.length;i++){ + xmlhttp.setRequestHeader(headers[i][0], headers[i][1]); + } + xmlhttp.send(""); + }catch(e){ + throw new mod.Exception("Unable to load URL: '%s'.".format(url), e); + } + if(xmlhttp.status == 200 || xmlhttp.status == 0){ + return xmlhttp.responseText; + }else{ + throw new mod.Exception("File not loaded: '%s'.".format(url)); + } + } + + Error.prototype.toTraceString = function(){ + if(this.message){ + return "%s\n".format(this.message); + } + if (this.description){ + return "%s\n".format(this.description); + } + return "unknown error\n"; + } + + + /** + Thrown when a module could not be found. + */ + mod.ModuleImportFailed=Class(mod.Exception, function(publ, supr){ + /** + Initializes a new ModuleImportFailed Exception. + @param name The name of the module. + @param url The url of the module. + @param trace The error cousing this Exception. + */ + publ.init=function(moduleName, url, trace){ + supr(this).init("Failed to import module: '%s' from URL:'%s'".format(moduleName, url), trace); + this.moduleName = moduleName; + this.url = url; + } + ///The name of the module that was not found. + publ.moduleName; + ///The url the module was expected to be found at. + publ.url; + }) + + /** + Thrown when a source could not be loaded due to an interpretation error. + */ + mod.EvalFailed=Class(mod.Exception, function(publ, supr){ + /** + Initializes a new EvalFailed exception. + @param url The url of the module. + @param trace The exception that was thrown while interpreting the module's source code. + */ + publ.init=function(url, trace){ + supr(this).init("File '%s' Eval of script failed.".format(url), trace); + this.url = url; + } + ///The url the module was expected to be found at. + publ.url; + }) + + /** + Displays an exception and it's trace. + This works better than alert(e) because traces are taken into account. 
+ @param exception The exception to display. + */ + mod.reportException=function(exception){ + if(exception.toTraceString){ + var s= exception.toTraceString(); + }else{ + var s = exception.toString(); + } + var ws = null; + try{//see if WScript is available + ws = WScript; + }catch(e){ + } + if(ws != null){ + WScript.stderr.write(s); + }else{ + alert(s); + } + } + ///The global exception report method; + reportException = mod.reportException; +}) + +//stringmod +/** + String formatting module. + It allows python like string formatting ("some text %s" % "something"). + Also similar to sprintf from C. +*/ +Module("stringformat", "0.1.0", function(mod){ + /** + Creates a format specifier object. + */ + var FormatSpecifier=function(s){ + var s = s.match(/%(\(\w+\)){0,1}([ 0-]){0,1}(\+){0,1}(\d+){0,1}(\.\d+){0,1}(.)/); + if(s[1]){ + this.key=s[1].slice(1,-1); + }else{ + this.key = null; + } + this.paddingFlag = s[2]; + if(this.paddingFlag==""){ + this.paddingFlag =" " + } + this.signed=(s[3] == "+"); + this.minLength = parseInt(s[4]); + if(isNaN(this.minLength)){ + this.minLength=0; + } + if(s[5]){ + this.percision = parseInt(s[5].slice(1,s[5].length)); + }else{ + this.percision=-1; + } + this.type = s[6]; + } + + /** + Formats a string replacing formatting specifiers with values provided as arguments + which are formatted according to the specifier. + This is an implementation of python's % operator for strings and is similar to sprintf from C. + Usage: + resultString = formatString.format(value1, v2, ...); + + Each formatString can contain any number of formatting specifiers which are + replaced with the formated values. + + specifier([...]-items are optional): + "%(key)[flag][sign][min][percision]typeOfValue" + + (key) If specified the 1st argument is treated as an object/associative array and the formating values + are retrieved from that object using the key. + + flag: + 0 Use 0s for padding. + - Left justify result, padding it with spaces. 
+ Use spaces for padding. + sign: + + Numeric values will contain a +|- infront of the number. + min: + l The string will be padded with the padding character until it has a minimum length of l. + percision: + .x Where x is the percision for floating point numbers and the lenght for 0 padding for integers. + typeOfValue: + d Signed integer decimal. + i Signed integer decimal. + b Unsigned binary. //This does not exist in python! + o Unsigned octal. + u Unsigned decimal. + x Unsigned hexidecimal (lowercase). + X Unsigned hexidecimal (uppercase). + e Floating point exponential format (lowercase). + E Floating point exponential format (uppercase). + f Floating point decimal format. + F Floating point decimal format. + c Single character (accepts byte or single character string). + s String (converts any object using object.toString()). + + Examples: + "%02d".format(8) == "08" + "%05.2f".format(1.234) == "01.23" + "123 in binary is: %08b".format(123) == "123 in binary is: 01111011" + + @param * Each parameter is treated as a formating value. + @return The formated String. 
+ */ + String.prototype.format=function(){ + var sf = this.match(/(%(\(\w+\)){0,1}[ 0-]{0,1}(\+){0,1}(\d+){0,1}(\.\d+){0,1}[dibouxXeEfFgGcrs%])|([^%]+)/g); + if(sf){ + if(sf.join("") != this){ + throw new mod.Exception("Unsupported formating string."); + } + }else{ + throw new mod.Exception("Unsupported formating string."); + } + var rslt =""; + var s; + var obj; + var cnt=0; + var frmt; + var sign=""; + + for(var i=0;i=arguments.length){ + throw new mod.Exception("Not enough arguments for format string"); + }else{ + obj=arguments[cnt]; + cnt++; + } + } + + if(frmt.type == "s"){//String + if (obj == null){ + obj = "null"; + } + s=obj.toString().pad(frmt.paddingFlag, frmt.minLength); + + }else if(frmt.type == "c"){//Character + if(frmt.paddingFlag == "0"){ + frmt.paddingFlag=" ";//padding only spaces + } + if(typeof obj == "number"){//get the character code + s = String.fromCharCode(obj).pad(frmt.paddingFlag , frmt.minLength) ; + }else if(typeof obj == "string"){ + if(obj.length == 1){//make sure it's a single character + s=obj.pad(frmt.paddingFlag, frmt.minLength); + }else{ + throw new mod.Exception("Character of length 1 required."); + } + }else{ + throw new mod.Exception("Character or Byte required."); + } + }else if(typeof obj == "number"){ + //get sign of the number + if(obj < 0){ + obj = -obj; + sign = "-"; //negative signs are always needed + }else if(frmt.signed){ + sign = "+"; // if sign is always wanted add it + }else{ + sign = ""; + } + //do percision padding and number conversions + switch(frmt.type){ + case "f": //floats + case "F": + if(frmt.percision > -1){ + s = obj.toFixed(frmt.percision).toString(); + }else{ + s = obj.toString(); + } + break; + case "E"://exponential + case "e": + if(frmt.percision > -1){ + s = obj.toExponential(frmt.percision); + }else{ + s = obj.toExponential(); + } + s = s.replace("e", frmt.type); + break; + case "b"://binary + s = obj.toString(2); + s = s.pad("0", frmt.percision); + break; + case "o"://octal + s = 
obj.toString(8); + s = s.pad("0", frmt.percision); + break; + case "x"://hexadecimal + s = obj.toString(16).toLowerCase(); + s = s.pad("0", frmt.percision); + break; + case "X"://hexadecimal + s = obj.toString(16).toUpperCase(); + s = s.pad("0", frmt.percision); + break; + default://integers + s = parseInt(obj).toString(); + s = s.pad("0", frmt.percision); + break; + } + if(frmt.paddingFlag == "0"){//do 0-padding + //make sure that the length of the possible sign is not ignored + s=s.pad("0", frmt.minLength - sign.length); + } + s=sign + s;//add sign + s=s.pad(frmt.paddingFlag, frmt.minLength);//do padding and justifiing + }else{ + throw new mod.Exception("Number required."); + } + } + rslt += s; + } + return rslt; + } + + /** + Padds a String with a character to have a minimum length. + + @param flag "-": to padd with " " and left justify the string. + Other: the character to use for padding. + @param len The minimum length of the resulting string. + */ + String.prototype.pad = function(flag, len){ + var s = ""; + if(flag == "-"){ + var c = " "; + }else{ + var c = flag; + } + for(var i=0;i> 16, (nBits & 0xff00) >> 8, nBits & 0xff); + } + //make sure padding chars are left out. + sDecoded[sDecoded.length-1] = sDecoded[sDecoded.length-1].substring(0, 3 - ((this.charCodeAt(i - 2) == 61) ? 2 : (this.charCodeAt(i - 1) == 61 ? 1 : 0))); + return sDecoded.join(""); + } + }else{ + throw new mod.Exception("String length must be divisible by 4."); + } + } + + /** + Encodes a string using Base64. 
+ */ + String.prototype.encode_base64=function(){ + if(typeof(btoa) != "undefined"){//try using mozillas builtin codec + return btoa(this); + }else{ + var base64 = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z', + 'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z', + '0','1','2','3','4','5','6','7','8','9','+','/']; + var sbin; + var pad=0; + var s="" + this; + if((s.length % 3) == 1){ + s+=String.fromCharCode(0); + s+=String.fromCharCode(0); + pad=2; + }else if((s.length % 3) == 2){ + s+=String.fromCharCode(0); + pad=1; + } + //create a result buffer, this is much faster than having strings concatinated. + var rslt=new Array(s.length / 3); + var ri=0; + for(var i=0;i> 18) & 0x3f] + base64[(sbin >> 12) & 0x3f] + base64[(sbin >>6) & 0x3f] + base64[sbin & 0x3f]); + ri++; + } + if(pad>0){ + rslt[rslt.length-1] = rslt[rslt.length-1].substr(0, 4-pad) + ((pad==2) ? "==" : (pad==1) ? "=" : ""); + } + return rslt.join(""); + } + } + + /** + Decodes a URI using decodeURI. + */ + String.prototype.decode_uri=function(){ + return decodeURI(this); + } + + /** + Encodes a URI using encodeURI. + */ + String.prototype.encode_uri=function(){ + return encodeURI(this); + } +}) diff --git a/www/static/js/jsolait/lib/crypto.js b/www/static/js/jsolait/lib/crypto.js new file mode 100644 index 00000000..99c89079 --- /dev/null +++ b/www/static/js/jsolait/lib/crypto.js @@ -0,0 +1,151 @@ +/* + Copyright (c) 2003 Jan-Klaas Kollhof + + This file is part of the JavaScript o lait library(jsolait). + + jsolait is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. 
+ + This software is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with this software; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +/** + Cryptography module. + Provides String encryption/decryption and hashing. + @creator Jan-Klaas Kollhof + @created 2003-12-07 +*/ +Module("crypto", "0.1.2", function(mod){ + /** + Returns all all available encrypters. + @return An array of encrypters names. + */ + mod.listEncrypters=function(){ + var c=[]; + for(var attr in String.prototype){ + if(attr.slice(0, 8) == "encrypt_"){ + c.push(attr.slice(8)); + } + } + return c; + } + /** + Returns all all available decrypters. + @return An array of decrypters names. + */ + mod.listDecrypters=function(){ + var c=[]; + for(var attr in String.prototype){ + if(attr.slice(0, 8) == "decrypt_"){ + c.push(attr.slice(8)); + } + } + return c; + } + + /** + Encrypts a string. + Parameters but the crypdec parameter are forwardet to the crypdec. + @param codec The codec to use. + */ + String.prototype.encrypt=function(crydec){ + var n = "encrypt_" + crydec; + if(String.prototype[n]){ + var args=[]; + for(var i=1;i this.end){ + throw new mod.StopIteration(); + }else{ + this.current = this.current + this.step; + return this.current; + } + } + }) + + Range = mod.Range; + + /** + Iterator for Arrays. 
+ */ + mod.ArrayItereator=Class(mod.Iterator, function(publ, supr){ + publ.init=function(array){ + this.array = array; + this.index = -1; + } + publ.next = function(){ + this.index += 1; + if(this.index >= this.array.length){ + throw new mod.StopIteration(); + } + return this.array[this.index]; + } + }) + + Array.prototype.iterator = function(){ + return new mod.ArrayItereator(this); + } + + /** + Interface of a IterationCallback. + @param item The item returned by the iterator for the current step. + @param iteration The Iteration object handling the iteration. + */ + mod.IterationCallback = function(item, iteration){}; + + /** + Iteration class for handling iteration steps and callbacks. + */ + mod.Iteration = Class(function(publ, supr){ + /** + Initializes an Iteration object. + @param iteratable An itaratable object. + @param callback An IterationCallback object. + */ + publ.init=function(iteratable, callback){ + this.doStop = false; + this.iterator = iteratable.iterator(); + this.callback = callback; + } + + ///Resumes a paused/stoped iteration. + publ.resume = function(){ + this.doStop = false; + while(!this.doStop){ + this.handleStep(); + } + } + ///Pauses an iteration. + publ.pause=function(){ + this.doStop = true; + } + ///Stops an iteration + publ.stop = function(){ + this.pause(); + } + ///Starts/resumes an iteration + publ.start = function(){ + this.resume(); + } + + ///Handles a single iteration step calling the callback with the next item or terminating. + publ.handleStep = function(){ + try{//to get the next item + var item=this.iterator.next(); + }catch(e){ + if(e.constructor != mod.StopIteration){ + throw e; //was a different error in the iterator, so throw it + }else{ + this.stop(); //this is the end of the iteration + return; + } + } + //let the callback handle the item + this.callback(item, this); + } + }) + + /** + Class for handling asynchronous iterations. 
+ */ + mod.AsyncIteration = Class(mod.Iteration, function(publ, supr){ + /** + Initializes an AsyncIteration object. + @param iteratable An itaratable object. + @param interval The time in ms betwen each step. + @param callback An IterationCallback object. + */ + publ.init=function(iteratable, interval, callback){ + if(arguments.length == 2){ + callback = interval; + interval = 0; + } + this.iterator = iteratable.iterator(); + this.interval = interval; + this.callback = callback; + this.isRunning = false; + } + + publ.pause=function(){ + if(this.isRunning){ + this.isRunning = false; + clearTimeout(this.timeout); + delete fora.iterations[this.id]; + } + } + + publ.resume = function(){ + if(this.isRunning == false){ + this.isRunning = true; + var id=0;//find unused id + while(fora.iterations[id]){ + this.id++; + } + this.id = "" + id; + fora.iterations[this.id] = this; + //let the iteration be handled using a timer + this.timeout = setTimeout("fora.handleAsyncStep('" + this.id + "')", this.interval); + } + } + + publ.handleAsyncStep = function(){ + if(this.isRunning){ + this.handleStep(); + this.timeout = setTimeout("fora.handleAsyncStep('" + this.id + "')", this.interval); + } + } + }) + + /** + Asynchronous iteration function. + This function returns immidiately and executes each iteration step asynchronously. + @param iteratable An iteratable object. + @param interval=0 The interval time in ms for each iteration step. + @param cb The IterationCallback which is called for each itereation step. + @return An AsyncIteration object. + */ + fora = function(iteratable, interval, cb){ + if(arguments.length==2){ + var it = new mod.AsyncIteration(iteratable, interval); + }else{ + var it = new mod.AsyncIteration(iteratable, interval, cb); + } + it.start(); + return it; + } + + fora.handleAsyncStep = function(id){ + if(fora.iterations[id]){ + fora.iterations[id].handleAsyncStep(); + } + } + ///Helper object containing all async. iteration objects. 
+ fora.iterations = new Object(); + + /** + Iterates over an Iteratable object and calls a callback for each item. + @param iteratable The iteratable object. + @param cb An IterationCallback object to call for each step. + */ + forin = function(iteratable, cb){ + var it = new mod.Iteration(iteratable, cb) + it.start(); + } + + mod.test=function(){ + forin(new mod.Range(10), function(item,i){ + print(item); + + }) + forin([1,2,3,4,5,6], function(item,i){ + print(item); + print("---") + }) + } +}) + diff --git a/www/static/js/jsolait/lib/jsonrpc.js b/www/static/js/jsolait/lib/jsonrpc.js new file mode 100644 index 00000000..f5741c48 --- /dev/null +++ b/www/static/js/jsolait/lib/jsonrpc.js @@ -0,0 +1,407 @@ +/* + Copyright (c) 2005 Jan-Klaas Kollhof + + This file is part of the JavaScript o lait library(jsolait). + + jsolait is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + This software is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with this software; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + + +/** + Provides a lightweight JSON-RPC imlementation for JSON-RPC over HTTP. + @creator Jan-Klaas Kollhof + @created 2005-02-25 +*/ +Module("jsonrpc","0.4.2", function(mod){ + var urllib = importModule("urllib"); + /** + Thrown if a server did not respond with response status 200 (OK). + */ + mod.InvalidServerResponse = Class(mod.Exception, function(publ, supr){ + /** + Initializes the Exception. + @param status The status returned by the server. 
+ */ + publ.init= function(status){ + supr(this).init("The server did not respond with a status 200 (OK) but with: " + status); + this.status = status; + } + ///The status returned by the server. + publ.status; + }) + + /** + Thrown if an JSON-RPC response is not well formed. + */ + mod.MalformedJSONRpc = Class(mod.Exception, function(publ, supr){ + /** + Initializes the Exception. + @param msg The error message of the user. + @param json The json source. + @param trace=null The error causing this Exception + */ + publ.init= function(msg, s, trace){ + supr(this).init(msg,trace); + this.source = s; + } + ///The json source which was mal formed. + publ.source; + }) + /** + Thrown if an JSON-RPC error is returned. + */ + mod.JSONRPCError = Class(mod.Exception, function(publ, supr){ + /** + Initializes the Exception. + @param err The error object. + @param trace=null The error causing this Exception + */ + publ.init= function(err, trace){ + supr(this).init(err,trace); + } + }) + + + /** + Marshalls an object to JSON.(Converts an object into JSON conforming source.) + It just calls the toJSON function of the objcect. + So, to customize serialization of objects one just needs to specify/override the toXmlRpc method + which should return an xml string conforming with XML-RPC spec. + @param obj The object to marshall + @return An xml representation of the object. + */ + mod.marshall = function(obj){ + if(obj == null){ + return "null"; + }else if(obj.toJSON){ + return obj.toJSON(); + }else{ + var v=[]; + for(var attr in obj){ + if(typeof obj[attr] != "function"){ + v.push('"' + attr + '": ' + mod.marshall(obj[attr])); + } + } + return "{" + v.join(", ") + "}"; + } + } + + /** + Unmarshalls a JSON source to a JavaScript object. + @param source The source to unmarshall. + @return The JavaScript object created. 
+ */ + mod.unmarshall = function(source){ + try { + var obj; + eval("obj=" + source); + return obj; + }catch(e){ + throw new mod.MalformedJSONRpc("The server's response could not be parsed.", source, e); + } + } + /** + Class for creating JSON-RPC methods. + Calling the created method will result in a JSON-RPC call to the service. + The return value of this call will be the return value of the RPC call. + RPC-Errors will be raised as Exceptions. + + Asynchronous operation: + If the last parameter passed to the method is an JSONRPCAsyncCallback object, + then the remote method will be called asynchronously. + The results and errors are passed to the callback. + */ + mod.JSONRPCMethod =Class(function(publ){ + + var postData = function(url, user, pass, data, callback){ + if(callback == null){ + var rslt = urllib.postURL(url, user, pass, data, [["Content-Type", "text/plain"]]); + return rslt; + }else{ + urllib.postURL(url, user, pass, data, [["Content-Type", "text/xml"]], callback); + } + } + + var handleResponse=function(resp){ + var status=null; + try{//see if the server responded with a response code 200 OK. + status = resp.status; + }catch(e){ + } + if(status == 200){ + var respTxt = ""; + try{ + respTxt=resp.responseText; + }catch(e){ + } + if(respTxt == null || respTxt == ""){ + throw new mod.MalformedJSONRpc("The server responded with an empty document.", ""); + }else{ + var rslt = mod.unmarshall(respTxt); + if(rslt.error != null){ + throw new mod.JSONRPCError(rslt.error); + }else{ + return rslt.result; + } + } + }else{ + throw new mod.InvalidServerResponse(status); + } + } + + var jsonRequest = function(id, methodName, args){ + var p = [mod.marshall(id), mod.marshall(methodName), mod.marshall(args)]; + return '{"id":' + p[0] + ', "method":' + p[1] + ', "params":' + p[2] + "}"; + } + /** + Initializes the JSON-RPC method. + @param url The URL of the service providing the method. + @param methodName The name of the method to invoke. 
+ @param user=null The user name to use for HTTP authentication. + @param pass=null The password to use for HTTP authentication. + */ + publ.init = function(url, methodName, user, pass){ + + //this is pretty much a hack. + //we create a function which mimics this class and + //return it instead of realy instanciating an object. + var fn=function(){ + var args=new Array(); + for(var i=0;i": case "!": + case "|": case "&": + switch(s2){ + case "==": case "!=": case "<>": case "<=": case ">=":case "||": case "&&": + rslt = new mod.Token(mod.tokens.OP, s2, this._pos); + break; + default: + rslt = new mod.Token(mod.tokens.OP, s1, this._pos); + } + break; + case "/": + if(s2 == "//" || s3 =="///"){ + s1 = extractSLComment(this._working); + rslt = new mod.Token(s1.charAt(2) != "/" ? mod.tokens.COMMENT:mod.tokens.DOCCOMMENT, s1, this._pos); + }else if(s2 == "/*" || s3 =="/**"){ + try{ + s1 = extractMLComment(this._working); + rslt = new mod.Token(s3 !="/**" ? mod.tokens.COMMENT: mod.tokens.DOCCOMMENT, s1, this._pos); + }catch(e){ + rslt= new mod.Token(mod.tokens.ERR, s3 != "/**" ? 
s2 : s3, this._pos, e); + } + }else{ + try{ + s1 = extractRegExp(this._working); + rslt = new mod.Token(mod.tokens.REGEXP, s1, this._pos); + }catch(e){ + rslt = new mod.Token(mod.tokens.OP, s1, this._pos, e); + } + } + break; + case " ": + var i = 0; + var s=""; + while(this._working.charAt(i) == " "){ + s+=" "; + i++; + } + rslt = new mod.Token(mod.tokens.WSP, s, this._pos); + break; + default: + s1=this._working.match(/\d+\.\d+|\d+|\w+/)[0]; + if(/^\d|\d\.\d/.test(s1)){//number + rslt = new mod.Token(mod.tokens.NUM, s1, this._pos); + }else{//name + rslt =new mod.Token(mod.tokens.NAME, s1, this._pos); + } + } + + this._working=this._working.slice(rslt.value.length); + this._pos += rslt.value.length; + return rslt; + } + + var searchQoute = function(s, q){ + if(q=="'"){ + return s.search(/[\\']/); + }else{ + return s.search(/[\\"]/); + } + } + + var extractQString=function(s){ + if(s.charAt(0) == "'"){ + var q="'"; + }else{ + var q='"'; + } + s=s.slice(1); + var rs=""; + var p= searchQoute(s, q); + while(p >= 0){ + if(p >=0){ + if(s.charAt(p) == q){ + rs += s.slice(0, p+1); + s = s.slice(p+1); + return q + rs; + }else{ + rs+=s.slice(0, p+2); + s = s.slice(p+2); + } + } + p = searchQoute(s, q); + } + throw new mod.Exception("End of String expected."); + } + + var extractSLComment=function(s){ + var p = s.search(/\n/); + if(p>=0){ + return s.slice(0,p+1); + }else{ + return s; + } + } + + var extractMLComment=function(s){ + var p = s.search(/\*\//); + if(p>=0){ + return s.slice(0,p+2); + }else{ + throw new mod.Exception("End of comment expected."); + } + } + + var extractRegExp=function(s){ + var p=0; + for(var i=0;i\n"; + } + break; + case PROCESSING_INSTRUCTION_NODE: + s+=""; + break; + case TEXT_NODE: + s+=node.nodeValue; + break; + case CDATA_SECTION_NODE: + s+="<" +"![CDATA[" + node.nodeValue + "]" + "]>"; + break; + case COMMENT_NODE: + s+=""; + break; + case ENTITY_REFERENCE_NODE: + case DOCUMENT_FRAGMENT_NODE: + case DOCUMENT_TYPE_NODE: + case NOTATION_NODE: + 
case ENTITY_NODE: + throw new mod.Exception("Nodetype(%s) not supported.".format(node.nodeType)); + break; + } + return s; + } +}) diff --git a/www/static/js/jsolait/lib/xmlrpc.js b/www/static/js/jsolait/lib/xmlrpc.js new file mode 100644 index 00000000..d0f7e7a9 --- /dev/null +++ b/www/static/js/jsolait/lib/xmlrpc.js @@ -0,0 +1,833 @@ +/* + Copyright (c) 2003-2004 Jan-Klaas Kollhof + + This file is part of the JavaScript o lait library(jsolait). + + jsolait is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + This software is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with this software; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + + +/** + Provides an XML-RPC imlementation. + It is similar to python's xmlrpclib module. + @creator Jan-Klaas Kollhof + @created 2003-07-10 +*/ +Module("xmlrpc","1.3.3", function(mod){ + var xmlext = importModule("xml"); + var urllib = importModule("urllib"); + /** + Thrown if a server did not respond with response status 200 (OK). + */ + mod.InvalidServerResponse = Class("InvalidServerResponse", mod.Exception, function(publ, supr){ + /** + Initializes the Exception. + @param status The status returned by the server. + */ + publ.init= function(status){ + supr(this).init("The server did not respond with a status 200 (OK) but with: " + status); + this.status = status; + } + ///The status returned by the server. + publ.status; + }) + + /** + Thrown if an XML-RPC response is not well formed. 
+ */ + mod.MalformedXmlRpc = Class("MalformedXmlRpc", mod.Exception, function(publ, supr){ + /** + Initializes the Exception. + @param msg The error message of the user. + @param xml The xml document's source. + @param trace=null The error causing this Exception + */ + publ.init= function(msg, xml, trace){ + supr(this).init(msg,trace); + this.xml = xml; + } + ///The xml source which was mal formed. + publ.xml; + }) + /** + Thrown if the RPC response is a Fault. + */ + mod.Fault = Class("Fault", mod.Exception, function(publ, supr){ + /** + Initializes the Exception. + @param faultCode The fault code returned by the rpc call. + @param faultString The fault string returned by the rpc call. + */ + publ.init= function(faultCode, faultString){ + supr(this).init("XML-RPC Fault: " + faultCode + "\n\n" + faultString); + this.faultCode = faultCode; + this.faultString = faultString; + } + ///The fault code returned from the rpc call. + publ.faultCode; + ///The fault string returned from the rpc call. + publ.faultString; + }) + + /** + Marshalls an object to XML-RPC.(Converts an object into XML-RPC conforming xml.) + It just calls the toXmlRpc function of the objcect. + So, to customize serialization of objects one just needs to specify/override the toXmlRpc method + which should return an xml string conforming with XML-RPC spec. + @param obj The object to marshall + @return An xml representation of the object. + */ + mod.marshall = function(obj){ + if(obj.toXmlRpc){ + return obj.toXmlRpc(); + }else{ + var s = ""; + for(var attr in obj){ + if(typeof obj[attr] != "function"){ + s += "" + attr + "" + mod.marshall(obj[attr]) + ""; + } + } + s += ""; + return s; + } + } + + /** + Unmarshalls an XML document to a JavaScript object. (Converts xml to JavaScript object.) + It parses the xml source and creates a JavaScript object. + @param xml The xml document source to unmarshall. + @return The JavaScript object created from the XML. 
+ */ + mod.unmarshall = function(xml){ + try {//try to parse xml ... this will throw an Exception if failed + var doc = xmlext.parseXML(xml); + }catch(e){ + throw new mod.MalformedXmlRpc("The server's response could not be parsed.", xml, e); + } + var rslt = mod.unmarshallDoc(doc, xml); + doc=null; + return rslt; + } + + /** + Unmarshalls an XML document to a JavaScript object like unmarshall but expects a DOM document as parameter. + It parses the xml source and creates a JavaScript object. + @param doc The xml document(DOM compatible) to unmarshall. + @return The JavaScript object created from the XML. + */ + mod.unmarshallDoc = function(doc, xml){ + try{ + var node = doc.documentElement; + if(node==null){//just in case parse xml didn't throw an Exception but returned nothing usefull. + throw new mod.MalformedXmlRpc("No documentElement found.", xml); + } + switch(node.tagName){ + case "methodResponse": + return parseMethodResponse(node); + case "methodCall": + return parseMethodCall(node); + default://nothing usefull returned by parseXML. + throw new mod.MalformedXmlRpc("'methodCall' or 'methodResponse' element expected.\nFound: '" + node.tagName + "'", xml); + } + }catch(e){ + if(e instanceof mod.Fault){//just rethrow the fault. + throw e; + }else { + throw new mod.MalformedXmlRpc("Unmarshalling of XML failed.", xml, e); + } + } + } + + /** + Parses a methodeResponse element. + @param node The methodResponse element. + @return The return value of the XML-RPC. + */ + var parseMethodResponse=function(node){ + try{ + for(var i=0;i'; + if (args.length>0){ + data += ""; + for(var i=0;i'; + } + data += ''; + } + data += ''; + return data; + } + /** + Initializes the XML-RPC method. + @param url The URL of the service providing the method. + @param methodName The name of the method to invoke. + @param user=null The user name to use for HTTP authentication. + @param pass=null The password to use for HTTP authentication. 
+ */ + publ.init = function(url, methodName, user, pass){ + + //this is pretty much a hack. + //we create a function which mimics this class and return it instead of really instanciating an object. + var fn=function(){ + //sync or async call + if(typeof arguments[arguments.length-1] != "function"){ + var data=getXML(fn.methodName,arguments); + var resp = postData(fn.url, fn.user, fn.password, data); + + return handleResponse(resp); + }else{ + var args=new Array(); + for(var i=0;i 0){ + var tryIntrospection=false; + }else{ + var tryIntrospection=true; + } + }else{ + pass=user; + user=methodNames; + methodNames=[]; + var tryIntrospection=true; + } + this._url = url; + this._user = user; + this._password = pass; + this._addMethodNames(methodNames); + if(tryIntrospection){ + try{//it's ok if it fails. + this._introspect(); + }catch(e){ + } + } + } + + /** + Adds new XMLRPCMethods to the proxy server which can then be invoked. + @param methodNames Array of names of methods that can be called on the server. + */ + publ._addMethodNames = function(methodNames){ + for(var i=0;i" + this.replace(/&/g, "&").replace(/"; + } + /** + XML-RPC representation of a number. + @return A string containing the Number's representation in XML. + */ + Number.prototype.toXmlRpc = function(){ + if(this == parseInt(this)){ + return "" + this + ""; + }else if(this == parseFloat(this)){ + return "" + this + ""; + }else{ + return false.toXmlRpc(); + } + } + /** + XML-RPC representation of a boolean. + @return A string containing the Boolean's representation in XML. + */ + Boolean.prototype.toXmlRpc = function(){ + if(this == true) { + return "1"; + }else{ + return "0"; + } + } + /** + XML-RPC representation of a date(iso 8601). + @return A string containing the Date's representation in XML. 
+ */ + Date.prototype.toXmlRpc = function(){ + var padd=function(s, p){ + s=p+s + return s.substring(s.length - p.length) + } + var y = padd(this.getUTCFullYear(), "0000"); + var m = padd(this.getUTCMonth() + 1, "00"); + var d = padd(this.getUTCDate(), "00"); + var h = padd(this.getUTCHours(), "00"); + var min = padd(this.getUTCMinutes(), "00"); + var s = padd(this.getUTCSeconds(), "00"); + + var isodate = y + m + d + "T" + h + ":" + min + ":" + s + + return "" + isodate + ""; + } + /** + XML-RPC representation of an array. + Each entry in the array is a value in the XML-RPC. + @return A string containing the Array's representation in XML. + */ + Array.prototype.toXmlRpc = function(){ + var retstr = ""; + for(var i=0;i"; + } + return retstr + ""; + } + + + mod.test = function(){ + print("creating ServiceProxy object using introspection for method construction...\n"); + var s = new mod.ServiceProxy("http://localhost/testx.py"); + print("%s created\n".format(s)); + print("creating and marshalling test data:\n"); + var o = [1.234, 5, {a:"Hello & < ", b:new Date()}]; + print(mod.marshall(o)); + print("\ncalling echo() on remote service...\n"); + var r = s.echo(o); + print("service returned data(marshalled again):\n") + print(mod.marshall(r)); + } +}) + + + diff --git a/www/static/js/jsolait/missingmixin.js b/www/static/js/jsolait/missingmixin.js new file mode 100644 index 00000000..148d9404 --- /dev/null +++ b/www/static/js/jsolait/missingmixin.js @@ -0,0 +1,151 @@ +/* + Copyright (c) 2003 Jan-Klaas Kollhof + + This file is part of the JavaScript o lait library(jsolait). + + jsolait is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. 
+ + This software is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with this software; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +if(Function.prototype.apply == null){ + Function.prototype.apply = function(thisObj, args){ + var a =[]; + for(var i=0;i= 0) ? n+Math.pow(10, -(d+1)) : n-Math.pow(10, -(d+1)); + n += ''; + return d == 0 ? n.substring(0, n.indexOf('.')) : n.substring(0, n.indexOf('.') + d + 1); + } +} + +if(Number.prototype.toExponential == null){ + Number.prototype.toExponential = function(d){ + var n = this; + var e = 0; + if (n != 0){ + e = Math.floor(Math.log(Math.abs(n)) / Math.LN10); + } + n /= Math.pow(10, e); + if (isFinite(d)){ + if (Math.abs(n) + 5*Math.pow(10, -(d+1)) >= 10.0){ + n /= 10; + e += 1; + } + n = n.toFixed(d); + } + n += "e"; + if (e >= 0){ + n += "+"; + } + n += e; + return n; + } +} + diff --git a/www/static/js/watchlogs.js b/www/static/js/watchlogs.js new file mode 100644 index 00000000..0c2e2b4d --- /dev/null +++ b/www/static/js/watchlogs.js @@ -0,0 +1,165 @@ +// Direct jsolait to its install location +jsolait.libURL ="/koji-static/js/jsolait"; + +// import modules +var codecs = importModule("codecs"); +var xmlrpc = importModule("xmlrpc"); + +// Config variables +var SERVER_URL = 'http://' + window.location.hostname + '/kojihub'; +var ELEM_ID = 'logs'; // id of the html element where the logs will be +var MAX_ERRS = 5; // errors before we just stop +// if you are testing this script from somewhere that isn't SERVER_URL +// set TESTING true +var TESTING = false; + +// General globals +var server = null; +var errCount = 0; + +// Globals for watch_logs_rec +var offsets = {}; +var lastlog = ""; + 
+function parse_tasklist() { + var tasklist = []; + var queryStr = unescape(window.location.search.substring(1)); + var vars = queryStr.split('&'); + for (var i=0; i= origHeight) { + // Only scroll the window if we were already at the bottom + // of the document + window.scroll(window.pageXOffset, document.height); + } +} + +function watch_logs(tasklist) { + for (var i=0; i 0) { + docHeight = document.height; + currlog = task_id + ":" + log; + if (currlog != lastlog) { + logElem.appendChild(document.createTextNode("\n==> " + currlog + " <==\n")); + lastlog = currlog; + } + logElem.appendChild(document.createTextNode(content)); + maybeScroll(docHeight); + } + } while (content.length > 0); + } + } + } + + if (tasklist.length == 0) { + docHeight = document.height; + logElem.appendChild(document.createTextNode("\n==> Tasks have finished <==\n")); + maybeScroll(docHeight); + } else if (errCount < MAX_ERRS) { + setTimeout(watch_logs_rec, 1000, tasklist); + } +} + +function popup_error(e, msg) { + var err; + if (e.toTraceString) { + err = e.toTraceString(); + } else { + err = e.message; + } + alert(msg + "\n" + err); +} + +function main() { + var tasklist = parse_tasklist(); + connect(); + try { + watch_logs(tasklist); + } catch(e) { + popup_error(e, "Error while watching logs:"); + errCount++; + } +} diff --git a/www/static/koji.css b/www/static/koji.css new file mode 100644 index 00000000..3f65f63b --- /dev/null +++ b/www/static/koji.css @@ -0,0 +1,408 @@ +/* + Koji styling + Copyright (c) 2007 Red Hat, Inc. 
+ + Authors: + Mike Bonnet +*/ + +html { + min-width: 800px; +} + +body { + margin: 0px; + padding: 0px; + font-size: small; + font-family: "Lucida Grande", "Luxi Sans", "Bitstream Vera Sans", helvetica, verdana, arial, sans-serif; + color: #333; + background: #fff url(images/bkgrnd_greydots.png) repeat; +} + +a { + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +#wrap { + min-width: 750px; + margin: 0 25px 10px 25px; + padding: 0; + text-align: left; + background: #fff; +} + +#innerwrap { + margin: 0 15px; + padding: 8px 0; +} + +#header { + width: 100%; + height: 40px; + clear: left; +} + +#rhLogo { + float: right; + margin-right: 15px; + margin-top: 6px; +} + +#kojiLogo { + float: left; +} + +div#content { + margin: 0px 20px 0px 20px; + float: left; +} + +p#footer { + padding-top: 40px; + margin-left: 15px; + line-height: 1.5em; + color: #999; + font-size: xx-small; + clear: both; +} + +p#footer a { + text-decoration: none; +} + +.hide { + display: none; +} + +#mainNav { + width: 100%; + background-color: #009; + margin-bottom: 5px; + font-weight: bold; + font-family: verdana, helvetica, arial, sans-serif; + height: 2.1em; +} + +#mainNav ul { + padding: 0px; + margin: 0px; + list-style-type: none; +} + +#mainNav ul li { + background-color: #006; + color: #fff; + display: block; + float: left; + padding: 0px; + margin: 0px; + border-style: solid; + border-width: 2px; + border-color: #009; +} + +#mainNav ul li a { + display: block; + color: #fff; + text-decoration: none; + padding: 0.4em 1.5em; + font-size: 0.77em; + height: 1.5em; +} + +#mainNav ul li:hover { + border-color: #ddd; +} + +body#summary #mainNav li#summary a, +body#tasks #mainNav li#tasks a, +body#tags #mainNav li#tags a, +body#builds #mainNav li#builds a, +body#packages #mainNav li#packages a, +body#users #mainNav li#users a, +body#hosts #mainNav li#hosts a, +body#buildtargets #mainNav li#buildtargets a, +body#reports #mainNav li#reports a, +body#search #mainNav li#search 
a { + background-color: #eee; + color: #000; +} + +h4 { + color: #fff; + background-color: #006; + padding: 0.3em; + margin: 5px 0px 5px 0px; +} + +h4 a { + color: #fff; +} + +table { + border-spacing: 0px; +} + +th { + font-weight: bold; + vertical-align: text-top; +} + +th, td { + padding: 5px; +} + +td.building { + color: #cc0; +} + +td.complete { + color: #0c0; +} + +td.deleted, +td.failed, +td.canceled { + color: #c00; +} + +td.false { + color: #c00; +} + +td.true { + color: #0c0; +} + +img.sort { + vertical-align: baseline; +} + +td.paginate { + text-align: center; +} + +form.pageJump { + float: right; + margin-left: 20px; +} + +form.pageJump select { + font-size: smaller; +} + +div.dataHeader { + font-weight: bold; +} + +div.pageHeader { + margin-bottom: 10px; + font-weight: bold; + font-size: 1.5em; +} + +table.nested { + float: left; +} + +td.container { + padding: 4px 0px; + width: 100%; +} + +table.nested th, +table.nested td { + padding: 2px 4px; +} + +div.toggle { + padding: 6px; +} + +td.tree { + background-color: #fff; +} + +.tree span.root { + font-weight: bold; + background-color: #fff; +} + +.tree ul { + padding-left: 2em; + list-style: none; + margin-top: 0em; + margin-bottom: 0em; +} + +.tree span.treeBranch { + border-bottom: 1px solid #000; + border-left: 1px solid #000; + font-size: 1.2em; +} + +.tree li.sibling > span.treeBranch { + border-left-width: 0em; +} + +.tree li.sibling { + border-left: 1px solid #000; +} + +.tree a { + text-decoration: none; +} + +.tree span.treeLabel { + position: relative; + top: 0.6em; + margin-left: 1.2em; + padding-left: 0.2em; + background-color: #fff; + font-size: 0.83em; +} + +.hidden { + display: none; +} + +.tree span.treeToggle { + font-weight: bold; +} + +.tree span.treeLink { + font-size: smaller; +} + +.adminLink { + color: #000; +} + +img.stateimg { + margin-top: -6px; + margin-bottom: -6px; +} + +.charlist { + text-align: center; +} + +img.graphrow { + background-color: #00f; + vertical-align: 
bottom; +} + +table.data-list { + width: 100%; +} + +table.data-list td { + vertical-align: text-top; +} + +tr.list-header { + background-color: #fff; +} + +tr.list-header th { + background-color: #ddd; +} + +tr.list-header th:first-child { + -moz-border-radius-topleft: 15px; +} + +tr.list-header th:last-child { + -moz-border-radius-topright: 15px; +} + +tr.row-odd { + background-color: #fff; +} + +tr.row-odd td { + border-bottom: 1px solid #eee; +} + +tr.row-even { + background-color: #eee; +} + +tr.row-even td { + border-bottom: 1px solid #fff; +} + +tr.row-odd td:first-child, +tr.row-even td:first-child { + border-left: 1px solid #eee; +} + +tr.row-odd td:last-child, +tr.row-even td:last-child { + border-right: 1px solid #eee; +} + +tr.row-even td.tree { + background-color: #eee; +} + +tr.row-even td.tree span.treeLabel { + background-color: #eee; +} + +.taskfree { + color: #30c; +} + +.taskopen { + color: #f60; +} + +.taskclosed { + color: #0c0; +} + +.taskcanceled { + color: #c90; +} + +.taskassigned { + color: #c0f; +} + +.taskfailed { + color: #c00; +} + +a.help { + text-decoration: underline; +} + +abbr { + cursor: help; +} + +.changelog { + font-size: medium; +} + +#headerHelp { + float: right; + margin: 15px 10px 0 0; +} + +.filterlist { + font-size: smaller; +} + +span#loginInfo { + float: right; + font-weight: bold; + margin-left: 0.5em; + margin-right: 0.5em; + margin-bottom: 4px; +}