# dnfpayload.py
# DNF/rpm software payload management.
#
# Copyright (C) 2013  Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.  You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.  Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Ales Kozumplik <akozumpl@redhat.com>
#

import os

from blivet.size import Size
import blivet.arch
from pyanaconda.flags import flags
from pyanaconda.i18n import _
from pyanaconda.progress import progressQ
from pyanaconda.simpleconfig import simple_replace

import configparser
import collections
import itertools
import logging
import multiprocessing
import operator
from pyanaconda import constants
from pykickstart.constants import GROUP_ALL, GROUP_DEFAULT, KS_MISSING_IGNORE
import pyanaconda.errors as errors
import pyanaconda.iutil
import pyanaconda.localization
import pyanaconda.packaging as packaging
import shutil
import sys
import time
import threading
from pyanaconda.iutil import ProxyString, ProxyStringError
from pyanaconda.iutil import open  # pylint: disable=redefined-builtin

log = logging.getLogger("packaging")

import dnf
import dnf.exceptions
import dnf.repo
import dnf.callback
import rpm

DNF_CACHE_DIR = '/tmp/dnf.cache'
DNF_PLUGINCONF_DIR = '/tmp/dnf.pluginconf'
DNF_PACKAGE_CACHE_DIR_SUFFIX = 'dnf.package.cache'
DOWNLOAD_MPOINTS = {'/tmp',
                    '/',
                    '/mnt/sysimage',
                    '/mnt/sysimage/home',
                    '/mnt/sysimage/tmp',
                    '/mnt/sysimage/var',
                    }
REPO_DIRS = ['/etc/yum.repos.d',
             '/etc/anaconda.repos.d',
             '/tmp/updates/anaconda.repos.d',
             '/tmp/product/anaconda.repos.d']
YUM_REPOS_DIR = "/etc/yum.repos.d/"

_DNF_INSTALLER_LANGPACK_CONF = DNF_PLUGINCONF_DIR + "/langpacks.conf"
_DNF_TARGET_LANGPACK_CONF = "/etc/dnf/plugins/langpacks.conf"

def _failure_limbo():
    progressQ.send_quit(1)
    while True:
        time.sleep(10000)

def _df_map():
    """Return (mountpoint -> size available) mapping."""
    output = pyanaconda.iutil.execWithCapture('df', ['--output=target,avail'])
    output = output.rstrip()
    lines = output.splitlines()
    structured = {}
    for line in lines:
        items = line.split()
        key = items[0]
        val = items[1]
        if not key.startswith('/'):
            continue
        structured[key] = Size(int(val) * 1024)
    return structured

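# Illustrative only: `df --output=target,avail` reports available space in
# 1 KiB blocks, so an output line such as "/mnt/sysimage 4194304" yields the
# entry {'/mnt/sysimage': Size(4194304 * 1024)}, i.e. 4 GiB. The header line
# and any non-absolute targets are dropped by the startswith('/') check.
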
def _paced(fn):
    """Execute `fn` no more often than every 2 seconds."""
    def paced_fn(self, *args):
        now = time.time()
        if now - self.last_time < 2:
            return
        self.last_time = now
        return fn(self, *args)
    return paced_fn

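# A minimal usage sketch (assumes the decorated method lives on an object
# that initializes a `last_time` attribute, as DownloadProgress below does):
#
#     class Reporter(object):
#         def __init__(self):
#             self.last_time = time.time()
#
#         @_paced
#         def report(self):
#             print("called at most once every 2 seconds")
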
def _pick_mpoint(df, download_size, install_size):
    def reasonable_mpoint(mpoint):
        return mpoint in DOWNLOAD_MPOINTS

    requested = download_size
    requested_root = requested + install_size
    root_mpoint = pyanaconda.iutil.getSysroot()
    sufficients = {key: val for (key, val) in df.items()
                   # for root we need to take into account both download and install size
                   if ((key != root_mpoint and val > requested)
                       or val > requested_root) and reasonable_mpoint(key)}
    log.debug('Estimated size: download %s & install %s - df: %s', requested,
              (requested_root - requested), df)
    log.info('Sufficient mountpoints found: %s', sufficients)

    if not len(sufficients):
        return None
    # default to the biggest one:
    return sorted(sufficients.items(), key=operator.itemgetter(1),
                  reverse=True)[0][0]

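# Worked example (hypothetical numbers, assuming getSysroot() returns
# '/mnt/sysimage'): with df = {'/tmp': Size("1 GiB"), '/mnt/sysimage':
# Size("8 GiB")}, download_size of 2 GiB and install_size of 5 GiB, '/tmp'
# is rejected (1 GiB < 2 GiB download) while '/mnt/sysimage' qualifies only
# because 8 GiB exceeds the combined 7 GiB it must host as the sysroot; the
# biggest qualifying mountpoint wins.
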
class PayloadRPMDisplay(dnf.callback.LoggingTransactionDisplay):
    def __init__(self, queue_instance):
        super(PayloadRPMDisplay, self).__init__()
        self._queue = queue_instance
        self._last_ts = None
        self.cnt = 0

    def event(self, package, action, te_current, te_total, ts_current, ts_total):
        if action == self.PKG_INSTALL and te_current == 0:
            # do not report the same package twice
            if self._last_ts == ts_current:
                return
            self._last_ts = ts_current

            msg = '%s.%s (%d/%d)' % \
                (package.name, package.arch, ts_current, ts_total)
            self.cnt += 1
            self._queue.put(('install', msg))
        elif action == self.TRANS_POST:
            self._queue.put(('post', None))

class DownloadProgress(dnf.callback.DownloadProgress):
    def __init__(self):
        self.downloads = collections.defaultdict(int)
        self.last_time = time.time()
        self.total_files = 0
        self.total_size = Size(0)

    @_paced
    def _update(self):
        msg = _('Downloading %(total_files)s RPMs, '
                '%(downloaded)s / %(total_size)s (%(percent)d%%) done.')
        downloaded = Size(sum(self.downloads.values()))
        vals = {
            'downloaded': downloaded,
            'percent': int(100 * downloaded / self.total_size),
            'total_files': self.total_files,
            'total_size': self.total_size
        }
        progressQ.send_message(msg % vals)

    def end(self, payload, status, err_msg):
        nevra = str(payload)
        if status is dnf.callback.STATUS_OK:
            self.downloads[nevra] = payload.download_size
            self._update()
            return
        log.critical("Failed to download '%s': %d - %s", nevra, status, err_msg)

    def progress(self, payload, done):
        nevra = str(payload)
        self.downloads[nevra] = done
        self._update()

    def start(self, total_files, total_size):
        self.total_files = total_files
        self.total_size = Size(total_size)

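# Callback flow sketch: dnf calls start() once with the overall totals, then
# progress() repeatedly per package payload as bytes arrive, and end() when a
# payload finishes. Keying self.downloads by the NEVRA string lets progress()
# and end() overwrite, rather than accumulate, per-package byte counts.
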
def do_transaction(base, queue_instance):
    try:
        display = PayloadRPMDisplay(queue_instance)
        base.do_transaction(display=display)
    except BaseException as e:
        log.error('The transaction process has ended abruptly')
        log.info(e)
        queue_instance.put(('quit', str(e)))

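# do_transaction() runs in a child process (see DNFPayload.install() below)
# and talks to the parent purely through the queue. The message protocol is:
#
#     ('install', 'name.arch (n/total)')  - a package is being installed
#     ('post', None)                      - post-transaction phase reached
#     ('quit', str(exception))            - the transaction died
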
class DNFPayload(packaging.PackagePayload):
    def __init__(self, data):
        packaging.PackagePayload.__init__(self, data)

        self._base = None
        self._download_location = None
        self._configure()

        # Protect access to _base.repos to ensure that the dictionary is not
        # modified while another thread is attempting to iterate over it. The
        # lock only needs to be held during operations that change the number
        # of repos or that iterate over the repos.
        self._repos_lock = threading.RLock()

    def unsetup(self):
        super(DNFPayload, self).unsetup()
        self._base = None
        self._configure()

    def _replace_vars(self, url):
        """ Replace url variables with their values

            :param url: url string to do replacement on
            :type url: string
            :returns: string with variables substituted
            :rtype: string or None

            Currently supports $releasever and $basearch
        """
        if not url:
            return url

        url = url.replace("$releasever", self._base.conf.releasever)
        url = url.replace("$basearch", blivet.arch.getArch())

        return url

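    # Illustrative substitution (actual values depend on the detected release
    # and architecture): on an x86_64 install with releasever "25",
    #     "http://example.com/$releasever/$basearch/os"
    # becomes
    #     "http://example.com/25/x86_64/os"
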
    def _add_repo(self, ksrepo):
        """Add a repo to the dnf repo object

           :param ksrepo: Kickstart Repository to add
           :type ksrepo: Kickstart RepoData object.
           :returns: None
        """
        repo = dnf.repo.Repo(ksrepo.name, DNF_CACHE_DIR)
        url = self._replace_vars(ksrepo.baseurl)
        mirrorlist = self._replace_vars(ksrepo.mirrorlist)

        if url and url.startswith("nfs://"):
            (server, path) = url[6:].split(":", 1)
            mountpoint = "%s/%s.nfs" % (constants.MOUNT_DIR, repo.name)
            self._setupNFS(mountpoint, server, path, None)

            url = "file://" + mountpoint

        if url:
            repo.baseurl = [url]
        if mirrorlist:
            repo.mirrorlist = mirrorlist
        repo.sslverify = not (ksrepo.noverifyssl or flags.noverifyssl)
        if ksrepo.proxy:
            try:
                repo.proxy = ProxyString(ksrepo.proxy).url
            except ProxyStringError as e:
                log.error("Failed to parse proxy for _add_repo %s: %s",
                          ksrepo.proxy, e)

        if ksrepo.cost:
            repo.cost = ksrepo.cost

        if ksrepo.includepkgs:
            repo.include = ksrepo.includepkgs

        if ksrepo.excludepkgs:
            repo.exclude = ksrepo.excludepkgs

        # If this repo is already known, it's one of two things:
        # (1) The user is trying to do "repo --name=updates" in a kickstart file
        #     and we should just know to enable the already existing on-disk
        #     repo config.
        # (2) It's a duplicate, and we need to delete the existing definition
        #     and use this new one. The highest profile user of this is livecd
        #     kickstarts.
        if repo.id in self._base.repos:
            if not url and not mirrorlist:
                self._base.repos[repo.id].enable()
            else:
                with self._repos_lock:
                    self._base.repos.pop(repo.id)
                    self._base.repos.add(repo)
                repo.enable()
        # If the repo's not already known, we've got to add it.
        else:
            with self._repos_lock:
                self._base.repos.add(repo)
            repo.enable()

        # Load the metadata to verify that the repo is valid
        try:
            self._base.repos[repo.id].load()
        except dnf.exceptions.RepoError as e:
            raise packaging.MetadataError(e)

        log.info("added repo: '%s' - %s", ksrepo.name, url or mirrorlist)

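    # For reference, a kickstart line that ends up here might look like this
    # (hypothetical URL):
    #
    #     repo --name=extras --baseurl=http://example.com/$releasever/$basearch/ --cost=50
    #
    # baseurl/mirrorlist get variable-expanded, nfs:// sources are mounted and
    # rewritten to file:// URLs, and the result is registered and loaded.
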
    def addRepo(self, ksrepo):
        """Add a repo to dnf and kickstart repo lists

           :param ksrepo: Kickstart Repository to add
           :type ksrepo: Kickstart RepoData object.
           :returns: None
        """
        self._add_repo(ksrepo)
        super(DNFPayload, self).addRepo(ksrepo)

    def _apply_selections(self):
        if self.data.packages.nocore:
            log.info("skipping core group due to %%packages --nocore; system may not be complete")
        else:
            try:
                self._select_group('core', required=True)
                log.info("selected group: core")
            except packaging.NoSuchGroup as e:
                self._miss(e)

        env = None

        if self.data.packages.default and self.environments:
            env = self.environments[0]
        elif self.data.packages.environment:
            env = self.data.packages.environment

        excludedGroups = [group.name for group in self.data.packages.excludedGroupList]

        if env:
            try:
                self._select_environment(env, excludedGroups)
                log.info("selected env: %s", env)
            except packaging.NoSuchGroup as e:
                self._miss(e)

        for group in self.data.packages.groupList:
            if group.name == 'core' or group.name in excludedGroups:
                continue

            default = group.include in (GROUP_ALL,
                                        GROUP_DEFAULT)
            optional = group.include == GROUP_ALL

            try:
                self._select_group(group.name, default=default, optional=optional)
                log.info("selected group: %s", group.name)
            except packaging.NoSuchGroup as e:
                self._miss(e)

        for pkg_name in set(self.data.packages.packageList) - set(self.data.packages.excludedList):
            try:
                self._install_package(pkg_name)
                log.info("selected package: '%s'", pkg_name)
            except packaging.NoSuchPackage as e:
                self._miss(e)

        self._select_kernel_package()

        for pkg_name in self.requiredPackages:
            try:
                self._install_package(pkg_name, required=True)
                log.debug("selected required package: %s", pkg_name)
            except packaging.NoSuchPackage as e:
                self._miss(e)

        for group in self.requiredGroups:
            try:
                self._select_group(group, required=True)
                log.debug("selected required group: %s", group)
            except packaging.NoSuchGroup as e:
                self._miss(e)

    def _bump_tx_id(self):
        if self.txID is None:
            self.txID = 1
        else:
            self.txID += 1
        return self.txID

    def _configure(self):
        self._base = dnf.Base()
        conf = self._base.conf
        conf.cachedir = DNF_CACHE_DIR
        conf.pluginconfpath = DNF_PLUGINCONF_DIR
        conf.logdir = '/tmp/'
        # disable console output completely:
        conf.debuglevel = 0
        conf.errorlevel = 0
        self._base.logging.setup_from_dnf_conf(conf)

        conf.releasever = self._getReleaseVersion(None)
        conf.installroot = pyanaconda.iutil.getSysroot()
        conf.prepend_installroot('persistdir')

        # NSS won't survive the forking we do to shield out chroot during
        # transaction, disable it in RPM:
        conf.tsflags.append('nocrypto')

        if self.data.packages.multiLib:
            conf.multilib_policy = "all"

        if hasattr(self.data.method, "proxy") and self.data.method.proxy:
            try:
                proxy = ProxyString(self.data.method.proxy)
                conf.proxy = proxy.noauth_url
                if proxy.username:
                    conf.proxy_username = proxy.username
                if proxy.password:
                    conf.proxy_password = proxy.password
                log.info("Using %s as proxy", self.data.method.proxy)
            except ProxyStringError as e:
                log.error("Failed to parse proxy for dnf configure %s: %s",
                          self.data.method.proxy, e)

        # Start with an empty comps so we can go ahead and use the environment
        # and group properties. Unset reposdir to ensure dnf has nothing it can
        # check automatically
        conf.reposdir = []
        self._base.read_comps()

        conf.reposdir = REPO_DIRS

    @property
    def _download_space(self):
        transaction = self._base.transaction
        if transaction is None:
            return Size(0)

        size = sum(tsi.installed.downloadsize for tsi in transaction)
        # reserve extra
        return Size(size) + Size("150 MB")

    def _install_package(self, pkg_name, required=False):
        try:
            return self._base.install(pkg_name)
        except dnf.exceptions.MarkingError:
            raise packaging.NoSuchPackage(pkg_name, required=required)

    def _miss(self, exn):
        if self.data.packages.handleMissing == KS_MISSING_IGNORE:
            return

        log.error('Missed: %r', exn)
        if errors.errorHandler.cb(exn) == errors.ERROR_RAISE:
            # The progress bar polls kind of slowly, thus installation could
            # still continue for a bit before the quit message is processed.
            # Doing a sys.exit also ensures the running thread quits before
            # it can do anything else.
            progressQ.send_quit(1)
            pyanaconda.iutil.ipmi_report(constants.IPMI_ABORTED)
            sys.exit(1)

    def _pick_download_location(self):
        download_size = self._download_space
        install_size = self._spaceRequired()
        df_map = _df_map()
        mpoint = _pick_mpoint(df_map, download_size, install_size)
        if mpoint is None:
            msg = "Not enough disk space to download the packages."
            raise packaging.PayloadError(msg)

        pkgdir = '%s/%s' % (mpoint, DNF_PACKAGE_CACHE_DIR_SUFFIX)
        with self._repos_lock:
            for repo in self._base.repos.iter_enabled():
                repo.pkgdir = pkgdir

        return pkgdir

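    # E.g. if _pick_mpoint() settles on '/mnt/sysimage/home', every enabled
    # repo downloads into '/mnt/sysimage/home/dnf.package.cache' (path shown
    # for illustration; the mountpoint is chosen at runtime from free space).
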
    def _select_group(self, group_id, default=True, optional=False, required=False):
        grp = self._base.comps.group_by_pattern(group_id)
        if grp is None:
            raise packaging.NoSuchGroup(group_id, required=required)
        types = {'mandatory'}
        if default:
            types.add('default')
        if optional:
            types.add('optional')
        exclude = self.data.packages.excludedList
        try:
            self._base.group_install(grp, types, exclude=exclude)
        except dnf.exceptions.CompsError as e:
            # DNF raises this when it is already selected
            log.debug(e)

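    # Rough mapping from kickstart %packages syntax to the comps package
    # types selected above: "@group" pulls mandatory+default packages,
    # "@group --optional" adds the optional ones, and "@group --nodefaults"
    # (arriving here as default=False) trims back to mandatory only.
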
    def _select_environment(self, env_id, excluded):
        # dnf.base.environment_install excludes on packages instead of groups,
        # which is unhelpful. Instead, use group_install for each group in
        # the environment so we can skip the ones that are excluded.
        for groupid in set(self.environmentGroups(env_id, optional=False)) - set(excluded):
            self._select_group(groupid)

    def _select_kernel_package(self):
        kernels = self.kernelPackages
        for kernel in kernels:
            try:
                self._install_package(kernel)
            except packaging.NoSuchPackage:
                log.info('kernel: no such package %s', kernel)
            else:
                log.info('kernel: selected %s', kernel)
                break
        else:
            log.error('kernel: failed to select a kernel from %s', kernels)

    def _sync_metadata(self, dnf_repo):
        try:
            dnf_repo.load()
        except dnf.exceptions.RepoError as e:
            id_ = dnf_repo.id
            log.info('_sync_metadata: addon repo error: %s', e)
            self.disableRepo(id_)
            self.verbose_errors.append(str(e))

    @property
    def baseRepo(self):
        # is any locking needed here?
        repo_names = [constants.BASE_REPO_NAME] + self.DEFAULT_REPOS
        with self._repos_lock:
            for repo in self._base.repos.iter_enabled():
                if repo.id in repo_names:
                    return repo.id
        return None

    @property
    def environments(self):
        return [env.id for env in self._base.comps.environments]

    @property
    def groups(self):
        groups = self._base.comps.groups_iter()
        return [g.id for g in groups]

    @property
    def mirrorEnabled(self):
        return True

    @property
    def repos(self):
        # known repo ids
        with self._repos_lock:
            return [r.id for r in self._base.repos.values()]

    @property
    def spaceRequired(self):
        size = self._spaceRequired()
        download_size = self._download_space
        valid_points = _df_map()
        root_mpoint = pyanaconda.iutil.getSysroot()
        for (key, val) in self.storage.mountpoints.items():
            new_key = key
            if key.endswith('/'):
                new_key = key[:-1]
            # we can ignore swap
            if key.startswith('/') and ((root_mpoint + new_key) not in valid_points):
                valid_points[root_mpoint + new_key] = val.format.freeSpaceEstimate(val.size)

        m_points = _pick_mpoint(valid_points, download_size, size)
        if not m_points or m_points == root_mpoint:
            # download and install to the same mount point
            size = size + download_size
        log.debug("Installation space required %s for mpoints %s", size, m_points)
        return size

    def _spaceRequired(self):
        transaction = self._base.transaction
        if transaction is None:
            return Size("3000 MB")

        size = sum(tsi.installed.installsize for tsi in transaction)
        # add 35% to account for the fact that the above method is laughably
        # inaccurate:
        size *= 1.35
        return Size(size)

    def _isGroupVisible(self, grpid):
        grp = self._base.comps.group_by_pattern(grpid)
        if grp is None:
            raise packaging.NoSuchGroup(grpid)
        return grp.visible

    def _groupHasInstallableMembers(self, grpid):
        return True

    def checkSoftwareSelection(self):
        log.info("checking software selection")
        self._bump_tx_id()
        self._base.reset(goal=True)
        self._apply_selections()

        try:
            if self._base.resolve():
                log.debug("checking dependencies: success.")
            else:
                log.debug("empty transaction")
        except dnf.exceptions.DepsolveError as e:
            msg = str(e)
            log.warning(msg)
            raise packaging.DependencyError(msg)

        log.info("%d packages selected totalling %s",
                 len(self._base.transaction), self.spaceRequired)

    def disableRepo(self, repo_id):
        try:
            self._base.repos[repo_id].disable()
            log.info("Disabled '%s'", repo_id)
        except KeyError:
            pass
        super(DNFPayload, self).disableRepo(repo_id)

    def enableRepo(self, repo_id):
        try:
            self._base.repos[repo_id].enable()
            log.info("Enabled '%s'", repo_id)
        except KeyError:
            pass
        super(DNFPayload, self).enableRepo(repo_id)

    def environmentDescription(self, environmentid):
        env = self._base.comps.environment_by_pattern(environmentid)
        if env is None:
            raise packaging.NoSuchGroup(environmentid)
        return (env.ui_name, env.ui_description)

    def environmentId(self, environment):
        """ Return environment id for the environment specified by id or name."""
        env = self._base.comps.environment_by_pattern(environment)
        if env is None:
            raise packaging.NoSuchGroup(environment)
        return env.id

    def environmentGroups(self, environmentid, optional=True):
        env = self._base.comps.environment_by_pattern(environmentid)
        if env is None:
            raise packaging.NoSuchGroup(environmentid)
        group_ids = (id_.name for id_ in env.group_ids)
        option_ids = (id_.name for id_ in env.option_ids)
        if optional:
            return list(itertools.chain(group_ids, option_ids))
        else:
            return list(group_ids)

    def environmentHasOption(self, environmentid, grpid):
        env = self._base.comps.environment_by_pattern(environmentid)
        if env is None:
            raise packaging.NoSuchGroup(environmentid)
        return grpid in (id_.name for id_ in env.option_ids)

    def environmentOptionIsDefault(self, environmentid, grpid):
        env = self._base.comps.environment_by_pattern(environmentid)
        if env is None:
            raise packaging.NoSuchGroup(environmentid)

        # Look for a group in the optionlist that matches the group_id and has
        # default set
        return any(grp for grp in env.option_ids if grp.name == grpid and grp.default)

    def groupDescription(self, grpid):
        """ Return name/description tuple for the group specified by id. """
        grp = self._base.comps.group_by_pattern(grpid)
        if grp is None:
            raise packaging.NoSuchGroup(grpid)
        return (grp.ui_name, grp.ui_description)

    def gatherRepoMetadata(self):
        with self._repos_lock:
            for repo in self._base.repos.iter_enabled():
                self._sync_metadata(repo)
        self._base.fill_sack(load_system_repo=False)
        self._base.read_comps()
        self._refreshEnvironmentAddons()

    def install(self):
        progressQ.send_message(_('Starting package installation process'))

        # Add the rpm macros to the global transaction environment
        for macro in self.rpmMacros:
            rpm.addMacro(macro[0], macro[1])

        if self.install_device:
            self._setupMedia(self.install_device)
        try:
            self.checkSoftwareSelection()
            self._download_location = self._pick_download_location()
        except packaging.PayloadError as e:
            if errors.errorHandler.cb(e) == errors.ERROR_RAISE:
                _failure_limbo()

        pkgs_to_download = self._base.transaction.install_set
        log.info('Downloading packages.')
        progressQ.send_message(_('Downloading packages'))
        progress = DownloadProgress()
        try:
            self._base.download_packages(pkgs_to_download, progress)
        except dnf.exceptions.DownloadError as e:
            msg = 'Failed to download the following packages: %s' % str(e)
            exc = packaging.PayloadInstallError(msg)
            if errors.errorHandler.cb(exc) == errors.ERROR_RAISE:
                _failure_limbo()

        log.info('Downloading packages finished.')

        pre_msg = _("Preparing transaction from installation source")
        progressQ.send_message(pre_msg)

        queue_instance = multiprocessing.Queue()
        process = multiprocessing.Process(target=do_transaction,
                                          args=(self._base, queue_instance))
        process.start()
        (token, msg) = queue_instance.get()
        while token not in ('post', 'quit'):
            if token == 'install':
                msg = _("Installing %s") % msg
                progressQ.send_message(msg)
            (token, msg) = queue_instance.get()

        if token == 'quit':
            _failure_limbo()

        post_msg = _("Performing post-installation setup tasks")
        progressQ.send_message(post_msg)
        process.join()
        self._base.close()
        if os.path.exists(self._download_location):
            log.info("Cleaning up downloaded packages: %s", self._download_location)
            shutil.rmtree(self._download_location)
        else:
            # Some installation sources, such as NFS, don't need to download packages to
            # local storage, so the download location might not always exist. So for now
            # warn about this, at least until the RFE in bug 1193121 is implemented and
            # we don't have to care about clearing the download location ourselves.
            log.warning("Can't delete nonexistent download location: %s", self._download_location)

    def getRepo(self, repo_id):
        """ Return the dnf repo object. """
        return self._base.repos[repo_id]

    def isRepoEnabled(self, repo_id):
        try:
            return self._base.repos[repo_id].enabled
        except (dnf.exceptions.RepoError, KeyError):
            return super(DNFPayload, self).isRepoEnabled(repo_id)

    def languageGroups(self):
        locales = [self.data.lang.lang] + self.data.lang.addsupport
        match_fn = pyanaconda.localization.langcode_matches_locale
        gids = set()
        gl_tuples = ((g.id, g.lang_only) for g in self._base.comps.groups_iter())
        for (gid, lang) in gl_tuples:
            for locale in locales:
                if match_fn(lang, locale):
                    gids.add(gid)
        log.info('languageGroups: %s', gids)
        return list(gids)

    def preInstall(self, packages=None, groups=None):
        super(DNFPayload, self).preInstall(packages, groups)
        self.requiredPackages += ["dnf"]
        if packages:
            self.requiredPackages += packages
        self.requiredGroups = groups

        # Write the langpacks config
        pyanaconda.iutil.mkdirChain(DNF_PLUGINCONF_DIR)
        langs = [self.data.lang.lang] + self.data.lang.addsupport

        # Start with the file in /etc, if one exists. Otherwise make an empty config
        if os.path.exists(_DNF_TARGET_LANGPACK_CONF):
            shutil.copy2(_DNF_TARGET_LANGPACK_CONF, _DNF_INSTALLER_LANGPACK_CONF)
        else:
            with open(_DNF_INSTALLER_LANGPACK_CONF, "w") as f:
                f.write("[main]\n")

        # langpacks.conf is an INI style config file, read it and
        # add or change the enabled and langpack_locales entries without
        # changing anything else.
        keys = [("langpack_locales", "langpack_locales=" + ", ".join(langs)),
                ("enabled", "enabled=1")]
        simple_replace(_DNF_INSTALLER_LANGPACK_CONF, keys)

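    # The resulting /tmp/dnf.pluginconf/langpacks.conf would read, e.g., for
    # an install with English plus Czech support (locales are illustrative):
    #
    #     [main]
    #     langpack_locales=en_US.UTF-8, cs_CZ.UTF-8
    #     enabled=1
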
    def reset(self):
        super(DNFPayload, self).reset()
        shutil.rmtree(DNF_CACHE_DIR, ignore_errors=True)
        shutil.rmtree(DNF_PLUGINCONF_DIR, ignore_errors=True)
        self.txID = None
        self._base.reset(sack=True, repos=True)

    def updateBaseRepo(self, fallback=True, checkmount=True):
        log.info('configuring base repo')
        self.reset()
        url, mirrorlist, sslverify = self._setupInstallDevice(self.storage,
                                                              checkmount)
        method = self.data.method

        # Read in all the repos from the installation environment, make a note of which
        # are enabled, and then disable them all. If the user gave us a method, we want
        # to use that instead of the default repos.
        self._base.read_all_repos()

        enabled = []
        with self._repos_lock:
            for repo in self._base.repos.iter_enabled():
                enabled.append(repo.id)
                repo.disable()

        # If askmethod was specified on the command-line, leave all the repos
        # disabled and return
        if flags.askmethod:
            return

        if method.method:
            try:
                self._base.conf.releasever = self._getReleaseVersion(url)
                log.debug("releasever from %s is %s", url, self._base.conf.releasever)
            except configparser.MissingSectionHeaderError as e:
                log.error("couldn't set releasever from base repo (%s): %s",
                          method.method, e)

            try:
                proxy = getattr(method, "proxy", None)
                base_ksrepo = self.data.RepoData(
                    name=constants.BASE_REPO_NAME, baseurl=url,
                    mirrorlist=mirrorlist, noverifyssl=not sslverify, proxy=proxy)
                self._add_repo(base_ksrepo)
            except (packaging.MetadataError, packaging.PayloadError) as e:
                log.error("base repo (%s/%s) not valid -- removing it",
                          method.method, url)
                with self._repos_lock:
                    self._base.repos.pop(constants.BASE_REPO_NAME, None)
                if not fallback:
                    with self._repos_lock:
                        for repo in self._base.repos.iter_enabled():
                            self.disableRepo(repo.id)
                    return

                # this preserves the method details while disabling it
                method.method = None
                self.install_device = None

        # We need to check this again separately in case method.method was unset above.
        if not method.method:
            # If this is a kickstart install, just return now
            if flags.automatedInstall:
                return

            # Otherwise, fall back to the default repos that we disabled above
            with self._repos_lock:
                for (id_, repo) in self._base.repos.items():
                    if id_ in enabled:
                        repo.enable()

        for ksrepo in self.data.repo.dataList():
            log.debug("repo %s: mirrorlist %s, baseurl %s",
                      ksrepo.name, ksrepo.mirrorlist, ksrepo.baseurl)
            # one of these must be set to create new repo
            if not (ksrepo.mirrorlist or ksrepo.baseurl):
                raise packaging.PayloadSetupError("Repository %s has no mirror or baseurl set"
                                                  % ksrepo.name)

            self._add_repo(ksrepo)

        ksnames = [r.name for r in self.data.repo.dataList()]
        ksnames.append(constants.BASE_REPO_NAME)
        with self._repos_lock:
            for repo in self._base.repos.iter_enabled():
                id_ = repo.id
                if 'source' in id_ or 'debuginfo' in id_:
                    self.disableRepo(id_)
                elif constants.isFinal and 'rawhide' in id_:
                    self.disableRepo(id_)

    def _writeDNFRepo(self, repo, repo_path):
        """ Write a repo object to a DNF repo.conf file

            :param repo: DNF repository object
            :param string repo_path: Path to write the repo to
            :raises: PayloadSetupError if the repo doesn't have a url
        """
        with open(repo_path, "w") as f:
            f.write("[%s]\n" % repo.id)
            f.write("name=%s\n" % repo.id)
            if self.isRepoEnabled(repo.id):
                f.write("enabled=1\n")
            else:
                f.write("enabled=0\n")

            if repo.mirrorlist:
                f.write("mirrorlist=%s\n" % repo.mirrorlist)
            elif repo.metalink:
                f.write("metalink=%s\n" % repo.metalink)
            elif repo.baseurl:
                f.write("baseurl=%s\n" % repo.baseurl[0])
            else:
                f.close()
                os.unlink(repo_path)
                raise packaging.PayloadSetupError("repo %s has no baseurl, mirrorlist or metalink" % repo.id)

            # kickstart repo modifiers
            ks_repo = self.getAddOnRepo(repo.id)
            if not ks_repo:
                return

            if ks_repo.noverifyssl:
                f.write("sslverify=0\n")

            if ks_repo.proxy:
                try:
                    proxy = ProxyString(ks_repo.proxy)
                    f.write("proxy=%s\n" % proxy.url)
                except ProxyStringError as e:
                    log.error("Failed to parse proxy for _writeInstallConfig %s: %s",
                              ks_repo.proxy, e)

            if ks_repo.cost:
                f.write("cost=%d\n" % ks_repo.cost)

            if ks_repo.includepkgs:
                f.write("include=%s\n" % ",".join(ks_repo.includepkgs))

            if ks_repo.excludepkgs:
                f.write("exclude=%s\n" % ",".join(ks_repo.excludepkgs))

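    # A generated repo file might look like this (values are illustrative):
    #
    #     [extras]
    #     name=extras
    #     enabled=1
    #     baseurl=http://example.com/25/x86_64/
    #     cost=50
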
    def postInstall(self):
        """ Perform post-installation tasks. """
        # Write selected kickstart repos to target system
        for ks_repo in (ks for ks in (self.getAddOnRepo(r) for r in self.addOns) if ks.install):
            try:
                repo = self.getRepo(ks_repo.name)
                if not repo:
                    continue
            except (dnf.exceptions.RepoError, KeyError):
                continue
            repo_path = pyanaconda.iutil.getSysroot() + YUM_REPOS_DIR + "%s.repo" % repo.id
            try:
                log.info("Writing %s.repo to target system.", repo.id)
                self._writeDNFRepo(repo, repo_path)
            except packaging.PayloadSetupError as e:
                log.error(e)

        # Write the langpacks config to the target system
        target_langpath = pyanaconda.iutil.getSysroot() + _DNF_TARGET_LANGPACK_CONF
        pyanaconda.iutil.mkdirChain(os.path.dirname(target_langpath))
        shutil.copy2(_DNF_INSTALLER_LANGPACK_CONF, target_langpath)

        super(DNFPayload, self).postInstall()

    def writeStorageLate(self):
        pass