#
# spec file for package ceph
#
# Copyright (C) 2004-2019 The Ceph Project Developers. See COPYING file
# at the top-level directory of this distribution and at
# https://github.com/ceph/ceph/blob/master/COPYING
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon.
#
# This file is under the GNU Lesser General Public License, version 2.1
#
# Please submit bugfixes or comments via http://tracker.ceph.com/
#
# `rdopkg update-patches` will automatically update this macro:
%global commit e456e8b705cb2f4a779689a0d80b122bcb0d67c9
#################################################################################
# conditional build section
#
# please read http://rpm.org/user_doc/conditional_builds.html for explanation of
# bcond syntax!
#################################################################################
%bcond_with make_check
%bcond_with zbd
%bcond_with cmake_verbose_logging
%bcond_without ceph_test_package
# tcmalloc (gperftools) is not available on s390
%ifarch s390
%bcond_with tcmalloc
%else
%bcond_without tcmalloc
%endif
# Fedora/RHEL feature selection
%if 0%{?fedora} || 0%{?rhel}
%bcond_without selinux
# rwl cache and system PMDK only on x86_64
%ifarch x86_64
%bcond_without rbd_rwl_cache
%bcond_with rbd_ssd_cache
%global _system_pmdk 1
%else
# NOTE(review): _system_pmdk is left undefined here, so the later
# "%if 0%{?_system_pmdk}" evaluates to 0 on non-x86_64 Fedora/RHEL.
%bcond_with rbd_rwl_cache
%bcond_with rbd_ssd_cache
%endif
%if 0%{?rhel} >= 8
%bcond_with cephfs_java
%else
%bcond_without cephfs_java
%endif
%bcond_without amqp_endpoint
%bcond_without kafka_endpoint
%bcond_without lttng
%bcond_without libradosstriper
%bcond_without ocf
%global _remote_tarball_prefix https://download.ceph.com/tarballs/
%endif
# SUSE feature selection (this %if is closed near the top of the next section)
%if 0%{?suse_version}
%bcond_with amqp_endpoint
%bcond_with cephfs_java
%bcond_with kafka_endpoint
%bcond_with libradosstriper
# lttng and rwl cache only on architectures where they are supported
%ifarch x86_64 aarch64 ppc64le
%bcond_without lttng
%global _system_pmdk 1
%bcond_without rbd_rwl_cache
%bcond_with rbd_ssd_cache
%else
%bcond_with lttng
%global _system_pmdk 0
%bcond_with rbd_rwl_cache
%bcond_with rbd_ssd_cache
%endif
%bcond_with ocf
%bcond_with selinux
#Compat macro for _fillupdir macro introduced in Nov 2017
%if ! %{defined _fillupdir}
%global _fillupdir /var/adm/fillup-templates
%endif
# closes the "%if 0%{?suse_version}" opened in the conditional build section
%endif
%bcond_with seastar
%bcond_with jaeger
%if 0%{?fedora} || 0%{?suse_version} >= 1500
# distros that ship cmd2 and/or colorama
%bcond_without cephfs_shell
%else
# distros that do _not_ ship cmd2/colorama
%bcond_with cephfs_shell
%endif
# weak dependencies (Recommends:) are only supported on these distros
%if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} >= 8
%global weak_deps 1
%endif
%if %{with selinux}
# get selinux policy version
# Force 0.0.0 policy version for centos builds to avoid repository sync issues between rhel and centos
%if 0%{?centos}
%global _selinux_policy_version 0.0.0
%else
%{!?_selinux_policy_version: %global _selinux_policy_version 0.0.0}
%endif
%endif
%if 0%{?suse_version}
%if !0%{?is_opensuse}
# SLE does not support luarocks
%bcond_with lua_packages
%else
%global luarocks_package_name lua53-luarocks
%bcond_without lua_packages
%endif
%else
%global luarocks_package_name luarocks
%bcond_without lua_packages
%endif
# fallbacks for macros that older distros do not define
%{!?_udevrulesdir: %global _udevrulesdir /lib/udev/rules.d}
%{!?tmpfiles_create: %global tmpfiles_create systemd-tmpfiles --create}
%{!?python3_pkgversion: %global python3_pkgversion 3}
%{!?python3_version_nodots: %global python3_version_nodots 3}
%{!?python3_version: %global python3_version 3}
# disable dwz which compresses the debuginfo
%global _find_debuginfo_dwz_opts %{nil}
#################################################################################
# main package definition
#################################################################################
Name:		ceph
Version:	16.2.0
Release:	152%{?dist}
%if 0%{?fedora} || 0%{?rhel}
Epoch:		2
%endif
# define _epoch_prefix macro which will expand to the empty string if epoch is
# undefined
%global _epoch_prefix %{?epoch:%{epoch}:}
Summary:	User space components of the Ceph file system
License:	LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-3-Clause and MIT
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
URL:		http://ceph.com/
Source0:	%{?_remote_tarball_prefix}%{name}-%{version}.tar.bz2
Source1:	%{name}-%{version}-%{commit}-changes.tar.gz
Source2:	dashboard_frontend-%{version}-%{commit}.tar.gz
%if 0%{?suse_version}
# _insert_obs_source_lines_here
ExclusiveArch:	x86_64 aarch64 ppc64le s390x
%endif
#################################################################################
# dependencies that apply across all distro families
#################################################################################
Requires:	ceph-osd = %{_epoch_prefix}%{version}-%{release}
Requires:	ceph-mds = %{_epoch_prefix}%{version}-%{release}
Requires:	ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:	ceph-mon = %{_epoch_prefix}%{version}-%{release}
Requires(post):	binutils
%if 0%{with cephfs_java}
BuildRequires:	java-devel
BuildRequires:	sharutils
%endif
%if 0%{with selinux}
BuildRequires:	checkpolicy
BuildRequires:	selinux-policy-devel
%endif
BuildRequires:	gperf
BuildRequires:	cmake > 3.5
BuildRequires:	cryptsetup
BuildRequires:	fuse-devel
# crimson (seastar) needs a newer toolchain than the system compiler on RHEL
%if 0%{with seastar}
BuildRequires:	gcc-toolset-9-gcc-c++ >= 9.2.1-2.3
%else
BuildRequires:	gcc-c++
%endif
BuildRequires:	gdbm
%if 0%{with tcmalloc}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:	gperftools-devel >= 2.6.1
%endif
%if 0%{?suse_version}
BuildRequires:	gperftools-devel >= 2.4
%endif
%endif
BuildRequires:	leveldb-devel > 1.2
BuildRequires:	libaio-devel
BuildRequires:	libblkid-devel >= 2.17
BuildRequires:	cryptsetup-devel
BuildRequires:	libcurl-devel
BuildRequires:	libcap-ng-devel
BuildRequires:	fmt-devel >= 5.2.1
BuildRequires:	pkgconfig(libudev)
BuildRequires:	libnl3-devel
BuildRequires:	liboath-devel
BuildRequires:	libtool
BuildRequires:	libxml2-devel
BuildRequires:	make
BuildRequires:	ncurses-devel
BuildRequires:	libicu-devel
BuildRequires:	parted
BuildRequires:	patch
BuildRequires:	perl
BuildRequires:	pkgconfig
BuildRequires:	procps
BuildRequires:	python%{python3_pkgversion}
BuildRequires:	python%{python3_pkgversion}-devel
BuildRequires:	snappy-devel
BuildRequires:	sqlite-devel
BuildRequires:	sudo
BuildRequires:	pkgconfig(udev)
BuildRequires:	util-linux
BuildRequires:	valgrind-devel
BuildRequires:	which
BuildRequires:	xfsprogs
BuildRequires:	xfsprogs-devel
BuildRequires:	xmlstarlet
BuildRequires:	nasm
BuildRequires:	lua-devel
%if 0%{with amqp_endpoint}
BuildRequires:	librabbitmq-devel
%endif
%if 0%{with kafka_endpoint}
BuildRequires:	librdkafka-devel
%endif
%if 0%{with lua_packages}
BuildRequires:	%{luarocks_package_name}
%endif
# extra dependencies needed only to run the test suite
%if 0%{with make_check}
BuildRequires:	jq
BuildRequires:	libuuid-devel
BuildRequires:	python%{python3_pkgversion}-bcrypt
BuildRequires:	python%{python3_pkgversion}-nose
BuildRequires:	python%{python3_pkgversion}-pecan
BuildRequires:	python%{python3_pkgversion}-requests
BuildRequires:	python%{python3_pkgversion}-dateutil
BuildRequires:	python%{python3_pkgversion}-virtualenv
BuildRequires:	python%{python3_pkgversion}-coverage
BuildRequires:	python%{python3_pkgversion}-pyOpenSSL
BuildRequires:	socat
%endif
%if 0%{with zbd}
BuildRequires:	libzbd-devel
%endif
%if 0%{with jaeger}
BuildRequires:	bison
BuildRequires:	flex
# the nlohmann json package is named differently per distro family
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:	json-devel
%endif
%if 0%{?suse_version}
BuildRequires:	nlohmann_json-devel
%endif
BuildRequires:	libevent-devel
BuildRequires:	yaml-cpp-devel
%endif
%if 0%{?_system_pmdk}
BuildRequires:	libpmem-devel
BuildRequires:	libpmemobj-devel
%endif
%if 0%{with seastar}
BuildRequires:	c-ares-devel
BuildRequires:	gnutls-devel
BuildRequires:	hwloc-devel
BuildRequires:	libpciaccess-devel
BuildRequires:	lksctp-tools-devel
BuildRequires:	protobuf-devel
BuildRequires:	ragel
BuildRequires:	systemtap-sdt-devel
BuildRequires:	yaml-cpp-devel
%if 0%{?fedora}
BuildRequires:	libubsan
BuildRequires:	libasan
BuildRequires:	libatomic
%endif
%if 0%{?rhel}
BuildRequires:	gcc-toolset-9-annobin
BuildRequires:	gcc-toolset-9-libubsan-devel
BuildRequires:	gcc-toolset-9-libasan-devel
BuildRequires:	gcc-toolset-9-libatomic-devel
%endif
%endif
#################################################################################
# distro-conditional dependencies
#################################################################################
%if 0%{?suse_version}
BuildRequires:	pkgconfig(systemd)
BuildRequires:	systemd-rpm-macros
%{?systemd_requires}
PreReq:		%fillup_prereq
BuildRequires:	fdupes
BuildRequires:	net-tools
BuildRequires:	libbz2-devel
BuildRequires:	mozilla-nss-devel
BuildRequires:	keyutils-devel
BuildRequires:	libopenssl-devel
BuildRequires:	lsb-release
BuildRequires:	openldap2-devel
#BuildRequires:	krb5
#BuildRequires:	krb5-devel
BuildRequires:	cunit-devel
BuildRequires:	python%{python3_pkgversion}-setuptools
BuildRequires:	python%{python3_pkgversion}-Cython
BuildRequires:	python%{python3_pkgversion}-PrettyTable
BuildRequires:	python%{python3_pkgversion}-Sphinx
BuildRequires:	rdma-core-devel
BuildRequires:	liblz4-devel >= 1.7
# for prometheus-alerts
BuildRequires:	golang-github-prometheus-prometheus
%endif
%if 0%{?fedora} || 0%{?rhel}
Requires:	systemd
BuildRequires:	boost-random
BuildRequires:	nss-devel
BuildRequires:	keyutils-libs-devel
BuildRequires:	libibverbs-devel
BuildRequires:	librdmacm-devel
BuildRequires:	openldap-devel
#BuildRequires:	krb5-devel
BuildRequires:	openssl-devel
BuildRequires:	CUnit-devel
BuildRequires:	redhat-lsb-core
BuildRequires:	python%{python3_pkgversion}-devel
BuildRequires:	python%{python3_pkgversion}-setuptools
BuildRequires:	python%{python3_pkgversion}-Cython
BuildRequires:	python%{python3_pkgversion}-prettytable
BuildRequires:	python%{python3_pkgversion}-sphinx
BuildRequires:	lz4-devel >= 1.7
%endif
# distro-conditional make check dependencies
%if 0%{with make_check}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:	libtool-ltdl-devel
BuildRequires:	xmlsec1
BuildRequires:	xmlsec1-devel
%ifarch x86_64
BuildRequires:	xmlsec1-nss
%endif
BuildRequires:	xmlsec1-openssl
BuildRequires:	xmlsec1-openssl-devel
BuildRequires:	python%{python3_pkgversion}-cherrypy
BuildRequires:	python%{python3_pkgversion}-jwt
BuildRequires:	python%{python3_pkgversion}-routes
BuildRequires:	python%{python3_pkgversion}-scipy
BuildRequires:	python%{python3_pkgversion}-werkzeug
BuildRequires:	python%{python3_pkgversion}-pyOpenSSL
%endif
%if 0%{?suse_version}
BuildRequires:	libxmlsec1-1
BuildRequires:	libxmlsec1-nss1
BuildRequires:	libxmlsec1-openssl1
BuildRequires:	python%{python3_pkgversion}-CherryPy
BuildRequires:	python%{python3_pkgversion}-PyJWT
BuildRequires:	python%{python3_pkgversion}-Routes
BuildRequires:	python%{python3_pkgversion}-Werkzeug
BuildRequires:	python%{python3_pkgversion}-numpy-devel
BuildRequires:	xmlsec1-devel
BuildRequires:	xmlsec1-openssl-devel
%endif
%endif
# lttng and babeltrace for rbd-replay-prep
%if %{with lttng}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:	lttng-ust-devel
BuildRequires:	libbabeltrace-devel
%endif
%if 0%{?suse_version}
BuildRequires:	lttng-ust-devel
BuildRequires:	babeltrace-devel
%endif
%endif
%if 0%{?suse_version}
BuildRequires:	libexpat-devel
%endif
%if 0%{?rhel} || 0%{?fedora}
BuildRequires:	expat-devel
%endif
#hardened-cc1
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:	redhat-rpm-config
%endif
%if 0%{with seastar}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:	cryptopp-devel
BuildRequires:	numactl-devel
BuildRequires:	protobuf-compiler
%endif
%if 0%{?suse_version}
BuildRequires:	libcryptopp-devel
BuildRequires:	libnuma-devel
%endif
%endif
%if 0%{?rhel} >= 8
BuildRequires:	/usr/bin/pathfix.py
%endif

%description
Ceph is a massively scalable, open-source, distributed
storage system that runs on commodity hardware and delivers object,
block and file system storage.
#################################################################################
# subpackages
#################################################################################
# ceph-base: files shared by all Ceph server daemon packages
%package base
Summary:	Ceph Base Package
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Provides:	ceph-test:/usr/bin/ceph-kvstore-tool
Requires:	ceph-common = %{_epoch_prefix}%{version}-%{release}
Requires:	librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:	libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires:	librgw2 = %{_epoch_prefix}%{version}-%{release}
%if 0%{with selinux}
Requires:	ceph-selinux = %{_epoch_prefix}%{version}-%{release}
%endif
Requires:	cryptsetup
Requires:	e2fsprogs
Requires:	findutils
Requires:	grep
Requires:	logrotate
Requires:	parted
Requires:	psmisc
Requires:	python%{python3_pkgversion}-setuptools
Requires:	util-linux
Requires:	xfsprogs
Requires:	which
%if 0%{?fedora} || 0%{?rhel}
# The following is necessary due to tracker 36508 and can be removed once the
# associated upstream bugs are resolved.
%if 0%{with tcmalloc}
Requires:	gperftools-libs >= 2.6.1
%endif
%endif
%if 0%{?weak_deps}
Recommends:	chrony
%endif
%description base
Base is the package that includes all the files shared amongst ceph servers

# cephadm: container-based bootstrap/deployment tool (noarch Python script)
%package -n cephadm
Summary:	Utility to bootstrap Ceph clusters
BuildArch:	noarch
Requires:	lvm2
Requires:	python%{python3_pkgversion}
%if 0%{?weak_deps}
Recommends:	podman >= 2.0.2
%endif
%description -n cephadm
Utility to bootstrap a Ceph cluster and manage Ceph daemons deployed
with systemd and podman.
# ceph-common: client-side CLI tools and their Python bindings
%package -n ceph-common
Summary:	Ceph Common
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:	libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-rbd = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-cephfs = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-rgw = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release}
%if 0%{with jaeger}
Requires:	libjaeger = %{_epoch_prefix}%{version}-%{release}
%endif
# prettytable package name differs per distro family
%if 0%{?fedora} || 0%{?rhel}
Requires:	python%{python3_pkgversion}-prettytable
%endif
%if 0%{?suse_version}
Requires:	python%{python3_pkgversion}-PrettyTable
%endif
%if 0%{with libradosstriper}
Requires:	libradosstriper1 = %{_epoch_prefix}%{version}-%{release}
%endif
%{?systemd_requires}
%if 0%{?suse_version}
Requires(pre):	pwdutils
%endif
%description -n ceph-common
Common utilities to mount and interact with a ceph storage cluster.
Comprised of files that are common to Ceph clients and servers.

%package mds
Summary:	Ceph Metadata Server Daemon
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-base = %{_epoch_prefix}%{version}-%{release}
%description mds
ceph-mds is the metadata server daemon for the Ceph distributed file system.
One or more instances of ceph-mds collectively manage the file system
namespace, coordinating access to the shared OSD cluster.
%package mon
Summary:	Ceph Monitor Daemon
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Provides:	ceph-test:/usr/bin/ceph-monstore-tool
Requires:	ceph-base = %{_epoch_prefix}%{version}-%{release}
%if 0%{?weak_deps}
Recommends:	nvme-cli
# smartmontools: hard requirement on SUSE, weak elsewhere
%if 0%{?suse_version}
Requires:	smartmontools
%else
Recommends:	smartmontools
%endif
%endif
%if 0%{with jaeger}
Requires:	libjaeger = %{_epoch_prefix}%{version}-%{release}
%endif
%description mon
ceph-mon is the cluster monitor daemon for the Ceph distributed
file system. One or more instances of ceph-mon form a Paxos
part-time parliament cluster that provides extremely reliable and
durable storage of cluster membership, configuration, and state.

%package mgr
Summary:	Ceph Manager Daemon
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:	ceph-mgr-modules-core = %{_epoch_prefix}%{version}-%{release}
Requires:	libcephsqlite = %{_epoch_prefix}%{version}-%{release}
# optional mgr modules are pulled in as weak dependencies where supported
%if 0%{?weak_deps}
Recommends:	ceph-mgr-dashboard = %{_epoch_prefix}%{version}-%{release}
Recommends:	ceph-mgr-diskprediction-local = %{_epoch_prefix}%{version}-%{release}
Recommends:	ceph-mgr-k8sevents = %{_epoch_prefix}%{version}-%{release}
Recommends:	ceph-mgr-cephadm = %{_epoch_prefix}%{version}-%{release}
Recommends:	python%{python3_pkgversion}-influxdb
%endif
%description mgr
ceph-mgr enables python modules that provide services (such as the REST
module derived from Calamari) and expose CLI hooks.  ceph-mgr gathers
the cluster maps, the daemon metadata, and performance counters, and
exposes all these to the python modules.
%package mgr-dashboard
Summary:	Ceph Dashboard
BuildArch:	noarch
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:	ceph-grafana-dashboards = %{_epoch_prefix}%{version}-%{release}
Requires:	ceph-prometheus-alerts = %{_epoch_prefix}%{version}-%{release}
# the same Python dependencies are spelled differently per distro family
%if 0%{?fedora} || 0%{?rhel}
Requires:	python%{python3_pkgversion}-cherrypy
Requires:	python%{python3_pkgversion}-jwt
Requires:	python%{python3_pkgversion}-routes
Requires:	python%{python3_pkgversion}-werkzeug
%if 0%{?weak_deps}
Recommends:	python%{python3_pkgversion}-saml
%endif
%endif
%if 0%{?suse_version}
Requires:	python%{python3_pkgversion}-CherryPy
Requires:	python%{python3_pkgversion}-PyJWT
Requires:	python%{python3_pkgversion}-Routes
Requires:	python%{python3_pkgversion}-Werkzeug
Recommends:	python%{python3_pkgversion}-python3-saml
%endif
%description mgr-dashboard
ceph-mgr-dashboard is a manager module, providing a web-based application
to monitor and manage many aspects of a Ceph cluster and related components.
See the Dashboard documentation at http://docs.ceph.com/ for details and a
detailed feature overview.

%package mgr-diskprediction-local
Summary:	Ceph Manager module for predicting disk failures
BuildArch:	noarch
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-numpy
%if 0%{?fedora} || 0%{?suse_version}
Requires:	python%{python3_pkgversion}-scikit-learn
%endif
# FIX: was hard-coded "python3-scipy"; use %%{python3_pkgversion} like every
# other Python dependency in this spec (cf. the make_check BuildRequires)
Requires:	python%{python3_pkgversion}-scipy
%description mgr-diskprediction-local
ceph-mgr-diskprediction-local is a ceph-mgr module that tries to predict
disk failures using local algorithms and machine-learning databases.
%package mgr-modules-core
Summary:	Ceph Manager modules which are always enabled
BuildArch:	noarch
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	python%{python3_pkgversion}-bcrypt
Requires:	python%{python3_pkgversion}-pecan
Requires:	python%{python3_pkgversion}-pyOpenSSL
Requires:	python%{python3_pkgversion}-requests
Requires:	python%{python3_pkgversion}-dateutil
# package names differ per distro family for these modules' dependencies
%if 0%{?fedora} || 0%{?rhel} >= 8
Requires:	python%{python3_pkgversion}-cherrypy
Requires:	python%{python3_pkgversion}-pyyaml
Requires:	python%{python3_pkgversion}-werkzeug
%endif
%if 0%{?suse_version}
Requires:	python%{python3_pkgversion}-CherryPy
Requires:	python%{python3_pkgversion}-PyYAML
Requires:	python%{python3_pkgversion}-Werkzeug
%endif
%if 0%{?weak_deps}
Recommends:	ceph-mgr-rook = %{_epoch_prefix}%{version}-%{release}
%endif
%description mgr-modules-core
ceph-mgr-modules-core provides a set of modules which are always
enabled by ceph-mgr.

%package mgr-rook
BuildArch:	noarch
Summary:	Ceph Manager module for Rook-based orchestration
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-kubernetes
Requires:	python%{python3_pkgversion}-jsonpatch
%description mgr-rook
ceph-mgr-rook is a ceph-mgr module for orchestration functions using
a Rook backend.
%package mgr-k8sevents
BuildArch:	noarch
Summary:	Ceph Manager module to orchestrate ceph-events to kubernetes' events API
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-kubernetes
%description mgr-k8sevents
ceph-mgr-k8sevents is a ceph-mgr module that sends every ceph-events
to kubernetes' events API

%package mgr-cephadm
Summary:	Ceph Manager module for cephadm-based orchestration
BuildArch:	noarch
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-remoto
Requires:	cephadm = %{_epoch_prefix}%{version}-%{release}
# ssh client and jinja2 package names differ per distro family
%if 0%{?suse_version}
Requires:	openssh
Requires:	python%{python3_pkgversion}-Jinja2
%endif
%if 0%{?rhel} || 0%{?fedora}
Requires:	openssh-clients
Requires:	python%{python3_pkgversion}-jinja2
%endif
%description mgr-cephadm
ceph-mgr-cephadm is a ceph-mgr module for orchestration functions using
the integrated cephadm deployment tool management operations.

%package fuse
Summary:	Ceph fuse-based client
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	fuse
Requires:	python%{python3_pkgversion}
%description fuse
FUSE based client for Ceph distributed network file system

%package -n cephfs-mirror
Summary:	Ceph daemon for mirroring CephFS snapshots
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:	libcephfs2 = %{_epoch_prefix}%{version}-%{release}
%description -n cephfs-mirror
Daemon for mirroring CephFS snapshots between Ceph clusters.
%package -n rbd-fuse
Summary:	Ceph fuse-based client
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:	librbd1 = %{_epoch_prefix}%{version}-%{release}
%description -n rbd-fuse
FUSE based client to map Ceph rbd images to files

%package -n rbd-mirror
Summary:	Ceph daemon for mirroring RBD images
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:	librbd1 = %{_epoch_prefix}%{version}-%{release}
%description -n rbd-mirror
Daemon for mirroring RBD images between Ceph clusters, streaming
changes asynchronously.

%package immutable-object-cache
Summary:	Ceph daemon for immutable object cache
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
%description immutable-object-cache
Daemon for immutable object cache.

%package -n rbd-nbd
# FIX: typo in user-visible Summary ("base on" -> "based on")
Summary:	Ceph RBD client based on NBD
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:	librbd1 = %{_epoch_prefix}%{version}-%{release}
%description -n rbd-nbd
NBD based client to map Ceph rbd images to local device

%package radosgw
Summary:	Rados REST gateway
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-base = %{_epoch_prefix}%{version}-%{release}
%if 0%{with selinux}
Requires:	ceph-selinux = %{_epoch_prefix}%{version}-%{release}
%endif
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:	librgw2 = %{_epoch_prefix}%{version}-%{release}
%if 0%{?rhel} || 0%{?fedora}
Requires:	mailcap
%endif
%if 0%{?weak_deps}
Recommends:	gawk
%endif
%description radosgw
RADOS is a distributed object store used by the Ceph distributed
storage system. This package provides a REST gateway to the
object store that aims to implement a superset of Amazon's S3
service as well as the OpenStack Object Storage ("Swift") API.

%package -n cephfs-top
Summary:	top(1) like utility for Ceph Filesystem
BuildArch:	noarch
Requires:	python%{python3_pkgversion}-rados
%description -n cephfs-top
This package provides a top(1) like utility to display Ceph Filesystem metrics
in realtime.

%if %{with ocf}
%package resource-agents
Summary:	OCF-compliant resource agents for Ceph daemons
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
# FIX: was "= %%{_epoch_prefix}%%{version}" without the release; every other
# inter-subpackage dependency in this spec pins the full version-release
Requires:	ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:	resource-agents
%description resource-agents
Resource agents for monitoring and managing Ceph daemons under Open
Cluster Framework (OCF) compliant resource managers such as Pacemaker.
%endif

%package osd
Summary:	Ceph Object Storage Daemon
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Provides:	ceph-test:/usr/bin/ceph-osdomap-tool
Requires:	ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:	lvm2
Requires:	sudo
Requires:	libstoragemgmt
Requires:	python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release}
%if 0%{?weak_deps}
Recommends:	nvme-cli
# smartmontools: hard requirement on SUSE, weak elsewhere
%if 0%{?suse_version}
Requires:	smartmontools
%else
Recommends:	smartmontools
%endif
%endif
%description osd
ceph-osd is the object storage daemon for the Ceph distributed file
system.  It is responsible for storing objects on a local file system
and providing access to them over the network.

%if 0%{with seastar}
%package crimson-osd
Summary:	Ceph Object Storage Daemon (crimson)
%if 0%{?suse_version}
Group:		System/Filesystems
%endif
Requires:	ceph-osd = %{_epoch_prefix}%{version}-%{release}
%description crimson-osd
crimson-osd is the object storage daemon for the Ceph distributed file
system.  It is responsible for storing objects on a local file system
and providing access to them over the network.
# closes the "%if 0%{with seastar}" around the crimson-osd subpackage
%endif

%package -n librados2
Summary:	RADOS distributed object store client library
%if 0%{?suse_version}
Group:		System/Libraries
%endif
%if 0%{?rhel} || 0%{?fedora}
Obsoletes:	ceph-libs < %{_epoch_prefix}%{version}-%{release}
%endif
%description -n librados2
RADOS is a reliable, autonomic distributed object storage cluster
developed as part of the Ceph distributed storage system. This is a
shared library allowing applications to access the distributed object
store using a simple file-like interface.

%package -n librados-devel
Summary:	RADOS headers
%if 0%{?suse_version}
Group:		Development/Libraries/C and C++
%endif
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
Obsoletes:	ceph-devel < %{_epoch_prefix}%{version}-%{release}
# compatibility with the old librados2-devel package name
Provides:	librados2-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:	librados2-devel < %{_epoch_prefix}%{version}-%{release}
%description -n librados-devel
This package contains C libraries and headers needed to develop programs
that use RADOS object store.

%package -n libradospp-devel
Summary:	RADOS headers
%if 0%{?suse_version}
Group:		Development/Libraries/C and C++
%endif
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:	librados-devel = %{_epoch_prefix}%{version}-%{release}
%description -n libradospp-devel
This package contains C++ libraries and headers needed to develop programs
that use RADOS object store.

%package -n librgw2
Summary:	RADOS gateway client library
%if 0%{?suse_version}
Group:		System/Libraries
%endif
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
%description -n librgw2
This package provides a library implementation of the RADOS gateway
(distributed object store with S3 and Swift personalities).
%package -n librgw-devel
Summary:	RADOS gateway client library
%if 0%{?suse_version}
Group:		Development/Libraries/C and C++
%endif
Requires:	librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires:	librgw2 = %{_epoch_prefix}%{version}-%{release}
# compatibility with the old librgw2-devel package name
Provides:	librgw2-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:	librgw2-devel < %{_epoch_prefix}%{version}-%{release}
%description -n librgw-devel
This package contains libraries and headers needed to develop programs
that use RADOS gateway client library.

%package -n python%{python3_pkgversion}-rgw
Summary:	Python 3 libraries for the RADOS gateway
%if 0%{?suse_version}
Group:		Development/Libraries/Python
%endif
Requires:	librgw2 = %{_epoch_prefix}%{version}-%{release}
Requires:	python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-rgw}
# replaces the unversioned python-rgw package of pre-Python-3 releases
Provides:	python-rgw = %{_epoch_prefix}%{version}-%{release}
Obsoletes:	python-rgw < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-rgw
This package contains Python 3 libraries for interacting with Ceph RADOS
gateway.

%package -n python%{python3_pkgversion}-rados
Summary:	Python 3 libraries for the RADOS object store
%if 0%{?suse_version}
Group:		Development/Libraries/Python
%endif
Requires:	python%{python3_pkgversion}
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-rados}
# replaces the unversioned python-rados package of pre-Python-3 releases
Provides:	python-rados = %{_epoch_prefix}%{version}-%{release}
Obsoletes:	python-rados < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-rados
This package contains Python 3 libraries for interacting with Ceph RADOS
object store.
%package -n libcephsqlite
Summary:	SQLite3 VFS for Ceph
%if 0%{?suse_version}
Group:		System/Libraries
%endif
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
%description -n libcephsqlite
A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS
distributed object store.

%package -n libcephsqlite-devel
Summary:	SQLite3 VFS for Ceph headers
%if 0%{?suse_version}
Group:		Development/Libraries/C and C++
%endif
Requires:	sqlite-devel
Requires:	libcephsqlite = %{_epoch_prefix}%{version}-%{release}
Requires:	librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires:	libradospp-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:	ceph-devel < %{_epoch_prefix}%{version}-%{release}
# FIX: dropped "Provides: libcephsqlite-devel" and "Obsoletes:
# libcephsqlite-devel" — unlike the sibling devel packages (whose Provides
# name differs from the package name, e.g. librados-devel -> librados2-devel),
# these referenced this package's own name: the Provides duplicates rpm's
# automatic name = EVR provide and the Obsoletes is a self-obsoletion.
%description -n libcephsqlite-devel
A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS
distributed object store.

%if 0%{with libradosstriper}
%package -n libradosstriper1
Summary:	RADOS striping interface
%if 0%{?suse_version}
Group:		System/Libraries
%endif
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
%description -n libradosstriper1
Striping interface built on top of the rados library, allowing
to stripe bigger objects onto several standard rados objects using
an interface very similar to the rados one.
%package -n libradosstriper-devel
Summary:	RADOS striping interface headers
%if 0%{?suse_version}
Group:		Development/Libraries/C and C++
%endif
Requires:	libradosstriper1 = %{_epoch_prefix}%{version}-%{release}
Requires:	librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires:	libradospp-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:	ceph-devel < %{_epoch_prefix}%{version}-%{release}
# compatibility with the old libradosstriper1-devel package name
Provides:	libradosstriper1-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:	libradosstriper1-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libradosstriper-devel
This package contains libraries and headers needed to develop programs
that use RADOS striping interface.
# closes the "%if 0%{with libradosstriper}" opened before libradosstriper1
%endif

%package -n librbd1
Summary:	RADOS block device client library
%if 0%{?suse_version}
Group:		System/Libraries
%endif
Requires:	librados2 = %{_epoch_prefix}%{version}-%{release}
%if 0%{?suse_version}
Requires(post):	coreutils
%endif
%if 0%{?rhel} || 0%{?fedora}
Obsoletes:	ceph-libs < %{_epoch_prefix}%{version}-%{release}
%endif
%description -n librbd1
RBD is a block device striped across multiple distributed objects in
RADOS, a reliable, autonomic distributed object storage cluster
developed as part of the Ceph distributed storage system. This is a
shared library allowing applications to manage these block devices.

%package -n librbd-devel
Summary:	RADOS block device headers
%if 0%{?suse_version}
Group:		Development/Libraries/C and C++
%endif
Requires:	librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires:	librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires:	libradospp-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:	ceph-devel < %{_epoch_prefix}%{version}-%{release}
# compatibility with the old librbd1-devel package name
Provides:	librbd1-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:	librbd1-devel < %{_epoch_prefix}%{version}-%{release}
%description -n librbd-devel
This package contains libraries and headers needed to develop programs
that use RADOS block device.
# Python 3 bindings for RBD.
%package -n python%{python3_pkgversion}-rbd
Summary: Python 3 libraries for the RADOS block device
%if 0%{?suse_version}
Group: Development/Libraries/Python
%endif
Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-rbd}
Provides: python-rbd = %{_epoch_prefix}%{version}-%{release}
Obsoletes: python-rbd < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-rbd
This package contains Python 3 libraries for interacting with Ceph RADOS
block device.
# CephFS client library and headers.
%package -n libcephfs2
Summary: Ceph distributed file system client library
%if 0%{?suse_version}
Group: System/Libraries
%endif
Obsoletes: libcephfs1 < %{_epoch_prefix}%{version}-%{release}
%if 0%{?rhel} || 0%{?fedora}
Obsoletes: ceph-libs < %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-libcephfs
%endif
%description -n libcephfs2
Ceph is a distributed network file system designed to provide excellent
performance, reliability, and scalability. This is a shared library
allowing applications to access a Ceph distributed file system via a
POSIX-like interface.
%package -n libcephfs-devel
Summary: Ceph distributed file system headers
%if 0%{?suse_version}
Group: Development/Libraries/C and C++
%endif
Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides: libcephfs2-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: libcephfs2-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libcephfs-devel
This package contains libraries and headers needed to develop programs
that use Ceph distributed file system.
# Bundled Jaeger tracing runtime; only built when the jaeger bcond is enabled.
%if 0%{with jaeger}
%package -n libjaeger
Summary: Ceph distributed file system tracing library
%if 0%{?suse_version}
Group: System/Libraries
%endif
# NOTE(review): these soname Provides hardcode the ()(64bit) annotation and a
# specific libthrift version — confirm they match the bundled libraries and
# that 32-bit targets are not expected to build this subpackage.
Provides: libjaegertracing.so.0()(64bit)
Provides: libopentracing.so.1()(64bit)
Provides: libthrift.so.0.13.0()(64bit)
%description -n libjaeger
This package contains libraries needed to provide distributed
tracing for Ceph.
%endif
# Python 3 bindings for CephFS.
%package -n python%{python3_pkgversion}-cephfs
Summary: Python 3 libraries for Ceph distributed file system
%if 0%{?suse_version}
Group: Development/Libraries/Python
%endif
Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
Requires: python%{python3_pkgversion}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-cephfs}
Provides: python-cephfs = %{_epoch_prefix}%{version}-%{release}
Obsoletes: python-cephfs < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-cephfs
This package contains Python 3 libraries for interacting with Ceph
distributed file system.
%package -n python%{python3_pkgversion}-ceph-argparse
Summary: Python 3 utility libraries for Ceph CLI
%if 0%{?suse_version}
Group: Development/Libraries/Python
%endif
%{?python_provide:%python_provide python%{python3_pkgversion}-ceph-argparse}
%description -n python%{python3_pkgversion}-ceph-argparse
This package contains types and routines for Python 3 used by the
Ceph CLI as well as the RESTful interface. These have to do with querying
the daemons for command-description information, validating user command
input against those descriptions, and submitting the command to the
appropriate daemon.
# Shared Python 3 utilities (also used by the cephadm orchestrator).
%package -n python%{python3_pkgversion}-ceph-common
Summary: Python 3 utility libraries for Ceph
# PyYAML package name differs between Fedora/RHEL and SUSE.
%if 0%{?fedora} || 0%{?rhel} >= 8
Requires: python%{python3_pkgversion}-pyyaml
%endif
%if 0%{?suse_version}
Requires: python%{python3_pkgversion}-PyYAML
%endif
%if 0%{?suse_version}
Group: Development/Libraries/Python
%endif
%{?python_provide:%python_provide python%{python3_pkgversion}-ceph-common}
%description -n python%{python3_pkgversion}-ceph-common
This package contains data structures, classes and functions used by Ceph.
It also contains utilities used for the cephadm orchestrator.
%if 0%{with cephfs_shell}
%package -n cephfs-shell
Summary: Interactive shell for Ceph file system
Requires: python%{python3_pkgversion}-cmd2
Requires: python%{python3_pkgversion}-colorama
Requires: python%{python3_pkgversion}-cephfs
%description -n cephfs-shell
This package contains an interactive tool that allows accessing a Ceph
file system without mounting it by providing a nice pseudo-shell which
works like an FTP client.
%endif
%if 0%{with ceph_test_package}
%package -n ceph-test
Summary: Ceph benchmarks and test tools
%if 0%{?suse_version}
Group: System/Benchmark
%endif
Requires: ceph-common = %{_epoch_prefix}%{version}-%{release}
Requires: xmlstarlet
Requires: jq
Requires: socat
%description -n ceph-test
This package contains Ceph benchmarks and test tools.
%endif
# CephFS Java bindings (JNI library, Java classes, and JNI headers).
%if 0%{with cephfs_java}
%package -n libcephfs_jni1
Summary: Java Native Interface library for CephFS Java bindings
%if 0%{?suse_version}
Group: System/Libraries
%endif
Requires: java
Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
%description -n libcephfs_jni1
This package contains the Java Native Interface library for CephFS Java
bindings.
%package -n libcephfs_jni-devel
Summary: Development files for CephFS Java Native Interface library
%if 0%{?suse_version}
Group: Development/Libraries/Java
%endif
Requires: java
Requires: libcephfs_jni1 = %{_epoch_prefix}%{version}-%{release}
Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides: libcephfs_jni1-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes: libcephfs_jni1-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libcephfs_jni-devel
This package contains the development files for CephFS Java Native Interface
library.
%package -n cephfs-java
Summary: Java libraries for the Ceph File System
%if 0%{?suse_version}
Group: System/Libraries
%endif
Requires: java
Requires: libcephfs_jni1 = %{_epoch_prefix}%{version}-%{release}
Requires: junit
BuildRequires: junit
%description -n cephfs-java
This package contains the Java libraries for the Ceph File System.
%endif
%package -n rados-objclass-devel
Summary: RADOS object class development kit
%if 0%{?suse_version}
Group: Development/Libraries/C and C++
%endif
Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release}
%description -n rados-objclass-devel
This package contains libraries and headers needed to develop RADOS object
class plugins.
# SELinux policy; installing it triggers a potentially slow filesystem relabel.
%if 0%{with selinux}
%package selinux
Summary: SELinux support for Ceph MON, OSD and MDS
%if 0%{?suse_version}
Group: System/Filesystems
%endif
Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires: policycoreutils, libselinux-utils
Requires(post): ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires(post): selinux-policy-base >= %{_selinux_policy_version}, policycoreutils, gawk
Requires(postun): policycoreutils
%description selinux
This package contains SELinux support for Ceph MON, OSD and MDS. The package
also performs file-system relabelling which can take a long time on heavily
populated file-systems.
%endif
# Monitoring artifacts: Grafana dashboards and Prometheus alert rules.
%package grafana-dashboards
Summary: The set of Grafana dashboards for monitoring purposes
BuildArch: noarch
%if 0%{?suse_version}
Group: System/Filesystems
%endif
%description grafana-dashboards
This package provides a set of Grafana dashboards for monitoring of Ceph
clusters. The dashboards require a Prometheus server setup collecting data
from Ceph Manager "prometheus" module and Prometheus project "node_exporter"
module. The dashboards are designed to be integrated with the Ceph Manager
Dashboard web UI.
%package prometheus-alerts
Summary: Prometheus alerts for a Ceph deployment
BuildArch: noarch
Group: System/Monitoring
%description prometheus-alerts
This package provides Ceph default alerts for Prometheus.
#################################################################################
# common
#################################################################################
%prep
%autosetup -p1
# Apply the rdopkg-tar patches
%autosetup -T -D -b 1
# Remove the old dashboard dist
rm -rf src/pybind/mgr/dashboard/frontend/dist
# Apply new dashboard dist
%autosetup -T -D -a 2
# Rewrite .git_version file so binaries report the packaged commit/version.
echo %{commit} > src/.git_version
echo %{version}-%{release} >> src/.git_version
%build
# LTO can be enabled as soon as the following GCC bug is fixed:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48200
%define _lto_cflags %{nil}
%if 0%{with seastar} && 0%{?rhel}
. /opt/rh/gcc-toolset-9/enable
%endif
%if 0%{with cephfs_java}
# Find jni.h
for i in /usr/{lib64,lib}/jvm/java/include{,/linux}; do
    [ -d $i ] && java_inc="$java_inc -I$i"
done
%endif
%if 0%{?suse_version}
# the following setting fixed an OOM condition we once encountered in the OBS
RPM_OPT_FLAGS="$RPM_OPT_FLAGS --param ggc-min-expand=20 --param ggc-min-heapsize=32768"
%endif
export CPPFLAGS="$java_inc"
export CFLAGS="$RPM_OPT_FLAGS"
export CXXFLAGS="$RPM_OPT_FLAGS"
export LDFLAGS="$RPM_LD_FLAGS"
%if 0%{with seastar}
# seastar uses longjmp() to implement coroutine,
# and this annoys longjmp_chk()
export CXXFLAGS=$(echo $RPM_OPT_FLAGS | sed -e 's/-Wp,-D_FORTIFY_SOURCE=2//g')
%endif
# Parallel build settings ...
CEPH_MFLAGS_JOBS="%{?_smp_mflags}"
CEPH_SMP_NCPUS=$(echo "$CEPH_MFLAGS_JOBS" | sed 's/-j//')
%if 0%{?__isa_bits} == 32
# 32-bit builds can use 3G memory max, which is not enough even for -j2
CEPH_SMP_NCPUS="1"
%endif
# do not eat all memory
echo "Available memory:"
free -h
echo "System limits:"
ulimit -a
# Cap -jN so each compile job has ~3.2 GB of RAM available.
if test -n "$CEPH_SMP_NCPUS" -a "$CEPH_SMP_NCPUS" -gt 1 ; then
    mem_per_process=3200
    max_mem=$(LANG=C free -m | sed -n "s|^Mem: *\([0-9]*\).*$|\1|p")
    max_jobs="$(($max_mem / $mem_per_process))"
    test "$CEPH_SMP_NCPUS" -gt "$max_jobs" && CEPH_SMP_NCPUS="$max_jobs" && echo "Warning: Reducing build parallelism to -j$max_jobs because of memory limits"
    test "$CEPH_SMP_NCPUS" -le 0 && CEPH_SMP_NCPUS="1" && echo "Warning: Not using parallel build at all because of memory limits"
fi
export CEPH_SMP_NCPUS
export CEPH_MFLAGS_JOBS="-j$CEPH_SMP_NCPUS"
env | sort
mkdir build
cd build
CMAKE=cmake
${CMAKE} .. \
    -DCMAKE_INSTALL_PREFIX=%{_prefix} \
    -DCMAKE_INSTALL_LIBDIR=%{_libdir} \
    -DCMAKE_INSTALL_LIBEXECDIR=%{_libexecdir} \
    -DCMAKE_INSTALL_LOCALSTATEDIR=%{_localstatedir} \
    -DCMAKE_INSTALL_SYSCONFDIR=%{_sysconfdir} \
    -DCMAKE_INSTALL_MANDIR=%{_mandir} \
    -DCMAKE_INSTALL_DOCDIR=%{_docdir}/ceph \
    -DCMAKE_INSTALL_INCLUDEDIR=%{_includedir} \
    -DCMAKE_INSTALL_SYSTEMD_SERVICEDIR=%{_unitdir} \
    -DWITH_MANPAGE=ON \
    -DWITH_PYTHON3=%{python3_version} \
    -DWITH_MGR_DASHBOARD_FRONTEND=OFF \
%if 0%{without ceph_test_package}
    -DWITH_TESTS=OFF \
%endif
%if 0%{with cephfs_java}
    -DWITH_CEPHFS_JAVA=ON \
%endif
%if 0%{with selinux}
    -DWITH_SELINUX=ON \
%endif
%if %{with lttng}
    -DWITH_LTTNG=ON \
    -DWITH_BABELTRACE=ON \
%else
    -DWITH_LTTNG=OFF \
    -DWITH_BABELTRACE=OFF \
%endif
    $CEPH_EXTRA_CMAKE_ARGS \
%if 0%{with ocf}
    -DWITH_OCF=ON \
%endif
%if 0%{with cephfs_shell}
    -DWITH_CEPHFS_SHELL=ON \
%endif
%if 0%{with libradosstriper}
    -DWITH_LIBRADOSSTRIPER=ON \
%else
    -DWITH_LIBRADOSSTRIPER=OFF \
%endif
%if 0%{with amqp_endpoint}
    -DWITH_RADOSGW_AMQP_ENDPOINT=ON \
%else
    -DWITH_RADOSGW_AMQP_ENDPOINT=OFF \
%endif
%if 0%{with kafka_endpoint}
    -DWITH_RADOSGW_KAFKA_ENDPOINT=ON \
%else
    -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF \
%endif
%if 0%{without lua_packages}
    -DWITH_RADOSGW_LUA_PACKAGES=OFF \
%endif
%if 0%{with zbd}
    -DWITH_ZBD=ON \
%endif
%if 0%{with cmake_verbose_logging}
    -DCMAKE_VERBOSE_MAKEFILE=ON \
%endif
%if 0%{with rbd_rwl_cache}
    -DWITH_RBD_RWL=ON \
%endif
%if 0%{with rbd_ssd_cache}
    -DWITH_RBD_SSD_CACHE=ON \
%endif
%if 0%{?_system_pmdk}
    -DWITH_SYSTEM_PMDK:BOOL=ON \
%endif
    -DBOOST_J=$CEPH_SMP_NCPUS \
    -DWITH_GRAFANA=ON
%if %{with cmake_verbose_logging}
cat ./CMakeFiles/CMakeOutput.log
cat ./CMakeFiles/CMakeError.log
%endif
make "$CEPH_MFLAGS_JOBS"
%if 0%{with make_check}
%check
# run in-tree unittests
cd build
ctest "$CEPH_MFLAGS_JOBS"
%endif
%install
pushd build
make DESTDIR=%{buildroot} install
# we have dropped sysvinit bits
rm -f %{buildroot}/%{_sysconfdir}/init.d/ceph
popd
%if 0%{with seastar}
# package
crimson-osd with the name of ceph-osd
install -m 0755 %{buildroot}%{_bindir}/crimson-osd %{buildroot}%{_bindir}/ceph-osd
%endif
install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
%if 0%{?fedora} || 0%{?rhel}
install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
%endif
%if 0%{?suse_version}
install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_fillupdir}/sysconfig.%{name}
%endif
install -m 0644 -D systemd/ceph.tmpfiles.d %{buildroot}%{_tmpfilesdir}/ceph-common.conf
install -m 0644 -D systemd/50-ceph.preset %{buildroot}%{_presetdir}/50-ceph.preset
mkdir -p %{buildroot}%{_sbindir}
install -m 0644 -D src/logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/ceph
chmod 0644 %{buildroot}%{_docdir}/ceph/sample.ceph.conf
install -m 0644 -D COPYING %{buildroot}%{_docdir}/ceph/COPYING
install -m 0644 -D etc/sysctl/90-ceph-osd.conf %{buildroot}%{_sysctldir}/90-ceph-osd.conf
install -m 0755 -D src/tools/rbd_nbd/rbd-nbd_quiesce %{buildroot}%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
# cephadm binary plus its home directory with a locked-down .ssh.
install -m 0755 src/cephadm/cephadm %{buildroot}%{_sbindir}/cephadm
mkdir -p %{buildroot}%{_sharedstatedir}/cephadm
chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm
mkdir -p %{buildroot}%{_sharedstatedir}/cephadm/.ssh
chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm/.ssh
touch %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys
chmod 0600 %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys
# /sbin/mount.ceph compatibility symlink (non-usrmerged SUSE only)
%if 0%{?suse_version} && !0%{?usrmerged}
mkdir -p %{buildroot}/sbin
ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph
%endif
# udev rules
install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules
# sudoers.d
install -m 0440 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl
# Normalize python shebangs to the platform python3.
%if 0%{?rhel} >= 8
pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/*
pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_sbindir}/*
%endif
#set up placeholder directories
mkdir -p %{buildroot}%{_sysconfdir}/ceph
mkdir -p %{buildroot}%{_localstatedir}/run/ceph
mkdir -p %{buildroot}%{_localstatedir}/log/ceph
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/tmp
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mon
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/osd
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mds
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mgr
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash/posted
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/radosgw
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-osd
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mds
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rgw
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mgr
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd
mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd-mirror
# prometheus alerts
install -m 644 -D monitoring/prometheus/alerts/ceph_default_alerts.yml %{buildroot}/etc/prometheus/ceph/ceph_default_alerts.yml
%if 0%{?suse_version}
# create __pycache__ directories and their contents
%py3_compile %{buildroot}%{python3_sitelib}
# hardlink duplicate files under /usr to save space
%fdupes %{buildroot}%{_prefix}
%endif
%if 0%{?rhel} == 8
%py_byte_compile %{__python3} %{buildroot}%{python3_sitelib}
%endif
%clean
rm -rf %{buildroot}
#################################################################################
# files and systemd scriptlets
#################################################################################
%files
%files base
%{_bindir}/ceph-crash
%{_bindir}/crushtool
%{_bindir}/monmaptool
%{_bindir}/osdmaptool
%{_bindir}/ceph-kvstore-tool
%{_bindir}/ceph-run
%{_presetdir}/50-ceph.preset
%{_sbindir}/ceph-create-keys
%dir %{_libexecdir}/ceph
%{_libexecdir}/ceph/ceph_common.sh
%dir %{_libdir}/rados-classes
%{_libdir}/rados-classes/*
%dir %{_libdir}/ceph
%dir %{_libdir}/ceph/erasure-code
%{_libdir}/ceph/erasure-code/libec_*.so*
%dir %{_libdir}/ceph/compressor
%{_libdir}/ceph/compressor/libceph_*.so*
%{_unitdir}/ceph-crash.service
%dir %{_libdir}/ceph/crypto
%{_libdir}/ceph/crypto/libceph_*.so*
%if %{with lttng}
%{_libdir}/libos_tp.so*
%{_libdir}/libosd_tp.so*
%endif
%config(noreplace) %{_sysconfdir}/logrotate.d/ceph
%if 0%{?fedora} || 0%{?rhel}
%config(noreplace) %{_sysconfdir}/sysconfig/ceph
%endif
%if 0%{?suse_version}
%{_fillupdir}/sysconfig.*
%endif
%{_unitdir}/ceph.target
%dir %{python3_sitelib}/ceph_volume
%{python3_sitelib}/ceph_volume/*
%{python3_sitelib}/ceph_volume-*
%{_mandir}/man8/ceph-deploy.8*
%{_mandir}/man8/ceph-create-keys.8*
%{_mandir}/man8/ceph-run.8*
%{_mandir}/man8/crushtool.8*
%{_mandir}/man8/osdmaptool.8*
%{_mandir}/man8/monmaptool.8*
%{_mandir}/man8/ceph-kvstore-tool.8*
#set up placeholder directories
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash/posted
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/tmp
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-osd
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mds
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rgw
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mgr
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd-mirror
%post base
/sbin/ldconfig
%if 0%{?suse_version}
%fillup_only
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl preset ceph.target ceph-crash.service >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph.target ceph-crash.service
%endif
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl start ceph.target ceph-crash.service >/dev/null 2>&1 || :
fi
%preun base
%if 0%{?suse_version}
%service_del_preun ceph.target ceph-crash.service
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph.target ceph-crash.service
%endif
%postun base
/sbin/ldconfig
%systemd_postun ceph.target
%pre -n cephadm
getent group cephadm >/dev/null || groupadd -r cephadm
getent passwd cephadm >/dev/null || useradd -r -g cephadm -s /bin/bash -c "cephadm user for mgr/cephadm" -d %{_sharedstatedir}/cephadm cephadm
exit 0
%if ! 0%{?suse_version}
%postun -n cephadm
# Only remove the cephadm account on a true package erase ($1 == 0).  On
# upgrade the old package's %%postun runs *after* the new package is
# installed, so an unconditional userdel would delete the account the
# upgraded package still needs.
if [ $1 -eq 0 ] ; then
    userdel -r cephadm || true
fi
exit 0
%endif
%files -n cephadm
%{_sbindir}/cephadm
%{_mandir}/man8/cephadm.8*
%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm
%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh
%attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys
%files common
%dir %{_docdir}/ceph
%doc %{_docdir}/ceph/sample.ceph.conf
%license %{_docdir}/ceph/COPYING
%{_bindir}/ceph
%{_bindir}/ceph-authtool
%{_bindir}/ceph-conf
%{_bindir}/ceph-dencoder
%{_bindir}/ceph-rbdnamer
%{_bindir}/ceph-syn
%{_bindir}/cephfs-data-scan
%{_bindir}/cephfs-journal-tool
%{_bindir}/cephfs-table-tool
%{_bindir}/rados
%{_bindir}/radosgw-admin
%{_bindir}/rbd
%{_bindir}/rbd-replay
%{_bindir}/rbd-replay-many
%{_bindir}/rbdmap
%{_sbindir}/mount.ceph
%if 0%{?suse_version} && !0%{?usrmerged}
/sbin/mount.ceph
%endif
%if %{with lttng}
%{_bindir}/rbd-replay-prep
%endif
%{_bindir}/ceph-post-file
%{_tmpfilesdir}/ceph-common.conf
%{_mandir}/man8/ceph-authtool.8*
%{_mandir}/man8/ceph-conf.8*
%{_mandir}/man8/ceph-dencoder.8*
%{_mandir}/man8/ceph-diff-sorted.8*
%{_mandir}/man8/ceph-rbdnamer.8*
%{_mandir}/man8/ceph-syn.8*
%{_mandir}/man8/ceph-post-file.8*
%{_mandir}/man8/ceph.8*
%{_mandir}/man8/mount.ceph.8*
%{_mandir}/man8/rados.8*
%{_mandir}/man8/radosgw-admin.8*
%{_mandir}/man8/rbd.8*
%{_mandir}/man8/rbdmap.8*
%{_mandir}/man8/rbd-replay.8*
%{_mandir}/man8/rbd-replay-many.8*
%{_mandir}/man8/rbd-replay-prep.8*
%{_mandir}/man8/rgw-orphan-list.8*
%dir %{_datadir}/ceph/
%{_datadir}/ceph/known_hosts_drop.ceph.com
%{_datadir}/ceph/id_rsa_drop.ceph.com
%{_datadir}/ceph/id_rsa_drop.ceph.com.pub
%dir %{_sysconfdir}/ceph/
%config %{_sysconfdir}/bash_completion.d/ceph
%config %{_sysconfdir}/bash_completion.d/rados
%config %{_sysconfdir}/bash_completion.d/rbd
%config %{_sysconfdir}/bash_completion.d/radosgw-admin
%config(noreplace) %{_sysconfdir}/ceph/rbdmap
%{_unitdir}/rbdmap.service
%dir %{_udevrulesdir}
%{_udevrulesdir}/50-rbd.rules
%attr(3770,ceph,ceph) %dir %{_localstatedir}/log/ceph/
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/
# Create the ceph system user/group, preferring the fixed uid/gid 167 but
# falling back to a dynamic id if 167 is already taken (SUSE path).
%pre common
CEPH_GROUP_ID=167
CEPH_USER_ID=167
%if 0%{?rhel} || 0%{?fedora}
/usr/sbin/groupadd ceph -g $CEPH_GROUP_ID -o -r 2>/dev/null || :
/usr/sbin/useradd ceph -u $CEPH_USER_ID -o -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || :
%endif
%if 0%{?suse_version}
if ! getent group ceph >/dev/null ; then
    CEPH_GROUP_ID_OPTION=""
    getent group $CEPH_GROUP_ID >/dev/null || CEPH_GROUP_ID_OPTION="-g $CEPH_GROUP_ID"
    groupadd ceph $CEPH_GROUP_ID_OPTION -r 2>/dev/null || :
fi
if ! getent passwd ceph >/dev/null ; then
    CEPH_USER_ID_OPTION=""
    getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID"
    useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin 2>/dev/null || :
fi
usermod -c "Ceph storage service" \
        -d %{_localstatedir}/lib/ceph \
        -g ceph \
        -s /sbin/nologin \
        ceph
%endif
exit 0
%post common
%tmpfiles_create %{_tmpfilesdir}/ceph-common.conf
%postun common
# Package removal cleanup
if [ "$1" -eq "0" ] ; then
    rm -rf %{_localstatedir}/log/ceph
    rm -rf %{_sysconfdir}/ceph
fi
# ---- MDS daemon: payload and systemd lifecycle scriptlets ----
%files mds
%{_bindir}/ceph-mds
%{_mandir}/man8/ceph-mds.8*
%{_unitdir}/ceph-mds@.service
%{_unitdir}/ceph-mds.target
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mds
%post mds
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl preset ceph-mds@\*.service ceph-mds.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-mds@\*.service ceph-mds.target
%endif
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl start ceph-mds.target >/dev/null 2>&1 || :
fi
%preun mds
%if 0%{?suse_version}
%service_del_preun ceph-mds@\*.service ceph-mds.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-mds@\*.service ceph-mds.target
%endif
%postun mds
%systemd_postun ceph-mds@\*.service ceph-mds.target
if [ $1 -ge 1 ] ; then
    # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
    # "yes". In any case: if units are not running, do not touch them.
    SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
    if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
        source $SYSCONF_CEPH
    fi
    if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
        /usr/bin/systemctl try-restart ceph-mds@\*.service > /dev/null 2>&1 || :
    fi
fi
# ---- MGR daemon: payload and systemd lifecycle scriptlets ----
%files mgr
%{_bindir}/ceph-mgr
%dir %{_datadir}/ceph/mgr
%{_datadir}/ceph/mgr/mgr_module.*
%{_datadir}/ceph/mgr/mgr_util.*
%{_unitdir}/ceph-mgr@.service
%{_unitdir}/ceph-mgr.target
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mgr
%post mgr
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl preset ceph-mgr@\*.service ceph-mgr.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-mgr@\*.service ceph-mgr.target
%endif
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl start ceph-mgr.target >/dev/null 2>&1 || :
fi
%preun mgr
%if 0%{?suse_version}
%service_del_preun ceph-mgr@\*.service ceph-mgr.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-mgr@\*.service ceph-mgr.target
%endif
%postun mgr
%systemd_postun ceph-mgr@\*.service ceph-mgr.target
if [ $1 -ge 1 ] ; then
    # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
    # "yes". In any case: if units are not running, do not touch them.
    SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
    if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
        source $SYSCONF_CEPH
    fi
    if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
        /usr/bin/systemctl try-restart ceph-mgr@\*.service > /dev/null 2>&1 || :
    fi
fi
# ---- mgr modules: each installs its tree and restarts ceph-mgr on upgrade ----
%files mgr-dashboard
%{_datadir}/ceph/mgr/dashboard
%post mgr-dashboard
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%postun mgr-dashboard
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%files mgr-diskprediction-local
%{_datadir}/ceph/mgr/diskprediction_local
%post mgr-diskprediction-local
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%postun mgr-diskprediction-local
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%files mgr-modules-core
%dir %{_datadir}/ceph/mgr
%{_datadir}/ceph/mgr/alerts
%{_datadir}/ceph/mgr/balancer
%{_datadir}/ceph/mgr/crash
%{_datadir}/ceph/mgr/devicehealth
%{_datadir}/ceph/mgr/influx
%{_datadir}/ceph/mgr/insights
%{_datadir}/ceph/mgr/iostat
%{_datadir}/ceph/mgr/localpool
%{_datadir}/ceph/mgr/mds_autoscaler
%{_datadir}/ceph/mgr/mirroring
%{_datadir}/ceph/mgr/nfs
%{_datadir}/ceph/mgr/orchestrator
%{_datadir}/ceph/mgr/osd_perf_query
%{_datadir}/ceph/mgr/osd_support
%{_datadir}/ceph/mgr/pg_autoscaler
%{_datadir}/ceph/mgr/progress
%{_datadir}/ceph/mgr/prometheus
%{_datadir}/ceph/mgr/rbd_support
%{_datadir}/ceph/mgr/restful
%{_datadir}/ceph/mgr/selftest
%{_datadir}/ceph/mgr/snap_schedule
%{_datadir}/ceph/mgr/stats
%{_datadir}/ceph/mgr/status
%{_datadir}/ceph/mgr/telegraf
%{_datadir}/ceph/mgr/telemetry
%{_datadir}/ceph/mgr/test_orchestrator
%{_datadir}/ceph/mgr/volumes
%{_datadir}/ceph/mgr/zabbix
%files mgr-rook
%{_datadir}/ceph/mgr/rook
%post mgr-rook
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%postun mgr-rook
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%files mgr-k8sevents
%{_datadir}/ceph/mgr/k8sevents
%post mgr-k8sevents
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%postun mgr-k8sevents
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%files mgr-cephadm
%{_datadir}/ceph/mgr/cephadm
%post mgr-cephadm
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
%postun mgr-cephadm
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
fi
# ---- MON daemon: payload and systemd lifecycle scriptlets ----
%files mon
%{_bindir}/ceph-mon
%{_bindir}/ceph-monstore-tool
%{_mandir}/man8/ceph-mon.8*
%{_unitdir}/ceph-mon@.service
%{_unitdir}/ceph-mon.target
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mon
%post mon
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl preset ceph-mon@\*.service ceph-mon.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-mon@\*.service ceph-mon.target
%endif
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl start ceph-mon.target >/dev/null 2>&1 || :
fi
%preun mon
%if 0%{?suse_version}
%service_del_preun ceph-mon@\*.service ceph-mon.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-mon@\*.service ceph-mon.target
%endif
%postun mon
%systemd_postun ceph-mon@\*.service ceph-mon.target
if [ $1 -ge 1 ] ; then
    # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
    # "yes". In any case: if units are not running, do not touch them.
    SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
    if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
        source $SYSCONF_CEPH
    fi
    if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
        /usr/bin/systemctl try-restart ceph-mon@\*.service > /dev/null 2>&1 || :
    fi
fi
# ---- FUSE client ----
%files fuse
%{_bindir}/ceph-fuse
%{_mandir}/man8/ceph-fuse.8*
%{_sbindir}/mount.fuse.ceph
%{_mandir}/man8/mount.fuse.ceph.8*
%{_unitdir}/ceph-fuse@.service
%{_unitdir}/ceph-fuse.target
# ---- cephfs-mirror daemon: payload and systemd lifecycle scriptlets ----
%files -n cephfs-mirror
%{_bindir}/cephfs-mirror
%{_mandir}/man8/cephfs-mirror.8*
%{_unitdir}/cephfs-mirror@.service
%{_unitdir}/cephfs-mirror.target
%post -n cephfs-mirror
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl preset cephfs-mirror@\*.service cephfs-mirror.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post cephfs-mirror@\*.service cephfs-mirror.target
%endif
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl start cephfs-mirror.target >/dev/null 2>&1 || :
fi
%preun -n cephfs-mirror
%if 0%{?suse_version}
%service_del_preun cephfs-mirror@\*.service cephfs-mirror.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun cephfs-mirror@\*.service cephfs-mirror.target
%endif
%postun -n cephfs-mirror
%systemd_postun cephfs-mirror@\*.service cephfs-mirror.target
if [ $1 -ge 1 ] ; then
    # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
    # "yes". In any case: if units are not running, do not touch them.
    SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
    if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
        source $SYSCONF_CEPH
    fi
    if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
        /usr/bin/systemctl try-restart cephfs-mirror@\*.service > /dev/null 2>&1 || :
    fi
fi
%files -n rbd-fuse
%{_bindir}/rbd-fuse
%{_mandir}/man8/rbd-fuse.8*
# ---- rbd-mirror daemon: payload and systemd lifecycle scriptlets ----
%files -n rbd-mirror
%{_bindir}/rbd-mirror
%{_mandir}/man8/rbd-mirror.8*
%{_unitdir}/ceph-rbd-mirror@.service
%{_unitdir}/ceph-rbd-mirror.target
%post -n rbd-mirror
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl preset ceph-rbd-mirror@\*.service ceph-rbd-mirror.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
%endif
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl start ceph-rbd-mirror.target >/dev/null 2>&1 || :
fi
%preun -n rbd-mirror
%if 0%{?suse_version}
%service_del_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
%endif
%postun -n rbd-mirror
%systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
if [ $1 -ge 1 ] ; then
    # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
    # "yes". In any case: if units are not running, do not touch them.
    SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
    if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
        source $SYSCONF_CEPH
    fi
    if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
        /usr/bin/systemctl try-restart ceph-rbd-mirror@\*.service > /dev/null 2>&1 || :
    fi
fi
# ---- immutable-object-cache daemon: payload and systemd lifecycle scriptlets ----
%files immutable-object-cache
%{_bindir}/ceph-immutable-object-cache
%{_mandir}/man8/ceph-immutable-object-cache.8*
%{_unitdir}/ceph-immutable-object-cache@.service
%{_unitdir}/ceph-immutable-object-cache.target
%post immutable-object-cache
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl preset ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
%endif
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl start ceph-immutable-object-cache.target >/dev/null 2>&1 || :
fi
%preun immutable-object-cache
%if 0%{?suse_version}
%service_del_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
%endif
%postun immutable-object-cache
# NOTE(review): this is the only %%postun here that goes through $FIRST_ARG
# instead of using $1 directly like the sibling scriptlets; presumably a
# leftover from a SUSE service-macro expansion — confirm nothing sets
# FIRST_ARG before unifying on $1.
test -n "$FIRST_ARG" || FIRST_ARG=$1
%systemd_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
if [ $FIRST_ARG -ge 1 ] ; then
    # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
    # "yes". In any case: if units are not running, do not touch them.
    SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
    if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
        source $SYSCONF_CEPH
    fi
    if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
        /usr/bin/systemctl try-restart ceph-immutable-object-cache@\*.service > /dev/null 2>&1 || :
    fi
fi
%files -n rbd-nbd
%{_bindir}/rbd-nbd
%{_mandir}/man8/rbd-nbd.8*
%dir %{_libexecdir}/rbd-nbd
%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
# ---- RGW daemon and tools: payload and systemd lifecycle scriptlets ----
%files radosgw
%{_bindir}/ceph-diff-sorted
%{_bindir}/radosgw
%{_bindir}/radosgw-token
%{_bindir}/radosgw-es
%{_bindir}/radosgw-object-expirer
%{_bindir}/rgw-gap-list
%{_bindir}/rgw-gap-list-comparator
%{_bindir}/rgw-orphan-list
%{_libdir}/libradosgw.so*
%{_mandir}/man8/radosgw.8*
%dir %{_localstatedir}/lib/ceph/radosgw
%{_unitdir}/ceph-radosgw@.service
%{_unitdir}/ceph-radosgw.target
%post radosgw
/sbin/ldconfig
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl preset ceph-radosgw@\*.service ceph-radosgw.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-radosgw@\*.service ceph-radosgw.target
%endif
if [ $1 -eq 1 ] ; then
    /usr/bin/systemctl start ceph-radosgw.target >/dev/null 2>&1 || :
fi
%preun radosgw
%if 0%{?suse_version}
%service_del_preun ceph-radosgw@\*.service ceph-radosgw.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-radosgw@\*.service ceph-radosgw.target
%endif
%postun radosgw
/sbin/ldconfig
%systemd_postun ceph-radosgw@\*.service ceph-radosgw.target
if [ $1 -ge 1 ] ; then
    # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
    # "yes". In any case: if units are not running, do not touch them.
# Tail of the radosgw postun scriptlet: conditional restart on upgrade.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
  source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
  /usr/bin/systemctl try-restart ceph-radosgw@\*.service > /dev/null 2>&1 || :
fi
fi

# File list for the ceph-osd subpackage: OSD daemon, object-store tooling,
# ceph-volume, systemd units, sysctl tuning and the smartctl sudoers entry.
%files osd
%{_bindir}/ceph-clsinfo
%{_bindir}/ceph-bluestore-tool
%{_bindir}/ceph-erasure-code-tool
%{_bindir}/ceph-objectstore-tool
%{_bindir}/ceph-osdomap-tool
%{_bindir}/ceph-osd
%{_libexecdir}/ceph/ceph-osd-prestart.sh
%{_sbindir}/ceph-volume
%{_sbindir}/ceph-volume-systemd
%{_mandir}/man8/ceph-clsinfo.8*
%{_mandir}/man8/ceph-osd.8*
%{_mandir}/man8/ceph-bluestore-tool.8*
%{_mandir}/man8/ceph-volume.8*
%{_mandir}/man8/ceph-volume-systemd.8*
%{_unitdir}/ceph-osd@.service
%{_unitdir}/ceph-osd.target
%{_unitdir}/ceph-volume@.service
# OSD data directory is restricted to the ceph user/group.
%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/osd
%config(noreplace) %{_sysctldir}/90-ceph-osd.conf
%{_sysconfdir}/sudoers.d/ceph-osd-smartctl

%post osd
%if 0%{?suse_version}
if [ $1 -eq 1 ] ; then
  /usr/bin/systemctl preset ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target >/dev/null 2>&1 || :
fi
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_post ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
%endif
if [ $1 -eq 1 ] ; then
  /usr/bin/systemctl start ceph-osd.target >/dev/null 2>&1 || :
fi
# Apply the shipped sysctl tuning immediately; fall back to invoking
# systemd-sysctl directly where the sysctl_apply macro is unavailable.
%if 0%{?sysctl_apply}
%sysctl_apply 90-ceph-osd.conf
%else
/usr/lib/systemd/systemd-sysctl %{_sysctldir}/90-ceph-osd.conf > /dev/null 2>&1 || :
%endif

%preun osd
%if 0%{?suse_version}
%service_del_preun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
%endif
%if 0%{?fedora} || 0%{?rhel}
%systemd_preun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
%endif

%postun osd
%systemd_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
if [ $1 -ge 1 ] ; then
  # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
  # "yes". In any case: if units are not running, do not touch them.
# Tail of the osd postun scriptlet: conditional restart on upgrade.
SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
  source $SYSCONF_CEPH
fi
if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
  /usr/bin/systemctl try-restart ceph-osd@\*.service ceph-volume@\*.service > /dev/null 2>&1 || :
fi
fi

# Seastar-based OSD, only shipped when built with seastar.
%if 0%{with seastar}
%files crimson-osd
%{_bindir}/crimson-osd
%endif

# OCF resource agent for cluster-managed rbd devices (ocf build option).
%if %{with ocf}
%files resource-agents
%dir %{_prefix}/lib/ocf
%dir %{_prefix}/lib/ocf/resource.d
%dir %{_prefix}/lib/ocf/resource.d/ceph
%attr(0755,-,-) %{_prefix}/lib/ocf/resource.d/ceph/rbd
%endif

# librados runtime; the LTTng tracepoint library is optional.
%files -n librados2
%{_libdir}/librados.so.*
%dir %{_libdir}/ceph
%{_libdir}/ceph/libceph-common.so.*
%if %{with lttng}
%{_libdir}/librados_tp.so.*
%endif
%dir %{_sysconfdir}/ceph
%post -n librados2 -p /sbin/ldconfig
%postun -n librados2 -p /sbin/ldconfig

# C development files for librados.
%files -n librados-devel
%dir %{_includedir}/rados
%{_includedir}/rados/librados.h
%{_includedir}/rados/rados_types.h
%{_libdir}/librados.so
%if %{with lttng}
%{_libdir}/librados_tp.so
%endif
%{_bindir}/librados-config
%{_mandir}/man8/librados-config.8*

# C++ development headers for librados.
%files -n libradospp-devel
%dir %{_includedir}/rados
%{_includedir}/rados/buffer.h
%{_includedir}/rados/buffer_fwd.h
%{_includedir}/rados/crc32c.h
%{_includedir}/rados/inline_memory.h
%{_includedir}/rados/librados.hpp
%{_includedir}/rados/librados_fwd.hpp
%{_includedir}/rados/page.h
%{_includedir}/rados/rados_types.hpp

%files -n python%{python3_pkgversion}-rados
%{python3_sitearch}/rados.cpython*.so
%{python3_sitearch}/rados-*.egg-info

%files -n libcephsqlite
%{_libdir}/libcephsqlite.so
%post -n libcephsqlite -p /sbin/ldconfig
%postun -n libcephsqlite -p /sbin/ldconfig

%files -n libcephsqlite-devel
%{_includedir}/libcephsqlite.h

# Striping API layered on librados, optional build.
%if 0%{with libradosstriper}
%files -n libradosstriper1
%{_libdir}/libradosstriper.so.*
%post -n libradosstriper1 -p /sbin/ldconfig
%postun -n libradosstriper1 -p /sbin/ldconfig
%files -n libradosstriper-devel
%dir %{_includedir}/radosstriper
%{_includedir}/radosstriper/libradosstriper.h
%{_includedir}/radosstriper/libradosstriper.hpp
%{_libdir}/libradosstriper.so
%endif

# librbd runtime; cache plugins live under libdir/ceph/librbd.
%files -n librbd1
%{_libdir}/librbd.so.*
%if %{with lttng}
%{_libdir}/librbd_tp.so.*
%endif
%dir %{_libdir}/ceph/librbd
%{_libdir}/ceph/librbd/libceph_*.so*
%post -n librbd1 -p /sbin/ldconfig
%postun -n librbd1 -p /sbin/ldconfig

%files -n librbd-devel
%dir %{_includedir}/rbd
%{_includedir}/rbd/librbd.h
%{_includedir}/rbd/librbd.hpp
%{_includedir}/rbd/features.h
%{_libdir}/librbd.so
%if %{with lttng}
%{_libdir}/librbd_tp.so
%endif

%files -n librgw2
%{_libdir}/librgw.so.*
%if %{with lttng}
%{_libdir}/librgw_op_tp.so.*
%{_libdir}/librgw_rados_tp.so.*
%endif
%post -n librgw2 -p /sbin/ldconfig
%postun -n librgw2 -p /sbin/ldconfig

%files -n librgw-devel
%dir %{_includedir}/rados
%{_includedir}/rados/librgw.h
%{_includedir}/rados/rgw_file.h
%{_libdir}/librgw.so
%if %{with lttng}
%{_libdir}/librgw_op_tp.so
%{_libdir}/librgw_rados_tp.so
%endif

%files -n python%{python3_pkgversion}-rgw
%{python3_sitearch}/rgw.cpython*.so
%{python3_sitearch}/rgw-*.egg-info

%files -n python%{python3_pkgversion}-rbd
%{python3_sitearch}/rbd.cpython*.so
%{python3_sitearch}/rbd-*.egg-info

%files -n libcephfs2
%{_libdir}/libcephfs.so.*
%dir %{_sysconfdir}/ceph
%post -n libcephfs2 -p /sbin/ldconfig
%postun -n libcephfs2 -p /sbin/ldconfig

%files -n libcephfs-devel
%dir %{_includedir}/cephfs
%{_includedir}/cephfs/libcephfs.h
%{_includedir}/cephfs/ceph_ll_client.h
%dir %{_includedir}/cephfs/metrics
%{_includedir}/cephfs/metrics/Types.h
%{_libdir}/libcephfs.so

# Bundled tracing libraries, only when built with jaeger support.
%if %{with jaeger}
%files -n libjaeger
%{_libdir}/libopentracing.so.*
%{_libdir}/libthrift.so.*
%{_libdir}/libjaegertracing.so.*
%post -n libjaeger -p /sbin/ldconfig
%postun -n libjaeger -p /sbin/ldconfig
%endif

%files -n python%{python3_pkgversion}-cephfs
%{python3_sitearch}/cephfs.cpython*.so
%{python3_sitearch}/cephfs-*.egg-info
%{python3_sitelib}/ceph_volume_client.py
%{python3_sitelib}/__pycache__/ceph_volume_client.cpython*.py*

%files -n python%{python3_pkgversion}-ceph-argparse
%{python3_sitelib}/ceph_argparse.py
%{python3_sitelib}/__pycache__/ceph_argparse.cpython*.py*
%{python3_sitelib}/ceph_daemon.py
%{python3_sitelib}/__pycache__/ceph_daemon.cpython*.py*

%files -n python%{python3_pkgversion}-ceph-common
%{python3_sitelib}/ceph
%{python3_sitelib}/ceph-*.egg-info

%if 0%{with cephfs_shell}
%files -n cephfs-shell
%{python3_sitelib}/cephfs_shell-*.egg-info
%{_bindir}/cephfs-shell
%endif

%files -n cephfs-top
%{python3_sitelib}/cephfs_top-*.egg-info
%{_bindir}/cephfs-top
%{_mandir}/man8/cephfs-top.8*

# Test and benchmark binaries; packaged only with ceph_test_package.
%if 0%{with ceph_test_package}
%files -n ceph-test
%{_bindir}/ceph-client-debug
%{_bindir}/ceph_bench_log
%{_bindir}/ceph_kvstorebench
%{_bindir}/ceph_multi_stress_watch
%{_bindir}/ceph_erasure_code_benchmark
%{_bindir}/ceph_omapbench
%{_bindir}/ceph_objectstore_bench
%{_bindir}/ceph_perf_objectstore
%{_bindir}/ceph_perf_local
%{_bindir}/ceph_perf_msgr_client
%{_bindir}/ceph_perf_msgr_server
%{_bindir}/ceph_psim
%{_bindir}/ceph_radosacl
%{_bindir}/ceph_rgw_jsonparser
%{_bindir}/ceph_rgw_multiparser
%{_bindir}/ceph_scratchtool
%{_bindir}/ceph_scratchtoolpp
%{_bindir}/ceph_test_*
%{_bindir}/ceph-coverage
%{_bindir}/ceph-debugpack
%{_bindir}/ceph-dedup-tool
%if 0%{with seastar}
%{_bindir}/crimson-store-nbd
%endif
%{_mandir}/man8/ceph-debugpack.8*
%dir %{_libdir}/ceph
%{_libdir}/ceph/ceph-monstore-update-crush.sh
%endif

# Java bindings for libcephfs (JNI library, devel link, jars).
%if 0%{with cephfs_java}
%files -n libcephfs_jni1
%{_libdir}/libcephfs_jni.so.*
%post -n libcephfs_jni1 -p /sbin/ldconfig
%postun -n libcephfs_jni1 -p /sbin/ldconfig
%files -n libcephfs_jni-devel
%{_libdir}/libcephfs_jni.so
%files -n cephfs-java
%{_javadir}/libcephfs.jar
%{_javadir}/libcephfs-test.jar
%endif

%files -n rados-objclass-devel
%dir %{_includedir}/rados
%{_includedir}/rados/objclass.h

# SELinux policy module; post scriptlet installs the policy and relabels.
%if 0%{with selinux}
%files selinux
%attr(0600,root,root) %{_datadir}/selinux/packages/ceph.pp
%{_datadir}/selinux/devel/include/contrib/ceph.if
%{_mandir}/man8/ceph_selinux.8*

%post selinux
# backup file_contexts before update
. /etc/selinux/config
FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
# Install the policy
/usr/sbin/semodule -i %{_datadir}/selinux/packages/ceph.pp
# Load the policy if SELinux is enabled
if ! /usr/sbin/selinuxenabled; then
  # Do not relabel if selinux is not enabled
  exit 0
fi
if diff ${FILE_CONTEXT} ${FILE_CONTEXT}.pre > /dev/null 2>&1; then
  # Do not relabel if file contexts did not change
  exit 0
fi
# Check whether the daemons are running
/usr/bin/systemctl status ceph.target > /dev/null 2>&1
STATUS=$?
# Stop the daemons if they were running
if test $STATUS -eq 0; then
  /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
fi
# Relabel the files (also fixes labels left over from a first install)
/usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
rm -f ${FILE_CONTEXT}.pre
# The fixfiles command won't fix label for /var/run/ceph
/usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
# Start the daemons iff they were running before
if test $STATUS -eq 0; then
  /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
fi
exit 0

# Post-uninstall: only on full removal ($1 -eq 0), drop the policy module
# and restore the previous file contexts.
%postun selinux
if [ $1 -eq 0 ]; then
  # backup file_contexts before update
  . /etc/selinux/config
  FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
  cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
  # Remove the module
  /usr/sbin/semodule -n -r ceph > /dev/null 2>&1
  # Reload the policy if SELinux is enabled
  if ! /usr/sbin/selinuxenabled ; then
    # Do not relabel if SELinux is not enabled
    exit 0
  fi
  # Check whether the daemons are running
  /usr/bin/systemctl status ceph.target > /dev/null 2>&1
  STATUS=$?
# Tail of the selinux postun scriptlet: stop daemons, restore file contexts,
# then restart daemons only if they were running before.
# Stop the daemons if they were running
if test $STATUS -eq 0; then
  /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
fi
/usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
rm -f ${FILE_CONTEXT}.pre
# The fixfiles command won't fix label for /var/run/ceph
/usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
# Start the daemons if they were running before
if test $STATUS -eq 0; then
  /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
fi
fi
exit 0
%endif

# Grafana dashboard definitions consumed by the ceph dashboard.
%files grafana-dashboards
%if 0%{?suse_version}
%attr(0755,root,root) %dir %{_sysconfdir}/grafana
%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards
%endif
%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards/ceph-dashboard
%config %{_sysconfdir}/grafana/dashboards/ceph-dashboard/*
%doc monitoring/grafana/dashboards/README
%doc monitoring/grafana/README.md

# Default Prometheus alert definitions.
%files prometheus-alerts
%if 0%{?suse_version}
%attr(0755,root,root) %dir %{_sysconfdir}/prometheus
%endif
%attr(0755,root,root) %dir %{_sysconfdir}/prometheus/ceph
%config %{_sysconfdir}/prometheus/ceph/ceph_default_alerts.yml

%changelog
* Thu Jan 27 2022 Ceph Jenkins 2:16.2.0-152
- test/rbd_mirror: drop redundant MockJournaler instances (rhbz#2047279)
- rbd-mirror: fix races in snapshot-based mirroring deletion propagation (rhbz#2047279)
- rbd-mirror: don't default replay_requires_remote_image() implementation (rhbz#2047279)
- rbd-mirror: untangle StateBuilder::is_linked() overloads (rhbz#2047279)
- rbd-mirror: drop redundant initialization of StateBuilder members (rhbz#2047279)
- qa/suites/rbd: test case for one-way snapshot-based mirroring (rhbz#2047279)
- rbd-mirror: fix a couple of brainos in log messages (rhbz#2047279)
- rbd-mirror: unbreak one-way snapshot-based mirroring (rhbz#2047279)
- rbd-mirror: fix potential async op tracker leak in start_image_replayers (rhbz#2047279)
- rbd-mirror: remove image_map next_state if sets to the same state (rhbz#2047279)
- rbd-mirror: handle disabling/creating image in
PrepareLocalImageRequest (rhbz#2047279) - rbd-mirror: fix bootstrap sequence while the image is removed (rhbz#2047279) - rbd-mirror: remove image_mapped condition to remove image_map (rhbz#2047279) - cls/rbd: prevent image_status when mirror image is not created (rhbz#2047279) - rbd-mirror: add image_map cleanup in LoadRequest (rhbz#2047279) - qa/rbd-mirror: add OMAP cleanup checks (rhbz#2047279) - rbd-mirror: remove mirror image at shut_down when there is no images (rhbz#2047279) - rbd-mirror: add mirror status removal on ImageReplayer shutdown (rhbz#2047279) - cls/rbd: add mirror_image_status_remove on client (rhbz#2047279) - rbd-mirror: fix mirror image removal (rhbz#2047279) * Wed Jan 19 2022 Ceph Jenkins 2:16.2.0-151 - rgw: fix etags for multipart uploads too (rhbz#2011454) - rgw: refactor and revise tag fixes (rhbz#2011454) - rgw: defensive etag handling fixes (rhbz#2011454) - rgw: etag handling - do not append nul here. (rhbz#2011454) - rgw: remove unused routine in rgw_admin (rhbz#2011454) - rgw: fix up etags upon reading (rhbz#2011454) * Fri Jan 07 2022 Ceph Jenkins 2:16.2.0-150 - rgw: resolving crash in rgw caused while running test s3tests_boto3.functional.test_s3.test_bucket_policy_put_obj_copy_source. src_object needs to be used in place of s->src_object in RGWCopyObj::verify_permission(). 
(rhbz#2037741) * Tue Jan 04 2022 Ceph Jenkins 2:16.2.0-149 - mds: do not assert when receiving an unknown metric type (rhbz#2030451) * Wed Dec 08 2021 Ceph Jenkins 2:16.2.0-148 - Update patches * Tue Nov 23 2021 Ceph Jenkins 2:16.2.0-147 - mgr/dashboard: Update monitoring stack doc link (rhbz#2017449) * Fri Nov 19 2021 Ceph Jenkins 2:16.2.0-146 - mon: MonMap: do not increase mon_info_t's compatv in stretch mode, for real (rhbz#2025079) * Tue Nov 09 2021 Ceph Jenkins 2:16.2.0-145 - rgw: url_decode before parsing copysource in copyobject (rhbz#2021387) * Thu Nov 04 2021 Ceph Jenkins 2:16.2.0-144 - mgr/cephadm: make scheduler able to accommodate offline/maintenance hosts (rhbz#1985401) * Thu Oct 14 2021 Ceph Jenkins 2:16.2.0-143 - rgw: under fips, set flag to allow md5 in select rgw ops - for review (rhbz#2002220) - rgw: under fips, set flag to allow md5 in select rgw ops (rhbz#2002220) * Wed Oct 13 2021 Ceph Jenkins 2:16.2.0-142 - mgr/dashboard: Incorrect MTU mismatch warning (rhbz#1998114) * Fri Oct 08 2021 Ceph Jenkins 2:16.2.0-141 - doc: update stretch mode documentation for new commands (rhbz#1995874) - mon: dump tiebreaker_mon in MonMap (rhbz#1995874) - mon: update MonMap::last_changed on stretch commands which didn't (rhbz#1995874) - mon: remove deleted monitors from the disallowed_leaders set (rhbz#1995874) - mon: don't let users remove the tiebreaker monitor from the map (rhbz#1995874) - mon: add "mon set_new_tiebreaker" command, so stretch clusters can replace it (rhbz#1995874) - mon: remove an incomplete and unnecessary wait_for_readable() bailout (rhbz#1995874) * Thu Sep 30 2021 Ceph Jenkins 2:16.2.0-140 - mon: MMonProbe: direct MMonJoin messages to the leader, instead of the first mon (rhbz#2001494) - mon: set_healthy_stretch_mode in update_from_paxos, not random leader calls!
(rhbz#2001494) - mon: maintain stretch_recovery_triggered in new OSDMon::set_*_stretch_mode (rhbz#2001494) - mon: add a set_recovery_stretch_mode function (rhbz#2001494) - mon: rename maybe_engage_stretch_mode to try_engage_stretch_mode (rhbz#2001494) - osd: osdmap: do not assert target_v when encoding under stretch mode (rhbz#2001494) - mon: let users specify a crush location on boot, and send it in MMonJoin (rhbz#2001494) - mon: messages: Extend MMonJoin so it can provide a crush_location on join (rhbz#2001494) - mon: if in stretch mode, don't allow new mons to join without a location (rhbz#2001494) - mon: notify_new_monmap in all the places we update our monmap (rhbz#2001494) * Wed Sep 29 2021 Ceph Jenkins 2:16.2.0-139 - test: Add test for mgr hang when osd is full (rhbz#1910272) - mgr: Set client_check_pool_perm to false (rhbz#1910272) - mds: Add full caps to avoid osd full check (rhbz#1910272) - qa: add test for standby-replay marking rank damaged (rhbz#2002891) - MDSMonitor: handle damaged from standby-replay (rhbz#2002891) - mds: add config to mark rank damaged in standby-replay (rhbz#2002891) - include: unset std::hex after printing CompatSet (rhbz#2002891) - mds: refactor iterator lookup (rhbz#2002891) - mds: harden rank lookup (rhbz#2002891) - mds: add MDSMap method for creating null MDSMap (rhbz#2002891) - mds: only update beacon epoch if newer (rhbz#2002891) - mds: harden standby_mds lookup (rhbz#2002891) - mon/FSCommands: accept generic ostream rather than stringstream (rhbz#2002891) - include: add less verbose CompatSet dump (rhbz#2002891) - include: add dump operator for Feature (rhbz#2002891) - include: add const qualifier to appropriate CompatSet methods (rhbz#2002891) - mon/MDSMonitor.cc: fix join fscid not applied with pending fsmap at boot (rhbz#2002891) * Wed Sep 29 2021 Ceph Jenkins 2:16.2.0-138 - mds: avoid journaling overhead for ceph.dir.subvolume for no-op case (rhbz#1973832) * Wed Sep 29 2021 Ceph Jenkins 2:16.2.0-137 - 
mgr/{prometheus,restful}: Fix url generation again (rhbz#1975338) - mgr/cephadm: fix generation of wrong IPv6 urls (rhbz#1975338) - mgr/restful: Fix url generation for IPv6 hosts (rhbz#1975338) - mgr/prometheus: Fix url generation for IPv6 hosts (rhbz#1975338) - mgr/dashboard: Fix redirect to active MGR (rhbz#1975338) - pybind/mgr: move build_url from dashboard to mgr_util (rhbz#1975338) * Mon Sep 27 2021 Ceph Jenkins 2:16.2.0-136 - auth,mon: don't log "unable to find a keyring" error when key is given (rhbz#1981186) * Mon Sep 27 2021 Ceph Jenkins 2:16.2.0-135 - librbd: propagate CEPH_OSD_FLAG_FULL_TRY from IoCtx to IOContext (rhbz#1969301) - neorados: introduce per-IOContext op flags (rhbz#1969301) - neorados: always apply per-Op op flags (rhbz#1969301) - librados/IoCtxImpl: apply extra_op_flags to watches and notifies (rhbz#1969301) - librados/IoCtxImpl: preserve extra_op_flags when duping (rhbz#1969301) * Fri Sep 24 2021 Ceph Jenkins 2:16.2.0-134 - rgw/sts: code to check IAM policy and return an appropriate error incase Resource specified in the IAM policy is incorrect and is discarded. The IAM policy can be a resource policy or an identity policy. This is for policies that have already been set. (rhbz#2007451) - rgw/sts: code for returning an error when an IAM policy resource belongs to someone else's tenant. 
(rhbz#2007451) * Fri Sep 24 2021 Thomas Serlin 2:16.2.0-133 - mgr/volumes: Fix a race during clone cancel (rhbz#1980920) - mgr/volumes: Fail subvolume removal if it's in progress (rhbz#1980920) - doc: fix `daemon status` interface (exclude file system name) (rhbz#1988338) - test: adjust mirroring tests for `daemon status` change (rhbz#1988338) - mgr/mirroring: `daemon status` command does not require file system name (rhbz#1988338) - tools/cephfs_mirror: fix lock declaratie/locking (rhbz#2002140) - cephfs-mirror: shutdown ClusterWatcher on termination (rhbz#2002140) - mon/MDSMonitor: check fscid exists for legacy case (rhbz#1976915) - client: check if a mds rank is `up` before fetching connection addr (rhbz#1976914) * Thu Sep 23 2021 Ceph Jenkins 2:16.2.0-132 - mgr/dashboard: Update downstream doc links (rhbz#2000862) * Wed Sep 22 2021 Ken Dreyer - 2:16.2.0-131 - link to tcmalloc on s390x (rhbz#2006887) * Wed Sep 22 2021 Ceph Jenkins 2:16.2.0-130 - cephadm: Fix bootstrap error with IPv6 mon-ip (rhbz#2002639) * Tue Sep 21 2021 Ceph Jenkins 2:16.2.0-129 - mon: check mdsmap is resizeable before promoting standby-replay (rhbz#2002398) * Tue Sep 21 2021 Ceph Jenkins 2:16.2.0-128 - mds: check rejoin_ack_gather before enter rejoin_gather_finish (rhbz#1989271) * Thu Sep 16 2021 Ceph Jenkins 2:16.2.0-127 - mgr/cephadm: Fix OSD replacement in hosts with FQDN host name (rhbz#1954503) * Tue Sep 14 2021 Ceph Jenkins 2:16.2.0-126 - mgr/cephadm: update downstream monitoring tags (rhbz#1996090) - mgr/cephadm: fix --service-type flag in orch ls when service has id (rhbz#1964951) * Tue Sep 14 2021 Ceph Jenkins 2:16.2.0-125 - mgr/cephadm: Don't allow stopping full mgr, mon or osd services (rhbz#1976820) * Fri Sep 10 2021 Ceph Jenkins 2:16.2.0-124 - mds: create file system with specific ID (rhbz#1975608) * Thu Sep 09 2021 Ceph Jenkins 2:16.2.0-123 - librbd: drop ValidatePoolRequest::m_op_work_queue (rhbz#2000434) - librbd: fix pool validation lockup (rhbz#2000434) * Thu Sep 09 2021 
Ceph Jenkins 2:16.2.0-122 - mgr/prometheus:Improve the pool metadata (rhbz#2002557) * Wed Sep 08 2021 Ceph Jenkins 2:16.2.0-121 - kv/RocksDBStore: Add handling of block_cache option for resharding (rhbz#1982456) - common/options: Set osd_client_message_cap to 256. (rhbz#1988165) * Tue Sep 07 2021 Ceph Jenkins 2:16.2.0-120 - mgr/DaemonServer.cc: prevent integer underflow that is triggered by large increases to pg_num/pgp_num (rhbz#2001152) * Thu Sep 02 2021 Ceph Jenkins 2:16.2.0-119 - Update patches * Thu Sep 02 2021 Ceph Jenkins 2:16.2.0-118 - pybind/mgr/stats: check if cmdtag is a valid key in client_metadata (rhbz#1979520) * Wed Aug 18 2021 Ceph Jenkins 2:16.2.0-117 - rgw: fix sts memory leak (rhbz#1994616) * Mon Aug 16 2021 Ceph Jenkins 2:16.2.0-116 - test/store_test: more testing for deferred writes (rhbz#1991677) - os/bluestore: enforce one more non-inclusive comparision against prefer_deferred_size (rhbz#1991677) - os/bluestore: Better handling of deferred write trigger (rhbz#1991677) - os/bluestore: account for alignment with max_blob_size when splitting write I/O into chunks. (rhbz#1991677) - os/bluestore: introduce l_bluestore_write_deferred_bytes perf counter. 
(rhbz#1991677) - os/bluestore: use non-inclusive comparision against prefer_deferred_size (rhbz#1991677) - os/bluestore: improve logging around deferred writes (rhbz#1991677) - os/bluestore: fix missing 'l_bluestore_write_deferred' perf counter increment (rhbz#1991677) - os/bluestore: cosmetic cleanups (rhbz#1991677) * Wed Aug 11 2021 Ceph Jenkins 2:16.2.0-115 - cls/cmpomap: empty values are 0 in U64 comparisons (rhbz#1992445) * Mon Aug 09 2021 Ceph Jenkins 2:16.2.0-114 - rgw: default auth_client_required=cephx (rhbz#1981682) - radosgw-admin: 'sync status' is not behind if there are no mdlog entries (rhbz#1989552) * Fri Aug 06 2021 Ceph Jenkins 2:16.2.0-113 - rgw: default ms_mon_client_mode = secure (rhbz#1981682) * Wed Aug 04 2021 Ceph Jenkins 2:16.2.0-112 - rgw: add the condition of lock mode conversion to PutObjRentention (rhbz#1989752) * Wed Aug 04 2021 Ceph Jenkins 2:16.2.0-111 - doc/cephadm: Add RGW ssl (rhbz#1981682) * Wed Aug 04 2021 Ceph Jenkins 2:16.2.0-110 - mgr/dashboard: show perf. counters for rgw svc. on Cluster > Hosts (rhbz#1987674) - mgr/dashboard: Cluster > Hosts > Performance Counters: do not show context bar. (rhbz#1987674) * Wed Aug 04 2021 Ceph Jenkins 2:16.2.0-109 - mgr/dashboard: fix ssl cert validation for rgw service creation (rhbz#1987338) * Mon Jul 26 2021 Ceph Jenkins 2:16.2.0-108 - cephadm: Introduce unit.stop (rhbz#1979449) - cephadm: don't log on exit. 
(rhbz#1979449) - cephadm: `stats` might return `--` for containers (rhbz#1979449) - cephadm: exec: also search for old cname (rhbz#1979449) - cephadm: dashes: use both cnames for `inspect` (rhbz#1979449) - cephadm: Add CephContainer.for_daemon() (rhbz#1979449) - cephadm: use dashes for container names (rhbz#1979449) * Fri Jul 23 2021 Ceph Jenkins 2:16.2.0-107 - test: add test for checking readd after remove for a directory path (rhbz#1982069) - cephfs-mirror: record directory path cancel in DirRegistry (rhbz#1982069) - cephfs-mirror: complete context when a mirror instance is not failed or blocklisted (rhbz#1982069) * Wed Jul 21 2021 Ceph Jenkins 2:16.2.0-106 - Update patches * Tue Jul 20 2021 Ceph Jenkins 2:16.2.0-105 - rgw/notifications: send correct size in case of delete marker creation (rhbz#1982532) - rgw/notification: send current time in complete multipart upload event (rhbz#1979564) - rgw/http/notifications: support content type in HTTP POST messages (rhbz#1943619) * Mon Jul 19 2021 Ceph Jenkins 2:16.2.0-104 - rgw: allow ordered bucket listing to work when many filtered out entries (rhbz#1975939) - rgw: allow CLSRGWConcurrentIO to handle "advancing" retries (rhbz#1975939) - rgw: de-conflate shard_id and request_id in CLSRGWConcurrentIO (rhbz#1975939) - rgw: bucket index list produces incorrect result when non-ascii entries (rhbz#1983838) - test/cls_rgw: make bi_list test not rely on osd_max_omap_entries_per_request (rhbz#1983838) - test/cls_rgw: test bi_list for objects with non-ascii names (rhbz#1983838) - cls/rgw: look for plane entries in non-ascii plain namespace too (rhbz#1983838) * Fri Jul 16 2021 Ceph Jenkins 2:16.2.0-103 - rgw: radosgw-admin errors if marker not specified on data/mdlog trim (rhbz#1982275) * Wed Jul 14 2021 Ceph Jenkins 2:16.2.0-102 - rgw: Robust notify invalidates on cache timeout (rhbz#1978125) - rgw: distribute() takes RGWCacheNotifyInfo (rhbz#1978125) - qa/rgw: run multisite tests with metadata sync error injection 
(rhbz#1978251) - rgw: limit concurrency of metadata sync (rhbz#1978251) - rgw: metadata sync treats all errors as 'transient' (rhbz#1978251) * Wed Jul 14 2021 Ceph Jenkins 2:16.2.0-101 - rgw: Don't segfault on datalog trim (rhbz#1982275) * Wed Jul 14 2021 Ceph Jenkins 2:16.2.0-100 - rgw/notifications: cache object size to avoid accessing invalid memoery (rhbz#1960648) - rgw/notification: send correct size in COPY events (rhbz#1960648) * Wed Jul 14 2021 Ceph Jenkins 2:16.2.0-99 - mgr/dashboard: remove usage of 'rgw_frontend_ssl_key' (rhbz#1981688) * Wed Jul 07 2021 Thomas Serlin - 2:16.2.0-98 - ceph.spec: disable rbd persistent writeback cache in ssd mode * Tue Jul 06 2021 Ceph Jenkins 2:16.2.0-97 - ceph.spec.in, common/options: disable rbd persistent writeback cache in ssd mode (rhbz#1976223) * Tue Jul 06 2021 Ceph Jenkins 2:16.2.0-96 - mgr/orchestrator: DG loads properly the unmanaged attribute (rhbz#1896693) * Sun Jul 04 2021 Ceph Jenkins 2:16.2.0-95 - rgw/notification: fixing the "persistent=false" flag (rhbz#1933677) * Sun Jul 04 2021 Ceph Jenkins 2:16.2.0-94 - rgw/notification: support version-id for all event types (rhbz#1960640) * Sun Jul 04 2021 Ceph Jenkins 2:16.2.0-93 - rgw/notifications: support v4 auth for topics and notifications (rhbz#1954910) * Sun Jul 04 2021 Ceph Jenkins 2:16.2.0-92 - rgw/notification: make notifications agnostic of bucket reshard (rhbz#1962235) * Sat Jul 03 2021 Ceph Jenkins 2:16.2.0-91 - rgw/notifications: support metadata filter in CompleteMultipartUpload events (rhbz#1963047) - rgw/notifications: support metadata filter in COPY events (rhbz#1963047) * Fri Jul 02 2021 Ceph Jenkins 2:16.2.0-90 - cephadm: specify addr on bootstrap's host add (rhbz#1976086) * Fri Jul 02 2021 Ceph Jenkins 2:16.2.0-89 - mgr: move GIL helpers to Gil.{h,cc} (rhbz#1975879) - mgr/ActivePyModules: streamline mgr_ips a bit (rhbz#1975879) - mgr/ActivePyModules: don't take unnecessary lock (rhbz#1975879) - pybind/mgr/mgr_module: use get("mgr_ips") for 
standby get_mgr_ip() (rhbz#1975879) - mgr: add get() for standby modules (rhbz#1975879) - mgr/dashboard: User database migration has been cut out (rhbz#1978341) - rgw/notifications: delete bucket notification object when empty * Wed Jun 30 2021 Ceph Jenkins 2:16.2.0-88 - osd/scrub: replace a ceph_assert() with a test (rhbz#1977802) * Wed Jun 30 2021 Ceph Jenkins 2:16.2.0-87 - logrotate: include cephfs-mirror daemon (rhbz#1967916) - cephfs-mirror: reopen logs on SIGHUP (rhbz#1967916) * Tue Jun 29 2021 Ceph Jenkins 2:16.2.0-86 - mgr/cephadm: induce retune of osd memory on osd creation (rhbz#1939354) - mgr/cephadm: autotune osd memory (rhbz#1939354) - common/options: add osd_memory_target_autotune (rhbz#1939354) * Mon Jun 28 2021 Ceph Jenkins 2:16.2.0-85 - mon/OSDMonitor: drop stale failure_info even if can_mark_down() (rhbz#1905339) * Fri Jun 25 2021 Ceph Jenkins 2:16.2.0-84 - cephadm: set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 (rhbz#1970597) - cephfs-mirror: silence warnings when connecting via mon host (rhbz#1970549) - test: add test to verify adding an active peer back to source (rhbz#1976121) - pybind/mirroring: disallow adding a active peer back to source (rhbz#1976121) - pybind/cephfs: interface to fetch file system id (rhbz#1976121) - test: update log-ignorelist for fs:mirror test (rhbz#1976121) * Thu Jun 24 2021 Ceph Jenkins 2:16.2.0-83 - mgr/dashboard: disable NFSv3 support in dashboard (rhbz#1975788) * Thu Jun 24 2021 Ceph Jenkins 2:16.2.0-82 - mgr/dashboard: Alertmanager fails to POST alerts (rhbz#1974708) * Wed Jun 23 2021 Ceph Jenkins 2:16.2.0-81 - RGW - Bucket Remove Op: Pass in user (rhbz#1965169) * Wed Jun 23 2021 Ceph Jenkins 2:16.2.0-80 - rgw: avoid infinite loop when deleting a bucket (rhbz#1975385) * Mon Jun 21 2021 Ceph Jenkins 2:16.2.0-79 - mon: Sanely set the default CRUSH rule when creating pools in stretch mode (rhbz#1954923) * Sat Jun 19 2021 Ceph Jenkins 2:16.2.0-78 - mgr/cephadm: Use GA downstream container image locations 
(rhbz#1896495) * Fri Jun 18 2021 Ceph Jenkins 2:16.2.0-77 - Update patches * Fri Jun 18 2021 Ceph Jenkins 2:16.2.0-76 - Update patches * Fri Jun 18 2021 Ceph Jenkins 2:16.2.0-75 - mgr/nfs: do not depend on cephadm.utils (rhbz#1958236) - mgr/cephadm/inventory: do not try to resolve current mgr host (rhbz#1958236) - pybind/mgr/mgr_module: make get_mgr_ip() return mgr's IP from mgrmap (rhbz#1958236) - mgr/restful: use get_mgr_ip() instead of hostname (rhbz#1958236) - cephadm: stop passing --no-hosts to podman (rhbz#1958236) - mgr/nfs: use host.addr for backend IP where possible (rhbz#1958236) - mgr/cephadm: convert host addr if non-IP to IP (rhbz#1958236) - mgr/dashboard,prometheus: new method of getting mgr IP (rhbz#1958236) - doc/cephadm: remove any reference to the use of DNS or /etc/hosts (rhbz#1958236) - mgr/cephadm: use known host addr (rhbz#1958236) - mgr/cephadm: resolve IP at 'orch host add' time (rhbz#1958236) - mgr/cephadm: apply hostname/addr checks to 'orch host set-addr' too (rhbz#1958236) * Thu Jun 17 2021 Ceph Jenkins 2:16.2.0-74 - rgw: when deleted obj removed in versioned bucket, extra del-marker added (rhbz#1958284) * Thu Jun 17 2021 Ceph Jenkins 2:16.2.0-73 - mgr/dashboard: Fix 500 error while exiting out of maintenance (rhbz#1973061) * Fri Jun 11 2021 Ceph Jenkins 2:16.2.0-72 - cephadm: add --zap-osds argument to rm_cluster (rhbz#1881192) - cephadm: implement zap-osds --fsid ... 
command (rhbz#1881192) * Fri Jun 11 2021 Ceph Jenkins 2:16.2.0-71 - mgr/dashboard: Fix for doc link pointing to pacific instead of 5 (rhbz#1970763) * Thu Jun 10 2021 Ceph Jenkins 2:16.2.0-70 - mgr/pybind/snap_schedule: return valid json for 'status' command (rhbz#1956619) - mgr/pybind/snap_schedule: do not fail when no fs snapshot schedules are available (rhbz#1956619) * Thu Jun 10 2021 Ceph Jenkins 2:16.2.0-69 - RGW - Don't move attrs before setting them (rhbz#1962125) * Thu Jun 10 2021 Ceph Jenkins 2:16.2.0-68 - rados: increase osd_max_write_op_reply_len default to 64 bytes (rhbz#1737163) * Wed Jun 09 2021 Ceph Jenkins 2:16.2.0-67 - Update patches * Wed Jun 09 2021 Ceph Jenkins 2:16.2.0-66 - test: add test to verify incremental snapshot updates (rhbz#1954681) - cephfs-mirror: synchronize file mode (rhbz#1954681) - cephfs-mirror: transfer snapshot diffs whenever possible (rhbz#1954681) - cephfs-mirror: remove hardcoded metadata count when creating snapshot (rhbz#1954681) - cephfs-mirror: adjust PeerReplayer::cleanup_remote_dir() for purging any remote (sub)directory (rhbz#1954681) - mds: introduce ceph.mirror.dirty_snap_id vxattr (rhbz#1954681) - cephfs-mirror: allow connecting to local cluster using mon address (rhbz#1969934) - test: test to verify dir path removal when no mirror daemons are running (rhbz#1969916) - pybind/mirroring: advance state machine from stalled state (rhbz#1969916) - pybind/mirroring: start from correct state during policy init (rhbz#1969916) - test: adjust mirroring test to use changed `daemon status` JSON (rhbz#1956620) - doc: adjust `daemon status` section with changed JSON (rhbz#1956620) - pybind/mirroring: sanitize `daemon status` JSON (rhbz#1956620) - test: disable mirroring module for certain tests (rhbz#1956620) - doc: document cephfs-mirror configuration options (rhbz#1969896) - cephfs-mirror: use sensible mount timeout when mounting local/remote fs (rhbz#1969896) - test: add tests for settting mount timeout (rhbz#1969896) - 
pybind/cephfs: add interface to set mount timeout (rhbz#1969896) - libcephfs: add interface to set mount timeout (rhbz#1969896) - include: define AT_REMOVEDIR on Windows (rhbz#1956341) - test: tests for *at() libcephfs APIs (rhbz#1956341) - test: cleanup files/dirs before finishing test (rhbz#1956341) - client: non-at APIs call at-APIs with CEPHFS_AT_FDCWD as file descriptor (rhbz#1956341) - libcephfs: introduce basic *at() calls (rhbz#1956341) - client: remove redundant caps_issued_mask() call in Client::fstatx() (rhbz#1956341) - test: add test to validate snap synchronization with parent directory snapshots (rhbz#1959419) - cephfs-mirror: ignore parent directory snapshots when building snap map (rhbz#1959419) - doc: document CephFS Snapshot Mirroring (rhbz#1969896) - doc: detail `fs snapshot mirror daemon status` mgr command (rhbz#1969896) - doc: s///g (rhbz#1969896) * Wed Jun 09 2021 Ceph Jenkins 2:16.2.0-65 - cephadm: improve is_container_running() (rhbz#1969302) * Tue Jun 08 2021 Ceph Jenkins 2:16.2.0-64 - mgr/dashboard: cannot create 'ha-rgw' service for dashboard 5.0 (rhbz#1968397) - mgr/cephadm:fix alerts sent to wrong URL (rhbz#1832807) * Tue Jun 08 2021 Ceph Jenkins 2:16.2.0-63 - qa/tasks/nfs: add test to check if cmds fail on not passing required arguments (rhbz#1951969) - mgr/nfs: fix flake8 missing whitespace around parameter equals error (rhbz#1951969) - mgr/nfs: annotate _cmd_nfs_* methods return value (rhbz#1951969) - doc/cephfs/nfs: add section about ganesha logs (rhbz#1951969) - doc/cephfs/nfs: Replace volume/nfs with nfs (rhbz#1951969) - doc/cephfs/nfs: add note about export management with volume/nfs interface only (rhbz#1951969) - spec: add nfs to spec file (rhbz#1951969) - mgr/nfs: Don't enable nfs module by default (rhbz#1951969) - mgr/nfs: check for invalid chars in cluster id (rhbz#1951969) - mgr/nfs: Use CLICommand wrapper (rhbz#1951969) - mgr/nfs: reorg nfs files (rhbz#1951969) - mgr/nfs: Check if transport or protocol are list instance 
(rhbz#1951969) - mgr/nfs: reorg cluster class and common helper methods (rhbz#1951969) - mgr/nfs: move common export helper methods to ExportMgr class (rhbz#1951969) - mgr/nfs: move validate methods into new ValidateExport class (rhbz#1951969) - mgr/nfs: add custom exception module (rhbz#1951969) - mgr/nfs: create new module for export utils (rhbz#1951969) - mgr/nfs: rename fs dir to export (rhbz#1951969) - mgr/volumes/nfs: Move nfs code out of volumes plugin (rhbz#1951969) - doc: add note about removal of the `cephfs` nfs cluster type (rhbz#1951969) - mgr/volumes/nfs: drop `type` param during cluster create (rhbz#1951969) * Mon Jun 07 2021 Ceph Jenkins 2:16.2.0-62 - test/allocator_replay_test: make allocator type configurable (rhbz#1968582) - os/bluestore: fix unexpected ENOSPC in Avl/Hybrid allocators. (rhbz#1968582) * Mon Jun 07 2021 Ceph Jenkins 2:16.2.0-61 - Update patches * Mon Jun 07 2021 Ceph Jenkins 2:16.2.0-60 - mgr/dashboard: cannot create 'ha-rgw' service for dashboard 5.0 (rhbz#1968397) * Thu Jun 03 2021 Ceph Jenkins 2:16.2.0-59 - mds/scrub: write root inode backtrace at creation (rhbz#1794781) * Thu Jun 03 2021 Ceph Jenkins 2:16.2.0-58 - rgw: completion of multipart upload leaves delete marker (rhbz#1961516) * Thu Jun 03 2021 Ceph Jenkins 2:16.2.0-57 - mgr/dashboard: fix bucket objects and size calculations (rhbz#1963965) * Thu Jun 03 2021 Ceph Jenkins 2:16.2.0-56 - librbd: don't stop at the first unremovable image when purging (rhbz#1966687) - rbd: combined error message for expected Trash::purge() errors (rhbz#1966687) - rbd: propagate Trash::purge() result (rhbz#1966687) * Thu Jun 03 2021 Ceph Jenkins 2:16.2.0-55 - mon,doc: deprecate CephFS min_compat_client (rhbz#1958131) - doc: add alternate_name cephfs feature to table (rhbz#1958131) * Wed Jun 02 2021 Ceph Jenkins 2:16.2.0-54 - osd: compute OSD's space usage ratio via raw space utilization (rhbz#1962481) * Wed Jun 02 2021 Ceph Jenkins 2:16.2.0-53 - rgw: fix spelling of eTag in S3 message 
structure (rhbz#1966676) - rgw: remove s3: prefix in eventName value of s3 event message structure (rhbz#1966676) * Wed Jun 02 2021 Ceph Jenkins 2:16.2.0-52 - qa: increase fragmentation to improve uniform distribution (rhbz#1964224) * Wed Jun 02 2021 Ceph Jenkins 2:16.2.0-51 - rgw: parse tenant name out of rgwx-bucket-instance (rhbz#1959804) * Wed Jun 02 2021 Ceph Jenkins 2:16.2.0-50 - mgr/dashboard: show RGW tenant user id correctly in 'NFS create export' form. (rhbz#1962175) * Wed Jun 02 2021 Ceph Jenkins 2:16.2.0-49 - mgr/dashboard: drop container image name and id from services list (rhbz#1958090) * Wed Jun 02 2021 Ceph Jenkins 2:16.2.0-48 - mgr/dashboard: pass Grafana datasource in URL (rhbz#1963849) * Wed Jun 02 2021 Ceph Jenkins 2:16.2.0-47 - cephfs-top: set the cursor to be invisible (rhbz#1951059) - cephfs-top: self-adapt the display according to the window size (rhbz#1951059) - cephfs-top: use the default window object from curses.wrapper() (rhbz#1951059) - cephfs-top: improve the output (rhbz#1951059) * Wed May 26 2021 Ceph Jenkins 2:16.2.0-46 - mgr/dashboard: fix API docs link (rhbz#1964925) * Tue May 25 2021 Ceph Jenkins 2:16.2.0-45 - mgr/cephadm: fix issue with missing prometheus alerts (rhbz#1832807) - mgr/cephadm: Use `cephfs-mirror` caps profile (rhbz#1957607) * Tue May 25 2021 Ceph Jenkins 2:16.2.0-44 - rgw: Fix datalog error introduced in fix (rhbz#1951079) * Tue May 25 2021 Ceph Jenkins 2:16.2.0-43 - librbd/mirror/snapshot: avoid UnlinkPeerRequest with an unlinked peer (rhbz#1964146) - rbd-mirror: fix segfault in snapshot replayer shutdown (rhbz#1963974) * Mon May 24 2021 Ceph Jenkins 2:16.2.0-42 - rgw: Simplify log shard probing and err on the side of omap (rhbz#1951079) * Mon May 24 2021 Ceph Jenkins 2:16.2.0-41 - doc/rbd/rbd-persistent-write-back-cache: clarify some config settings (rhbz#1959153) - doc/rbd/rbd-persistent-write-back-cache: update "Cache Status" section (rhbz#1959153) - rbd: don't attempt to interpret image cache state json 
(rhbz#1959153) - librbd/cache/pwl: fix parsing of cache_type in create_image_cache_state() (rhbz#1959153) - librbd/cache/pwl: log image_cache_state after it is fetched, not before (rhbz#1959153) * Wed May 19 2021 Ceph Jenkins 2:16.2.0-40 - mgr/dashboard: update frontend deps due to security vulnerabilities. (rhbz#1960252) - mgr/dashboard: get SASS vars values in TS through CssHelper. (rhbz#1960252) * Wed May 19 2021 Ceph Jenkins 2:16.2.0-39 - mgr/dashboard: fix OSDs Host details/overview grafana graphs (rhbz#1956511) - dashboard: Fixed name clash when hostname similar to another (rhbz#1956511) - radosgw-admin: skip GC init on read-only admin ops (rhbz#1959452) * Mon May 17 2021 Ceph Jenkins 2:16.2.0-38 - mgr/dashboard: fix base-href: revert it to previous approach (rhbz#1957213) * Sat May 15 2021 Ceph Jenkins 2:16.2.0-37 - mgr/dashboard: fix cookie injection issue (rhbz#1960697) * Thu May 13 2021 Ceph Jenkins 2:16.2.0-36 - rgw: crash on multipart upload to bucket with policy (rhbz#1960262) * Thu May 13 2021 Ceph Jenkins 2:16.2.0-35 - mgr/cephadm: add timeout when removing iscsi gateway.conf (rhbz#1914726) - mgr/cephadm: Purge iscsi configuration from pool and ceph config (rhbz#1914726) * Tue May 11 2021 Ceph Jenkins 2:16.2.0-34 - mgr/dashboard: fix bucket versioning when locking is enabled (rhbz#1952143) - mgr/dashboard: OSDs placement text is unreadable (rhbz#1954692) - mgr/dashboard: Remove username, password fields from -Cluster/Manager Modules/dashboard (rhbz#1934994) - mgr/dashboard: fix base-href: revert it to previous approach (rhbz#1957213) * Fri May 07 2021 Ceph Jenkins 2:16.2.0-33 - python-common: use OrderedDict instead of Set to remove duplicates from host labels list (rhbz#1950980) * Fri May 07 2021 Ceph Jenkins 2:16.2.0-32 - mgr/cephadm: ceph-volume verbose only when fails (rhbz#1948717) * Thu May 06 2021 Ceph Jenkins 2:16.2.0-31 - mgr/dashboard: Host Maintenance Follow ups (rhbz#1954728) * Thu May 06 2021 Ceph Jenkins 2:16.2.0-30 - mgr/dashboard: fix 
set-ssl-certificate{,-key} commands (rhbz#1954645) * Tue May 04 2021 Ceph Jenkins 2:16.2.0-29 - cephadm:persist the grafana.db file (rhbz#1942456) * Tue May 04 2021 Ceph Jenkins 2:16.2.0-28 - mgr/orchestrator: remove IMAGE ID from 'orch ls' (rhbz#1898196) - mgr/orchestrator: report osds as osd.unmanaged as appropriate (rhbz#1898196) - mgr/cephadm: rewrite/simplify describe_service (rhbz#1898196) * Mon May 03 2021 Ceph Jenkins 2:16.2.0-27 - rgw: fix bucket object listing when marker matches prefix (rhbz#1956394) - rgw: radosgw_admin remove bucket not purging past 1,000 objects (rhbz#1956520) * Fri Apr 30 2021 Ceph Jenkins 2:16.2.0-26 - osd: drop entry in failure_pending when resetting stale peer (rhbz#1905339) - osd: mark HeartbeatInfo::is_stale() and friends "const" (rhbz#1905339) - mon/OSDMonitor: drop stale failure_info (rhbz#1905339) - mon/OSDMonitor: restructure OSDMonitor::check_failures() loop (rhbz#1905339) - mon/OSDMonitor: extract get_grace_time() (rhbz#1905339) - mon/OSDMonitor: do not return old failure report when updating it (rhbz#1905339) - mon/OSDMonitor: do not return no_reply() again (rhbz#1905339) - mon/Monitor: early return if routed request is not found (rhbz#1905339) * Fri Apr 30 2021 Ceph Jenkins 2:16.2.0-25 - pybind/mgr/devicehealth: fix command name (rhbz#1939405) * Fri Apr 30 2021 Ceph Jenkins 2:16.2.0-24 - common/options/global.yaml.in: increase default value of bluestore_cache_trim_max_skip_pinned (rhbz#1931504) * Fri Apr 30 2021 Ceph Jenkins 2:16.2.0-23 - rgw: add latency to the request summary of an op (rhbz#1881304) * Thu Apr 29 2021 Ceph Jenkins 2:16.2.0-22 - osd/PeeringState: fix acting_set_writeable min_size check (rhbz#1946828) - osd/PeeringState: fix get_backfill_priority min_size comparison (rhbz#1946828) * Thu Apr 29 2021 Ceph Jenkins 2:16.2.0-21 - rgw: fix for mfa resync crash when supplied with only one totp_pin. 
(rhbz#1947862) - mon: Modifying trim logic to change paxos_service_trim_max dynamically (rhbz#1943357) - mon: Adding variables for Paxos trim 1. Define variables for paxos_service_trim_min and paxos_service_trim_max. 2. Use them in place of g_conf()->paxos_service_trim_min and g_conf()->paxos_service_trim_max (rhbz#1943357) * Wed Apr 28 2021 Ceph Jenkins 2:16.2.0-20 - mgr/orchestrator: validate lists in spec jsons (rhbz#1944045 rhbz#1946462) * Wed Apr 28 2021 Ceph Jenkins 2:16.2.0-19 - rgw: permit logging of list-bucket (and any other no-bucket op) (rhbz#1929387) * Wed Apr 28 2021 Ceph Jenkins 2:16.2.0-18 - mgr/cephadm: fix orch host add with multiple labels and no addr (rhbz#1929563) - mgr/cephadm: fix nfs-rgw stray daemon (rhbz#1952189) - pybind/ceph_argparse: print --format flag name in help descs (rhbz#1928084) - mgr/cephadm: don't list non ceph daemons as needing upgrade in upgrade check (rhbz#1949957) - mgr/cephadm: place maximum on placement count based on host count (rhbz#1909831) * Wed Apr 28 2021 Ceph Jenkins 2:16.2.0-17 - mgr/dashboard: fix broken feature toggles (rhbz#1946625) * Mon Apr 26 2021 Ceph Jenkins 2:16.2.0-16 - mds/scrub: background scrub error fixes (rhbz#1794781) - mds: skip the buffer in UnknownPayload::decode() (rhbz#1948674) * Mon Apr 26 2021 Ceph Jenkins 2:16.2.0-15 - mgr/dashboard: update documentation about creating NFS export. (rhbz#1939480) - mgr/dashboard: fix duplicated rows when creating NFS export. (rhbz#1939480) - mgr/dashboard: fix errors when creating NFS export. 
(rhbz#1941996) * Mon Apr 26 2021 Ceph Jenkins 2:16.2.0-14 - mgr/dashboard: Remove username and password from request body (rhbz#1951353) * Fri Apr 23 2021 Ceph Jenkins 2:16.2.0-13 - vstart.sh: disable "auth_allow_insecure_global_id_reclaim" (rhbz#1952206) - auth/cephx: make KeyServer::build_session_auth_info() less confusing (rhbz#1952206) - auth/cephx: cap ticket validity by expiration of "next" key (rhbz#1952206) - auth/cephx: drop redundant KeyServerData::get_service_secret() overload (rhbz#1952206) - qa/standalone: default to disable insecure global id reclaim (rhbz#1952206) - qa/suites/upgrade/octopus-x: disable insecure global_id reclaim health warnings (rhbz#1952206) - qa/tasks/ceph[adm].conf[.template]: disable insecure global_id reclaim health alerts (rhbz#1952206) - cephadm: set auth_allow_insecure_global_id_reclaim for mon on bootstrap (rhbz#1952206) - mon/HealthMonitor: raise AUTH_INSECURE_GLOBAL_ID_RENEWAL[_ALLOWED] (rhbz#1952206) - auth/cephx: ignore CEPH_ENTITY_TYPE_AUTH in requested keys (rhbz#1952206) - auth/cephx: rotate auth tickets less often (rhbz#1952206) - mon: fail fast when unauthorized global_id (re)use is disallowed (rhbz#1952206) - auth/cephx: option to disallow unauthorized global_id (re)use (rhbz#1952206) - auth/cephx: make cephx_decode_ticket() take a const ticket_blob (rhbz#1952206) - auth/AuthServiceHandler: keep track of global_id and whether it is new (rhbz#1952206) - auth/AuthServiceHandler: build_cephx_response_header() is cephx-specific (rhbz#1952206) - auth/AuthServiceHandler: drop unused start_session() args (rhbz#1952206) - mon/MonClient: drop global_id arg from _add_conn() and _add_conns() (rhbz#1952206) - mon/MonClient: reset auth state in shutdown() (rhbz#1952206) - mon/MonClient: preserve auth state on reconnects (rhbz#1952206) - mon/MonClient: claim active_con's auth explicitly (rhbz#1952206) - mon/MonClient: resurrect "waiting for monmap|config" timeouts (rhbz#1952206) - qa/tasks/ceph.conf: shorten cephx TTL for 
testing (rhbz#1952206) * Wed Apr 21 2021 Ceph Jenkins 2:16.2.0-12 - common/options.cc: turn off bluestore_warn_on_no_per_pool_omap by default (rhbz#1952193) * Tue Apr 20 2021 Ceph Jenkins 2:16.2.0-11 - cephadm: fix failure when using --apply-spec and --ssh-user (rhbz#1936964) * Wed Apr 14 2021 Ceph Jenkins 2:16.2.0-10 - rgw/notification: fix persistent notification hang when ack-level=none (rhbz#1947802) * Wed Apr 14 2021 Ceph Jenkins 2:16.2.0-9 - mgr/dashboard: Revoke read-only user's access to Manager modules (rhbz#1937363) * Tue Apr 13 2021 Ken Dreyer - 2:16.2.0-8 - ceph-immutable-object-cache requires ceph-base (rhbz#1944933) * Tue Apr 13 2021 Ceph Jenkins 2:16.2.0-7 - Update patches * Tue Apr 13 2021 Ceph Jenkins 2:16.2.0-6 - packaging: require ceph-common for immutable object cache daemon (rhbz#1944933) * Fri Apr 09 2021 Ceph Jenkins 2:16.2.0-5 - mgr/cephadm: don't have upgrade fail if "." in patch section of version (rhbz#1944978) - mgr/dashboard: Unable to login to ceph dashboard until clearing cookies (rhbz#1913580) * Wed Apr 07 2021 Ceph Jenkins 2:16.2.0-4 - mgr/dashboard: Fix for broken User management role cloning (rhbz#1940025) * Tue Apr 06 2021 Ceph Jenkins 2:16.2.0-3 - pybind/mgr/volumes: log mutex locks to help debug deadlocks (rhbz#1946259) - mgr/pybind/volumes: avoid acquiring lock for thread count updates (rhbz#1946259) - qa: bump debugging for mgr (rhbz#1946259) - mgr: add debug output for commands dispatched (rhbz#1946259) - client: fire the finish_cap_snap() after buffer being flushed (rhbz#1946260) - client: simplify the iterating code (rhbz#1946260) - client: remove unused _flushed_cap_snap (rhbz#1946260) - client: clean up the code (rhbz#1946260) - client: rebuild bl to avoid too many vector(> IOV_MAX) (rhbz#1946261) * Tue Apr 06 2021 Ceph Jenkins 2:16.2.0-2 - mgr/dashboard: Improve descriptions in some parts of the dashboard (rhbz#1940062 rhbz#1939451) - rgw/multisite: handle case when empty marker is provided (rhbz#1946390) - rgw: Fix 
probe failure on OSDs not supporting FIFO. (rhbz#1946390) - rgw: Make empty datalog fifo markers empty strings (rhbz#1946390) - rgw: Try to prune empties even if no empties found (rhbz#1946390) - rgw: Wait until a generation has been empty for an hour to delete (rhbz#1946390) - rgw: Leave the zero'th shard of the zero'th generation for cls_lock (rhbz#1946390) - rgw: Don't swallow errors in datalog async trim (rhbz#1946390) - rgw: Fix cursor handling in DataLogBackends::list (rhbz#1946390) - rgw: Prune datalog generations in the renew loop (rhbz#1946390) - rgw: Use LazyFIFO in data changes log (rhbz#1946390) - rgw: Add LazyFIFO to keep from blasting an op-per-shard on startup (rhbz#1946390) - cls/fifo: Don't error in the log if we're being probed for existence (rhbz#1946390) - rgw: Add and trim datalog generations (rhbz#1946390) - rgw: Actually pull logbacking_generations into datalog (rhbz#1946390) - rgw: Clamp FIFO trim to head (rhbz#1946390) - rgw: Lay groundwork for multigenerational datalog (rhbz#1946390) - rgw: Add rgw_complete_aio_completion() (rhbz#1946390) - rgw: Generational support for logback switching (rhbz#1946390) - rgw: Logback generation data structures (rhbz#1946390) - rgw/datalog: make get_oid take generation (rhbz#1946390) - rgw: Move get_oid back to RGWDataChangesLog (rhbz#1946390) - rgw/datalog: Pass IoCtx in, don't have each backend make its own (rhbz#1946390) - rgw: Use refactored log backing tools (rhbz#1946390) - rgw: Factor out tool to deal with different log backing (rhbz#1946390) - rgw: Add AioCompletion* versions for the rest of the FIFO methods (rhbz#1946390) - cls/log: Take const references of things you won't modify (rhbz#1946390) * Mon Apr 05 2021 Ken Dreyer - 2:16.2.0-1 - Update to v16.2.0 release