# then the device is accepted. Be careful mixing 'a' and 'r' patterns,
# as the combination might produce unexpected results (test changes.)
# Run vgscan after changing the filter to regenerate the cache.
- # See the use_lvmetad comment for a special case regarding filters.
#
# Example
# Accept every block device:
# Configuration option devices/global_filter.
# Limit the block devices that are used by LVM system components.
# Because devices/filter may be overridden from the command line, it is
- # not suitable for system-wide device filtering, e.g. udev and lvmetad.
+ # not suitable for system-wide device filtering, e.g. udev.
# Use global_filter to hide devices from these LVM system components.
# The syntax is the same as devices/filter. Devices rejected by
# global_filter are not opened by LVM.
# global_filter = [ "a|.*/|" ]
global_filter = [ "a|^/dev/md[0-9]*$|", "r/.*/" ]
- # Configuration option devices/cache_dir.
- # Directory in which to store the device cache file.
- # The results of filtering are cached on disk to avoid rescanning dud
- # devices (which can take a very long time). By default this cache is
- # stored in a file named .cache. It is safe to delete this file; the
- # tools regenerate it. If obtain_device_list_from_udev is enabled, the
- # list of devices is obtained from udev and any existing .cache file
- # is removed.
- cache_dir = "/run/lvm"
-
- # Configuration option devices/cache_file_prefix.
- # A prefix used before the .cache file name. See devices/cache_dir.
- cache_file_prefix = ""
-
- # Configuration option devices/write_cache_state.
- # Enable/disable writing the cache file. See devices/cache_dir.
- write_cache_state = 1
-
# Configuration option devices/types.
# List of additional acceptable block device types.
# These are of device type names from /proc/devices, followed by the
# present on the system. sysfs must be part of the kernel and mounted.)
sysfs_scan = 1
+ # Configuration option devices/scan_lvs.
+ # Scan LVM LVs for layered PVs.
+ scan_lvs = 1
+
# Configuration option devices/multipath_component_detection.
# Ignore devices that are components of DM multipath devices.
multipath_component_detection = 1
fw_raid_component_detection = 0
# Configuration option devices/md_chunk_alignment.
- # Align PV data blocks with md device's stripe-width.
+ # Align the start of a PV data area with md device's stripe-width.
# This applies if a PV is placed directly on an md device.
+ # default_data_alignment will be overridden if it is not aligned
+ # with the value detected for this setting.
+ # This setting is overridden by data_alignment_detection,
+ # data_alignment, and the --dataalignment option.
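+ # For instance, a 4-disk raid5 array with a 512KiB chunk size has a
+ # stripe-width of 3 x 512KiB = 1536KiB (illustrative values), so the
+ # start of the PV data area would be aligned on a 1536KiB boundary.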
md_chunk_alignment = 1
# Configuration option devices/default_data_alignment.
- # Default alignment of the start of a PV data area in MB.
- # If set to 0, a value of 64KiB will be used.
- # Set to 1 for 1MiB, 2 for 2MiB, etc.
+ # Align the start of a PV data area with this number of MiB.
+ # Set to 1 for 1MiB, 2 for 2MiB, etc. Set to 0 to disable.
+ # This setting is overridden by data_alignment and the --dataalignment
+ # option.
# This configuration option has an automatic default value.
# default_data_alignment = 1
# Configuration option devices/data_alignment_detection.
- # Detect PV data alignment based on sysfs device information.
+ # Align the start of a PV data area with sysfs io properties.
# The start of a PV data area will be a multiple of minimum_io_size or
# optimal_io_size exposed in sysfs. minimum_io_size is the smallest
# request the device can perform without incurring a read-modify-write
# preferred unit of receiving I/O, e.g. MD stripe width.
# minimum_io_size is used if optimal_io_size is undefined (0).
# If md_chunk_alignment is enabled, that detects the optimal_io_size.
- # This setting takes precedence over md_chunk_alignment.
+ # default_data_alignment and md_chunk_alignment will be overridden
+ # if they are not aligned with the value detected for this setting.
+ # This setting is overridden by data_alignment and the --dataalignment
+ # option.
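+ #
+ # Example
+ # The sysfs values consulted can be inspected directly, e.g. for a
+ # hypothetical device sda:
+ # cat /sys/block/sda/queue/minimum_io_size
+ # cat /sys/block/sda/queue/optimal_io_size
+ #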
data_alignment_detection = 1
# Configuration option devices/data_alignment.
- # Alignment of the start of a PV data area in KiB.
- # If a PV is placed directly on an md device and md_chunk_alignment or
- # data_alignment_detection are enabled, then this setting is ignored.
- # Otherwise, md_chunk_alignment and data_alignment_detection are
- # disabled if this is set. Set to 0 to use the default alignment or the
- # page size, if larger.
+ # Align the start of a PV data area with this number of KiB.
+ # When non-zero, this setting overrides default_data_alignment.
+ # Set to 0 to disable, in which case default_data_alignment
+ # is used to align the first PE in units of MiB.
+ # This setting is overridden by the --dataalignment option.
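+ #
+ # Example
+ # Align the start of the PV data area on a 1MiB boundary
+ # (illustrative value):
+ # data_alignment = 1024
+ #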
data_alignment = 0
# Configuration option devices/data_alignment_offset_detection.
- # Detect PV data alignment offset based on sysfs device information.
- # The start of a PV aligned data area will be shifted by the
+ # Shift the start of an aligned PV data area based on sysfs information.
+ # After a PV data area is aligned, it will be shifted by the
# alignment_offset exposed in sysfs. This offset is often 0, but may
# be non-zero. Certain 4KiB sector drives that compensate for windows
# partitioning will have an alignment_offset of 3584 bytes (sector 7
# is the lowest aligned logical block, the 4KiB sectors start at
# LBA -1, and consequently sector 63 is aligned on a 4KiB boundary).
- # pvcreate --dataalignmentoffset will skip this detection.
+ # This setting is overridden by the --dataalignmentoffset option.
data_alignment_offset_detection = 1
# Configuration option devices/ignore_suspended_devices.
# different way, making them a better choice for VG stacking.
ignore_lvm_mirrors = 1
- # Configuration option devices/disable_after_error_count.
- # Number of I/O errors after which a device is skipped.
- # During each LVM operation, errors received from each device are
- # counted. If the counter of a device exceeds the limit set here,
- # no further I/O is sent to that device for the remainder of the
- # operation. Setting this to 0 disables the counters altogether.
- disable_after_error_count = 0
-
# Configuration option devices/require_restorefile_with_uuid.
# Allow use of pvcreate --uuid without requiring --restorefile.
require_restorefile_with_uuid = 1
maximise_cling = 1
# Configuration option allocation/use_blkid_wiping.
- # Use blkid to detect existing signatures on new PVs and LVs.
+ # Use blkid to detect and erase existing signatures on new PVs and LVs.
# The blkid library can detect more signatures than the native LVM
# detection code, but may take longer. LVM needs to be compiled with
# blkid wiping support for this setting to apply. LVM native detection
# Configuration option allocation/raid_stripe_all_devices.
# Stripe across all PVs when RAID stripes are not specified.
- # If enabled, all PVs in the VG or on the command line are used for raid0/4/5/6/10
- # when the command does not specify the number of stripes to use.
+ # If enabled, all PVs in the VG or on the command line are used for
+ # raid0/4/5/6/10 when the command does not specify the number of
+ # stripes to use.
# This was the default behaviour until release 2.02.162.
# This configuration option has an automatic default value.
# raid_stripe_all_devices = 0
# Cache pool metadata and data will always use different PVs.
cache_pool_metadata_require_separate_pvs = 0
+ # Configuration option allocation/cache_metadata_format.
+ # Sets default metadata format for new cache.
+ #
+ # Accepted values:
+ # 0 Automatically detected best available format
+ # 1 Original format
+ # 2 Improved 2nd. generation format
+ #
+ # This configuration option has an automatic default value.
+ # cache_metadata_format = 0
+
# Configuration option allocation/cache_mode.
# The default cache mode used for new cache.
#
# Configuration option allocation/cache_policy.
# The default cache policy used for new cache volume.
- # Since kernel 4.2 the default policy is smq (Stochastic multique),
+ # Since kernel 4.2 the default policy is smq (Stochastic multiqueue),
# otherwise the older mq (Multiqueue) policy is selected.
# This configuration option does not have a default value defined.
# Default physical extent size in KiB to use for new VGs.
# This configuration option has an automatic default value.
# physical_extent_size = 4096
+
+ # Configuration option allocation/vdo_use_compression.
+ # Enables or disables compression when creating a VDO volume.
+ # Compression may be disabled if necessary to maximize performance
+ # or to speed processing of data that is unlikely to compress.
+ # This configuration option has an automatic default value.
+ # vdo_use_compression = 1
+
+ # Configuration option allocation/vdo_use_deduplication.
+ # Enables or disables deduplication when creating a VDO volume.
+ # Deduplication may be disabled in instances where data is not expected
+ # to have good deduplication rates but compression is still desired.
+ # This configuration option has an automatic default value.
+ # vdo_use_deduplication = 1
+
+ # Configuration option allocation/vdo_emulate_512_sectors.
+ # Specifies that the VDO volume is to emulate a 512 byte block device.
+ # This configuration option has an automatic default value.
+ # vdo_emulate_512_sectors = 0
+
+ # Configuration option allocation/vdo_block_map_cache_size_mb.
+ # Specifies the amount of memory in MiB allocated for caching block map
+ # pages for VDO volume. The value must be a multiple of 4096 and must be
+ # at least 128MiB and less than 16TiB. The cache must be at least 16MiB
+ # per logical thread. Note that there is a memory overhead of 15%.
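+ # For example, with 8 logical threads the cache must be at least
+ # 8 x 16MiB = 128MiB, which matches the default below.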
+ # This configuration option has an automatic default value.
+ # vdo_block_map_cache_size_mb = 128
+
+ # Configuration option allocation/vdo_block_map_period.
+ # Tunes the quantity of block map updates that can accumulate
+ # before cache pages are flushed to disk. The value must be
+ # at least 1 and less than 16380.
+ # A lower value means shorter recovery time but lower performance.
+ # This configuration option has an automatic default value.
+ # vdo_block_map_period = 16380
+
+ # Configuration option allocation/vdo_check_point_frequency.
+ # The default check point frequency for VDO volume.
+ # This configuration option has an automatic default value.
+ # vdo_check_point_frequency = 0
+
+ # Configuration option allocation/vdo_use_sparse_index.
+ # Enables sparse indexing for VDO volume.
+ # This configuration option has an automatic default value.
+ # vdo_use_sparse_index = 0
+
+ # Configuration option allocation/vdo_index_memory_size_mb.
+ # Specifies the amount of index memory in MiB for VDO volume.
+ # The value must be at least 256MiB and at most 1TiB.
+ # This configuration option has an automatic default value.
+ # vdo_index_memory_size_mb = 256
+
+ # Configuration option allocation/vdo_use_read_cache.
+ # Enables or disables the read cache within the VDO volume.
+ # The cache should be enabled if write workloads are expected
+ # to have high levels of deduplication, or for read intensive
+ # workloads of highly compressible data.
+ # This configuration option has an automatic default value.
+ # vdo_use_read_cache = 0
+
+ # Configuration option allocation/vdo_read_cache_size_mb.
+ # Specifies the extra VDO volume read cache size in MiB.
+ # This space is in addition to a system-defined minimum.
+ # The value must be less than 16TiB and 1.12 MiB of memory
+ # will be used per MiB of read cache specified, per bio thread.
+ # This configuration option has an automatic default value.
+ # vdo_read_cache_size_mb = 0
+
+ # Configuration option allocation/vdo_slab_size_mb.
+ # Specifies the size in MiB of the increment by which a VDO is grown.
+ # Using a smaller size constrains the total maximum physical size
+ # that can be accommodated. Must be a power of two between 128MiB and 32GiB.
+ # This configuration option has an automatic default value.
+ # vdo_slab_size_mb = 2048
+
+ # Configuration option allocation/vdo_ack_threads.
+ # Specifies the number of threads to use for acknowledging
+ # completion of requested VDO I/O operations.
+ # The value must be in range [0..100].
+ # This configuration option has an automatic default value.
+ # vdo_ack_threads = 1
+
+ # Configuration option allocation/vdo_bio_threads.
+ # Specifies the number of threads to use for submitting I/O
+ # operations to the storage device of VDO volume.
+ # The value must be in range [1..100].
+ # Each additional thread after the first will use an additional 18MiB of RAM,
+ # plus 1.12 MiB of RAM per megabyte of configured read cache size.
+ # This configuration option has an automatic default value.
+ # vdo_bio_threads = 1
+
+ # Configuration option allocation/vdo_bio_rotation.
+ # Specifies the number of I/O operations to enqueue for each bio-submission
+ # thread before directing work to the next. The value must be in range [1..1024].
+ # This configuration option has an automatic default value.
+ # vdo_bio_rotation = 64
+
+ # Configuration option allocation/vdo_cpu_threads.
+ # Specifies the number of threads to use for CPU-intensive work such as
+ # hashing or compression for VDO volume. The value must be in range [1..100].
+ # This configuration option has an automatic default value.
+ # vdo_cpu_threads = 2
+
+ # Configuration option allocation/vdo_hash_zone_threads.
+ # Specifies the number of threads across which to subdivide parts of the VDO
+ # processing based on the hash value computed from the block data.
+ # The value must be in range [0..100].
+ # vdo_hash_zone_threads, vdo_logical_threads and vdo_physical_threads must be
+ # either all zero or all non-zero.
+ # This configuration option has an automatic default value.
+ # vdo_hash_zone_threads = 1
+
+ # Configuration option allocation/vdo_logical_threads.
+ # Specifies the number of threads across which to subdivide parts of the VDO
+ # processing based on logical block addresses.
+ # A logical thread count of 9 or more will require explicitly specifying
+ # a sufficiently large block map cache size, as well.
+ # The value must be in range [0..100].
+ # vdo_hash_zone_threads, vdo_logical_threads and vdo_physical_threads must be
+ # either all zero or all non-zero.
+ # This configuration option has an automatic default value.
+ # vdo_logical_threads = 1
+
+ # Configuration option allocation/vdo_physical_threads.
+ # Specifies the number of threads across which to subdivide parts of the VDO
+ # processing based on physical block addresses.
+ # Each additional thread after the first will use an additional 10MiB of RAM.
+ # The value must be in range [0..16].
+ # vdo_hash_zone_threads, vdo_logical_threads and vdo_physical_threads must be
+ # either all zero or all non-zero.
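+ #
+ # Example
+ # A consistent combination (illustrative values):
+ # vdo_hash_zone_threads = 2
+ # vdo_logical_threads = 2
+ # vdo_physical_threads = 2
+ #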
+ # This configuration option has an automatic default value.
+ # vdo_physical_threads = 1
+
+ # Configuration option allocation/vdo_write_policy.
+ # Specifies the write policy:
+ # auto - VDO will check the storage device and determine whether it
+ # supports flushes. If it does, VDO will run in async mode, otherwise
+ # it will run in sync mode.
+ # sync - Writes are acknowledged only after data is stably written.
+ # This policy is not supported if the underlying storage is not also
+ # synchronous.
+ # async - Writes are acknowledged after data has been cached for
+ # writing to stable storage. Data which has not been flushed is not
+ # guaranteed to persist in this mode.
+ # This configuration option has an automatic default value.
+ # vdo_write_policy = "auto"
}
# Configuration section log.
# Select log messages by class.
# Some debugging messages are assigned to a class and only appear in
# debug output if the class is listed here. Classes currently
- # available: memory, devices, activation, allocation, lvmetad,
+ # available: memory, devices, io, activation, allocation,
# metadata, cache, locking, lvmpolld. Use "all" to see everything.
- debug_classes = [ "memory", "devices", "activation", "allocation", "lvmetad", "metadata", "cache", "locking", "lvmpolld", "dbus" ]
+ debug_classes = [ "memory", "devices", "io", "activation", "allocation", "metadata", "cache", "locking", "lvmpolld", "dbus" ]
}
# Configuration section backup.
# Configuration option global/units.
# Default value for --units argument.
- units = "h"
+ units = "r"
# Configuration option global/si_unit_consistency.
# Distinguish between powers of 1024 and 1000 bytes.
# the error messages.
activation = 1
- # Configuration option global/fallback_to_lvm1.
- # Try running LVM1 tools if LVM cannot communicate with DM.
- # This option only applies to 2.4 kernels and is provided to help
- # switch between device-mapper kernels and LVM1 kernels. The LVM1
- # tools need to be installed with .lvm1 suffices, e.g. vgscan.lvm1.
- # They will stop working once the lvm2 on-disk metadata format is used.
- # This configuration option has an automatic default value.
- # fallback_to_lvm1 = 0
-
- # Configuration option global/format.
- # The default metadata format that commands should use.
- # The -M 1|2 option overrides this setting.
- #
- # Accepted values:
- # lvm1
- # lvm2
- #
- # This configuration option has an automatic default value.
- # format = "lvm2"
-
- # Configuration option global/format_libraries.
- # Shared libraries that process different metadata formats.
- # If support for LVM1 metadata was compiled as a shared library use
- # format_libraries = "liblvm2format1.so"
- # This configuration option does not have a default value defined.
-
# Configuration option global/segment_libraries.
# This configuration option does not have a default value defined.
# Location of /etc system configuration directory.
etc = "/etc"
- # Configuration option global/locking_type.
- # Type of locking to use.
- #
- # Accepted values:
- # 0
- # Turns off locking. Warning: this risks metadata corruption if
- # commands run concurrently.
- # 1
- # LVM uses local file-based locking, the standard mode.
- # 2
- # LVM uses the external shared library locking_library.
- # 3
- # LVM uses built-in clustered locking with clvmd.
- # This is incompatible with lvmetad. If use_lvmetad is enabled,
- # LVM prints a warning and disables lvmetad use.
- # 4
- # LVM uses read-only locking which forbids any operations that
- # might change metadata.
- # 5
- # Offers dummy locking for tools that do not need any locks.
- # You should not need to set this directly; the tools will select
- # when to use it instead of the configured locking_type.
- # Do not use lvmetad or the kernel device-mapper driver with this
- # locking type. It is used by the --readonly option that offers
- # read-only access to Volume Group metadata that cannot be locked
- # safely because it belongs to an inaccessible domain and might be
- # in use, for example a virtual machine image or a disk that is
- # shared by a clustered machine.
- #
- locking_type = 1
-
# Configuration option global/wait_for_locks.
# When disabled, fail if a lock request would block.
wait_for_locks = 1
- # Configuration option global/fallback_to_clustered_locking.
- # Attempt to use built-in cluster locking if locking_type 2 fails.
- # If using external locking (type 2) and initialisation fails, with
- # this enabled, an attempt will be made to use the built-in clustered
- # locking. Disable this if using a customised locking_library.
- fallback_to_clustered_locking = 1
-
- # Configuration option global/fallback_to_local_locking.
- # Use locking_type 1 (local) if locking_type 2 or 3 fail.
- # If an attempt to initialise type 2 or type 3 locking failed, perhaps
- # because cluster components such as clvmd are not running, with this
- # enabled, an attempt will be made to use local file-based locking
- # (type 1). If this succeeds, only commands against local VGs will
- # proceed. VGs marked as clustered will be ignored.
- fallback_to_local_locking = 1
-
# Configuration option global/locking_dir.
# Directory to use for LVM command file locks.
# Local non-LV directory that holds file-based locks while commands are
# Search this directory first for shared libraries.
# This configuration option does not have a default value defined.
- # Configuration option global/locking_library.
- # The external locking library to use for locking_type 2.
- # This configuration option has an automatic default value.
- # locking_library = "liblvm2clusterlock.so"
-
# Configuration option global/abort_on_internal_errors.
# Abort a command that encounters an internal error.
# Treat any internal errors as fatal errors, aborting the process that
# encountered the internal error. Please only enable for debugging.
abort_on_internal_errors = 0
- # Configuration option global/detect_internal_vg_cache_corruption.
- # Internal verification of VG structures.
- # Check if CRC matches when a parsed VG is used multiple times. This
- # is useful to catch unexpected changes to cached VG structures.
- # Please only enable for debugging.
- detect_internal_vg_cache_corruption = 0
-
# Configuration option global/metadata_read_only.
# No operations that change on-disk metadata are permitted.
# Additionally, read-only commands that encounter metadata in need of
# This configuration option has an automatic default value.
# lvdisplay_shows_full_device_path = 0
- # Configuration option global/use_lvmetad.
- # Use lvmetad to cache metadata and reduce disk scanning.
- # When enabled (and running), lvmetad provides LVM commands with VG
- # metadata and PV state. LVM commands then avoid reading this
- # information from disks which can be slow. When disabled (or not
- # running), LVM commands fall back to scanning disks to obtain VG
- # metadata. lvmetad is kept updated via udev rules which must be set
- # up for LVM to work correctly. (The udev rules should be installed
- # by default.) Without a proper udev setup, changes in the system's
- # block device configuration will be unknown to LVM, and ignored
- # until a manual 'pvscan --cache' is run. If lvmetad was running
- # while use_lvmetad was disabled, it must be stopped, use_lvmetad
- # enabled, and then started. When using lvmetad, LV activation is
- # switched to an automatic, event-based mode. In this mode, LVs are
- # activated based on incoming udev events that inform lvmetad when
- # PVs appear on the system. When a VG is complete (all PVs present),
- # it is auto-activated. The auto_activation_volume_list setting
- # controls which LVs are auto-activated (all by default.)
- # When lvmetad is updated (automatically by udev events, or directly
- # by pvscan --cache), devices/filter is ignored and all devices are
- # scanned by default. lvmetad always keeps unfiltered information
- # which is provided to LVM commands. Each LVM command then filters
- # based on devices/filter. This does not apply to other, non-regexp,
- # filtering settings: component filters such as multipath and MD
- # are checked during pvscan --cache. To filter a device and prevent
- # scanning from the LVM system entirely, including lvmetad, use
- # devices/global_filter.
- use_lvmetad = 0
-
- # Configuration option global/lvmetad_update_wait_time.
- # The number of seconds a command will wait for lvmetad update to finish.
- # After waiting for this period, a command will not use lvmetad, and
- # will revert to disk scanning.
- # This configuration option has an automatic default value.
- # lvmetad_update_wait_time = 10
+ # Configuration option global/event_activation.
+ # Activate LVs based on system-generated device events.
+ # When a device appears on the system, a system-generated event runs
+ # the pvscan command to activate LVs if the new PV completes the VG.
+ # Use auto_activation_volume_list to select which LVs should be
+ # activated from these events (the default is all.)
+ # When event_activation is disabled, the system will generally run
+ # a direct activation command to activate LVs in complete VGs.
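+ #
+ # Example
+ # A device event typically runs a command of this form (device path
+ # illustrative):
+ # pvscan --cache -aay /dev/sda2
+ #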
+ event_activation = 1
+
+ # Configuration option global/use_aio.
+ # Use async I/O when reading and writing devices.
+ # This configuration option has an automatic default value.
+ # use_aio = 1
# Configuration option global/use_lvmlockd.
# Use lvmlockd for locking among hosts using LVM on shared storage.
# Configuration option global/cache_disabled_features.
# Features to not use in the cache driver.
# This can be helpful for testing, or to avoid using a feature that is
- # causing problems. Features include: policy_mq, policy_smq.
+ # causing problems. Features include: policy_mq, policy_smq, metadata2.
#
# Example
# cache_disabled_features = [ "policy_smq" ]
# This configuration option has an automatic default value.
# cache_repair_options = [ "" ]
+ # Configuration option global/vdo_format_executable.
+ # The full path to the vdoformat command.
+ # LVM uses this command to initialize the data volume for a VDO type
+ # logical volume.
+ # This configuration option has an automatic default value.
+ # vdo_format_executable = "autodetect"
+
+ # Configuration option global/vdo_format_options.
+ # List of options passed to the standard vdoformat command.
+ # This configuration option has an automatic default value.
+ # vdo_format_options = [ "" ]
+
+ # Configuration option global/fsadm_executable.
+ # The full path to the fsadm command.
+ # LVM uses this command to help with lvresize -r operations.
+ # This configuration option has an automatic default value.
+ # fsadm_executable = "/sbin/fsadm"
+
# Configuration option global/system_id_source.
# The method LVM uses to set the local system ID.
# Volume Groups can also be given a system ID (by vgcreate, vgchange,
# Configuration option activation/missing_stripe_filler.
# Method to fill missing stripes when activating an incomplete LV.
# Using 'error' will make inaccessible parts of the device return I/O
- # errors on access. You can instead use a device path, in which case,
+ # errors on access. Using 'zero' will return success (and zero) on I/O.
+ # You can instead use a device path, in which case,
# that device will be used in place of missing stripes. Using anything
# other than 'error' with mirrored or snapshotted volumes is likely to
# result in data corruption.
# Configuration option activation/raid_region_size.
# Size in KiB of each raid or mirror synchronization region.
- # For raid or mirror segment types, this is the amount of data that is
- # copied at once when initializing, or moved at once by pvmove.
- raid_region_size = 512
+ # The clean/dirty state of data is tracked for each region.
+ # The value is rounded down to a power of two if necessary, and
+ # is ignored if it is not a multiple of the machine memory page size.
+ raid_region_size = 2048
# Configuration option activation/error_when_full.
# Return errors if a thin pool runs out of space.
#
thin_pool_autoextend_percent = 20
+ # Configuration option activation/vdo_pool_autoextend_threshold.
+ # Auto-extend a VDO pool when its usage exceeds this percent.
+ # Setting this to 100 disables automatic extension.
+ # The minimum value is 50 (a smaller value is treated as 50.)
+ # Also see vdo_pool_autoextend_percent.
+ # Automatic extension requires dmeventd to be monitoring the LV.
+ #
+ # Example
+ # Using 70% autoextend threshold and 20% autoextend size, when a 10G
+ # VDO pool exceeds 7G, it is extended to 12G, and when it exceeds
+ # 8.4G, it is extended to 14.4G:
+ # vdo_pool_autoextend_threshold = 70
+ #
+ vdo_pool_autoextend_threshold = 100
+
+ # Configuration option activation/vdo_pool_autoextend_percent.
+ # Auto-extending a VDO pool adds this percent extra space.
+ # The amount of additional space added to a VDO pool is this
+ # percent of its current size.
+ #
+ # Example
+ # Using 70% autoextend threshold and 20% autoextend size, when a 10G
+ # VDO pool exceeds 7G, it is extended to 12G, and when it exceeds
+ # 8.4G, it is extended to 14.4G:
+ # vdo_pool_autoextend_percent = 20
+ #
+ # This configuration option has an automatic default value.
+ # vdo_pool_autoextend_percent = 20
+
# Configuration option activation/mlock_filter.
# Do not mlock these memory areas.
# While activating devices, I/O to devices being (re)configured is
# vgmetadatacopies = 0
# Configuration option metadata/pvmetadatasize.
- # Approximate number of sectors to use for each metadata copy.
- # VGs with large numbers of PVs or LVs, or VGs containing complex LV
- # structures, may need additional space for VG metadata. The metadata
- # areas are treated as circular buffers, so unused space becomes filled
- # with an archive of the most recent previous versions of the metadata.
+ # The default size of the metadata area in units of 512 byte sectors.
+ # The metadata area begins at an offset of the page size from the start
+ # of the device. The first PE is by default at 1 MiB from the start of
+ # the device. The space between these is the default metadata area size.
+ # The actual size of the metadata area may be larger than what is set
+ # here due to default_data_alignment making the first PE a MiB multiple.
+ # The metadata area begins with a 512 byte header and is followed by a
+ # circular buffer used for VG metadata text. The maximum size of the VG
+ # metadata is about half the size of the metadata buffer. VGs with large
+ # numbers of PVs or LVs, or VGs containing complex LV structures, may need
+ # additional space for VG metadata. The --metadatasize option overrides
+ # this setting.
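+ # For example, with a 4KiB page size and the first PE at 1MiB, the
+ # default metadata area size is 1MiB - 4KiB = 1020KiB (2040 sectors).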
+ # This configuration option does not have a default value defined.
- # This configuration option has an automatic default value.
- # pvmetadatasize = 255
# Configuration option metadata/pvmetadataignore.
# Ignore metadata areas on a new PV.
# This configuration option is advanced.
# This configuration option has an automatic default value.
# stripesize = 64
-
- # Configuration option metadata/dirs.
- # Directories holding live copies of text format metadata.
- # These directories must not be on logical volumes!
- # It's possible to use LVM with a couple of directories here,
- # preferably on different (non-LV) filesystems, and with no other
- # on-disk metadata (pvmetadatacopies = 0). Or this can be in addition
- # to on-disk metadata areas. The feature was originally added to
- # simplify testing and is not supported under low memory situations -
- # the machine could lock up. Never edit any files in these directories
- # by hand unless you are absolutely sure you know what you are doing!
- # Use the supplied toolset to make changes (e.g. vgcfgrestore).
- #
- # Example
- # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
- #
- # This configuration option is advanced.
- # This configuration option does not have a default value defined.
# }
# Configuration section report.
# warning is repeated when 85%, 90% and 95% of the pool is filled.
thin_library = "libdevmapper-event-lvm2thin.so"
+ # Configuration option dmeventd/thin_command.
+ # The plugin runs the command at each 5% increment when the thin-pool
+ # data volume or metadata volume usage rises above 50%.
+ # A command starting with the 'lvm ' prefix is an internal lvm command.
+ # You can write your own handler to customise the behaviour in more
+ # detail. A user handler is specified as a full path starting with '/'.
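+ #
+ # Example
+ # Run a custom handler instead of the built-in policy (hypothetical
+ # path):
+ # thin_command = "/usr/local/sbin/my-thin-handler"
+ #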
+ # This configuration option has an automatic default value.
+ # thin_command = "lvm lvextend --use-policies"
+
+ # Configuration option dmeventd/vdo_library.
+ # The library dmeventd uses when monitoring a VDO pool device.
+ # libdevmapper-event-lvm2vdo.so monitors the filling of a pool
+ # and emits a warning through syslog when the usage exceeds 80%. The
+ # warning is repeated when 85%, 90% and 95% of the pool is filled.
+ # This configuration option has an automatic default value.
+ # vdo_library = "libdevmapper-event-lvm2vdo.so"
+
+ # Configuration option dmeventd/vdo_command.
+ # The plugin runs the command at each 5% increment when the VDO pool
+ # volume usage rises above 50%.
+ # A command starting with the 'lvm ' prefix is an internal lvm command.
+ # You can write your own handler to customise the behaviour in more
+ # detail. A user handler is specified as a full path starting with '/'.
+ # This configuration option has an automatic default value.
+ # vdo_command = "lvm lvextend --use-policies"
+
# Configuration option dmeventd/executable.
# The full path to the dmeventd binary.
# This configuration option has an automatic default value.