# This is an example configuration file for the LVM2 system.
# It contains the default settings that would be used if there were no
# /etc/lvm/lvm.conf file.
#
# Refer to 'man lvm.conf' for further information including the file layout.
#
# To put this file in a different directory and override /etc/lvm, set
# the environment variable LVM_SYSTEM_DIR before running the tools.
#
# N.B. Take care that each setting only appears once if uncommenting
# example settings in this file.

# This section allows you to control how the configuration settings are handled.
config {

    # If enabled, any LVM2 configuration mismatch is reported.
    # This implies checking that the configuration key is understood
    # by LVM2 and that the value of the key is of a proper type.
    # If disabled, any configuration mismatch is ignored and the default
    # value is used instead without any warning (a message about the
    # configuration key not being found is issued in verbose mode only).
    checks = 1

    # If enabled, any configuration mismatch aborts the LVM2 process.
    abort_on_errors = 0

    # Directory where LVM looks for configuration profiles.
    profile_dir = "/etc/lvm/profile"
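
    # Example: a profile is a short file in lvm.conf syntax placed in this
    # directory and selected per command with the --profile option.
    # A sketch with a hypothetical profile name:
    #   /etc/lvm/profile/thin_perf.profile containing
    #       allocation { thin_pool_chunk_size = 512 }
    #   would be applied with e.g. 'lvcreate --profile thin_perf ...'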
}

# This section allows you to configure which block devices should
# be used by the LVM system.
devices {

    # Where do you want your volume groups to appear?
    dir = "/dev"

    # An array of directories that contain the device nodes you wish
    # to use with LVM2.
    scan = [ "/dev" ]

    # If set, the cache of block device nodes with all associated symlinks
    # will be constructed out of the existing udev database content.
    # This avoids using and opening any inapplicable non-block devices or
    # subdirectories found in the device directory. This setting is applied
    # to the udev-managed device directory only; other directories will be
    # scanned fully. LVM2 needs to be compiled with udev support for this
    # setting to take effect. N.B. Any device node or symlink not managed
    # by udev in the udev directory will be ignored with this setting on.
    obtain_device_list_from_udev = 0

    # If several entries in the scanned directories correspond to the
    # same block device and the tools need to display a name for the device,
    # all the pathnames are matched against each item in the following
    # list of regular expressions in turn and the first match is used.

    # By default no preferred names are defined.
    # preferred_names = [ ]

    # Try to avoid using undescriptive /dev/dm-N names, if present.
    preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]

    # In case no preferred name matches or if preferred_names are not
    # defined at all, builtin rules are used to determine the preference.
    #
    # The first builtin rule checks path prefixes and gives preference
    # based on this ordering (where "dev" depends on the devices/dev setting):
    #   /dev/mapper > /dev/disk > /dev/dm-* > /dev/block
    #
    # If the ordering above cannot be applied, the path with fewer slashes
    # gets preference.
    #
    # If the number of slashes is the same, a symlink gets preference.
    #
    # Finally, if none of the rules above applies, lexicographical order
    # is used over paths and the smallest one of all gets preference.


    # A filter that tells LVM2 to only use a restricted set of devices.
    # The filter consists of an array of regular expressions.  These
    # expressions can be delimited by a character of your choice, and
    # prefixed with either an 'a' (for accept) or 'r' (for reject).
    # The first expression found to match a device name determines if
    # the device will be accepted or rejected (ignored).  Devices that
    # don't match any patterns are accepted.

    # Be careful if there are symbolic links or multiple filesystem
    # entries for the same device, as each name is checked separately against
    # the list of patterns.  The effect is that if the first pattern in the
    # list to match a name is an 'a' pattern for any of the names, the device
    # is accepted; otherwise, if the first pattern in the list to match a name
    # is an 'r' pattern for any of the names, it is rejected; otherwise it is
    # accepted.

    # Don't have more than one filter line active at once: only one gets used.

    # Run vgscan after you change this parameter to ensure that
    # the cache file gets regenerated (see below).
    # If it doesn't do what you expect, check the output of 'vgscan -vvvv'.

    # If lvmetad is used, see the "A note about device filtering while
    # lvmetad is used" comment attached to the global/use_lvmetad setting.

    # By default we accept every block device:
    # filter = [ "a/.*/" ]

    # Exclude the cdrom drive:
    # filter = [ "r|/dev/cdrom|" ]

    # When testing I like to work with just loopback devices:
    # filter = [ "a/loop/", "r/.*/" ]

    # Or maybe all loops and IDE drives except hdc:
    # filter = [ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]

    # Use anchors if you want to be really specific:
    # filter = [ "a|^/dev/hda8$|", "r/.*/" ]

    # Since "filter" is often overridden from the command line, it is not
    # suitable for system-wide device filtering (udev rules, lvmetad). To hide
    # devices from LVM-specific udev processing and/or from lvmetad, you need
    # to set global_filter. The syntax is the same as for the normal "filter"
    # above. Devices that fail the global_filter are not even opened by LVM.

    # global_filter = []
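
    # Example: hide hypothetical /dev/zram* devices from LVM entirely while
    # accepting everything else (same syntax as "filter" above):
    # global_filter = [ "r|/dev/zram|", "a|.*|" ]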

    # The results of the filtering are cached on disk to avoid
    # rescanning dud devices (which can take a very long time).
    # By default this cache is stored in the /etc/lvm/cache directory
    # in a file called '.cache'.
    # It is safe to delete the contents: the tools regenerate it.
    # (The old setting 'cache' is still respected if neither of
    # these new ones is present.)
    # N.B. If obtain_device_list_from_udev is set to 1 the list of
    # devices is instead obtained from udev and any existing .cache
    # file is removed.
    cache_dir = "/etc/lvm/cache"
    cache_file_prefix = ""

    # You can turn off writing this cache file by setting this to 0.
    write_cache_state = 1

    # Advanced settings.

    # List of pairs of additional acceptable block device types found
    # in /proc/devices with the maximum (non-zero) number of partitions.
    # types = [ "fd", 16 ]

    # If sysfs is mounted (2.6 kernels), restrict device scanning to
    # the block devices it believes are valid.
    # 1 enables; 0 disables.
    sysfs_scan = 1

    # By default, LVM2 will ignore devices used as component paths
    # of device-mapper multipath devices.
    # 1 enables; 0 disables.
    multipath_component_detection = 1

    # By default, LVM2 will ignore devices used as components of
    # software RAID (md) devices by looking for md superblocks.
    # 1 enables; 0 disables.
    md_component_detection = 1

    # By default, if a PV is placed directly upon an md device, LVM2
    # will align its data blocks with the md device's stripe-width.
    # 1 enables; 0 disables.
    md_chunk_alignment = 1

    # Default alignment of the start of a data area in MiB.  If set to 0,
    # a value of 64KiB will be used.  Set to 1 for 1MiB, 2 for 2MiB, etc.
    # default_data_alignment = 1

    # By default, the start of a PV's data area will be a multiple of
    # the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
    # - minimum_io_size - the smallest request the device can perform
    #   w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
    # - optimal_io_size - the device's preferred unit of receiving I/O
    #   (e.g. MD's stripe width)
    # minimum_io_size is used if optimal_io_size is undefined (0).
    # If md_chunk_alignment is enabled, that detects the optimal_io_size.
    # This setting takes precedence over md_chunk_alignment.
    # 1 enables; 0 disables.
    data_alignment_detection = 1

    # Alignment (in KiB) of the start of the data area when creating a new PV.
    # md_chunk_alignment and data_alignment_detection are disabled if this
    # is set.  Set to 0 for the default alignment (see: default_data_alignment)
    # or the page size, if larger.
    data_alignment = 0

    # By default, the start of the PV's aligned data area will be shifted by
    # the 'alignment_offset' exposed in sysfs.  This offset is often 0 but
    # may be non-zero; e.g.: certain 4KB sector drives that compensate for
    # Windows partitioning will have an alignment_offset of 3584 bytes
    # (sector 7 is the lowest aligned logical block, the 4KB sectors start
    # at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
    # But note that pvcreate --dataalignmentoffset will skip this detection.
    # 1 enables; 0 disables.
    data_alignment_offset_detection = 1
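
    # Example: the offset can also be given manually at PV creation time,
    # e.g. shifting the data area by 7 sectors (device name hypothetical):
    #   pvcreate --dataalignmentoffset 7s /dev/sdb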

    # If, while scanning the system for PVs, LVM2 encounters a device-mapper
    # device that has its I/O suspended, it waits for it to become accessible.
    # Set this to 1 to skip such devices.  This should only be needed
    # in recovery situations.
    ignore_suspended_devices = 0

    # ignore_lvm_mirrors:  Introduced in version 2.02.104
    # This setting determines whether logical volumes of "mirror" segment
    # type are scanned for LVM labels.  This affects the ability of
    # mirrors to be used as physical volumes.  If 'ignore_lvm_mirrors'
    # is set to '1', it becomes impossible to create volume groups on top
    # of mirror logical volumes - i.e. to stack volume groups on mirrors.
    #
    # Allowing mirror logical volumes to be scanned (setting the value to '0')
    # can potentially cause LVM processes and I/O to the mirror to become
    # blocked.  This is due to the way that the "mirror" segment type handles
    # failures.  In order for the hang to manifest itself, an LVM command must
    # be run just after a failure and before the automatic LVM repair process
    # takes place OR there must be failures in multiple mirrors in the same
    # volume group at the same time with write failures occurring moments
    # before a scan of the mirror's labels.
    #
    # Note that these scanning limitations do not apply to the LVM RAID
    # types, like "raid1".  The RAID segment types handle failures in a
    # different way and are not subject to possible process or I/O blocking.
    #
    # Users are encouraged to set 'ignore_lvm_mirrors' to 1 if they
    # are using the "mirror" segment type.  Users that require volume group
    # stacking on mirrored logical volumes should consider using the "raid1"
    # segment type.  The "raid1" segment type is not available for
    # active/active clustered volume groups.
    #
    # Set to 1 to disallow stacking and thereby avoid a possible deadlock.
    ignore_lvm_mirrors = 1

    # During each LVM operation errors received from each device are counted.
    # If the counter of a particular device exceeds the limit set here, no
    # further I/O is sent to that device for the remainder of the respective
    # operation. Setting the parameter to 0 disables the counters altogether.
    disable_after_error_count = 0

    # Set to 0 to allow pvcreate --uuid to be used without requiring
    # --restorefile.
    require_restorefile_with_uuid = 1

    # Minimum size (in KiB) of block devices which can be used as PVs.
    # In a clustered environment all nodes must use the same value.
    # Any value smaller than 512KiB is ignored.

    # Ignore devices smaller than 2MiB, such as floppy drives.
    pv_min_size = 2048

    # The original built-in setting was 512 up to and including version 2.02.84.
    # pv_min_size = 512

    # Issue discards to a logical volume's underlying physical volume(s) when
    # the logical volume is no longer using the physical volumes' space (e.g.
    # lvremove, lvreduce, etc).  Discards inform the storage that a region is
    # no longer in use.  Storage that supports discards advertises the
    # protocol-specific way discards should be issued by the kernel (TRIM,
    # UNMAP, or WRITE SAME with the UNMAP bit set).  Not all storage will
    # support or benefit from discards, but SSDs and thinly provisioned LUNs
    # generally do.  If set to 1, discards will only be issued if both the
    # storage and kernel provide support.
    # 1 enables; 0 disables.
    issue_discards = 0
}

# This section allows you to configure the way in which LVM selects
# free space for its Logical Volumes.
allocation {

    # When searching for free space to extend an LV, the "cling"
    # allocation policy will choose space on the same PVs as the last
    # segment of the existing LV.  If there is insufficient space and a
    # list of tags is defined here, it will check whether any of them are
    # attached to the PVs concerned and then seek to match those PV tags
    # between existing extents and new extents.
    # Use the special tag "@*" as a wildcard to match any PV tag.

    # Example: LVs are mirrored between two sites within a single VG.
    # PVs are tagged with either @site1 or @site2 to indicate where
    # they are situated.

    # cling_tag_list = [ "@site1", "@site2" ]
    # cling_tag_list = [ "@*" ]

    # Changes made in version 2.02.85 extended the reach of the 'cling'
    # policies to detect more situations where data can be grouped
    # onto the same disks.  Set this to 0 to revert to the previous
    # algorithm.
    maximise_cling = 1

    # Whether to use the blkid library instead of native LVM2 code to detect
    # any existing signatures while creating new Physical Volumes and
    # Logical Volumes. LVM2 needs to be compiled with blkid wiping support
    # for this setting to take effect.
    #
    # LVM2 native detection code is currently able to recognize these signatures:
    #   - MD device signature
    #   - swap signature
    #   - LUKS signature
    # To see the list of signatures recognized by blkid, check the output
    # of the 'blkid -k' command. blkid can recognize more signatures than
    # the LVM2 native detection code, but due to the higher number of
    # signatures to be recognized, the signature scan can take more time
    # to complete.
    use_blkid_wiping = 1

    # Set to 1 to wipe any signatures found on newly-created Logical Volumes
    # automatically, in addition to zeroing of the first KiB on the LV
    # (controlled by the -Z/--zero y option).
    # The command line option -W/--wipesignatures takes precedence over this
    # setting.
    # The default is to wipe signatures when zeroing.
    #
    wipe_signatures_when_zeroing_new_lvs = 1

    # Set to 1 to guarantee that mirror logs will always be placed on
    # different PVs from the mirror images.  This was the default
    # until version 2.02.85.
    mirror_logs_require_separate_pvs = 0

    # Set to 1 to guarantee that cache_pool metadata will always be
    # placed on different PVs from the cache_pool data.
    cache_pool_metadata_require_separate_pvs = 0

    # Specify the minimal chunk size (in KiB) for cache pool volumes.
    # Using a chunk_size that is too large can result in wasteful use of
    # the cache, where small reads and writes can cause large sections of
    # an LV to be mapped into the cache.  However, choosing a chunk_size
    # that is too small can result in more overhead trying to manage the
    # numerous chunks that become mapped into the cache.  The former is
    # more of a problem than the latter in most cases, so we default to
    # a value that is on the smaller end of the spectrum.  Supported values
    # range from 32 KiB to 1048576 KiB in multiples of 32.
    # cache_pool_chunk_size = 64

    # Set to 1 to guarantee that thin pool metadata will always
    # be placed on different PVs from the pool data.
    thin_pool_metadata_require_separate_pvs = 0

    # Specify the chunk size calculation policy for thin pool volumes.
    # Possible options are:
    # "generic"        - if thin_pool_chunk_size is defined, use it.
    #                    Otherwise, calculate the chunk size based on
    #                    estimation and device hints exposed in sysfs:
    #                    the minimum_io_size. The chunk size is always
    #                    at least 64KiB.
    #
    # "performance"    - if thin_pool_chunk_size is defined, use it.
    #                    Otherwise, calculate the chunk size for
    #                    performance based on device hints exposed in
    #                    sysfs: the optimal_io_size. The chunk size is
    #                    always at least 512KiB.
    # thin_pool_chunk_size_policy = "generic"

    # Specify the minimal chunk size (in KiB) for thin pool volumes.
    # Use of a larger chunk size may improve performance for plain
    # thin volumes; however, using large chunks for snapshot volumes is
    # less efficient, as it consumes more space and takes extra time for
    # copying.
    # When unset, lvm tries to estimate the chunk size, starting from 64KiB.
    # Supported values are in the range 64 to 1048576.
    # thin_pool_chunk_size = 64

    # Specify the discards behaviour of the thin pool volume.
    # Select one of "ignore", "nopassdown", "passdown".
    # thin_pool_discards = "passdown"
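
    # The discards behaviour of an existing thin pool can also be changed
    # at run time, e.g. (VG/pool names hypothetical):
    #   lvchange --discards nopassdown vg0/thinpool0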

    # Set to 0 to disable zeroing of thin pool data chunks before their
    # first use.
    # N.B. zeroing with a larger thin pool chunk size degrades performance.
    # thin_pool_zero = 1
}

# This section allows you to configure the nature of the
# information that LVM2 reports.
log {

    # Controls the messages sent to stdout or stderr.
    # There are three levels of verbosity, 3 being the most verbose.
    verbose = 0

    # Set to 1 to suppress all non-essential messages from stdout.
    # This has the same effect as -qq.
    # When this is set, the following commands still produce output:
    # dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
    # pvs, version, vgcfgrestore -l, vgdisplay, vgs.
    # Non-essential messages are shifted from log level 4 to log level 5
    # for syslog and lvm2_log_fn purposes.
    # Any 'yes' or 'no' questions not overridden by other arguments
    # are suppressed and default to 'no'.
    silent = 0

    # Should we send log messages through syslog?
    # 1 is yes; 0 is no.
    syslog = 1

    # Should we log error and debug messages to a file?
    # By default there is no log file.
    # file = "/var/log/lvm2.log"

    # Should we overwrite the log file each time the program is run?
    # By default we append.
    overwrite = 0

    # What level of log messages should we send to the log file and/or syslog?
    # There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
    # 7 is the most verbose (LOG_DEBUG).
    level = 0

    # Format of output messages:
    # Whether or not (1 or 0) to indent messages according to their severity.
    indent = 1

    # Whether or not (1 or 0) to display the command name on each line of output.
    command_names = 0

    # A prefix to use before the message text (but after the command name,
    # if selected).  Default is two spaces, so you can see/grep the severity
    # of each message.
    prefix = "  "

    # To make the messages look similar to the original LVM tools, use:
    #   indent = 0
    #   command_names = 1
    #   prefix = " -- "

    # Set this if you want log messages during activation.
    # Don't use this in low memory situations (can deadlock).
    # activation = 0

    # Some debugging messages are assigned to a class and only appear
    # in debug output if the class is listed here.
    # Classes currently available:
    #   memory, devices, activation, allocation, lvmetad, metadata, cache,
    #   locking
    # Use "all" to see everything.
    debug_classes = [ "memory", "devices", "activation", "allocation",
                      "lvmetad", "metadata", "cache", "locking" ]
}

# Configuration of metadata backups and archiving.  In LVM2 when we
# talk about a 'backup' we mean making a copy of the metadata for the
# *current* system.  The 'archive' contains old metadata configurations.
# Backups are stored in a human readable text format.
backup {

    # Should we maintain a backup of the current metadata configuration?
    # Use 1 for Yes; 0 for No.
    # Think very hard before turning this off!
    backup = 1

    # Where shall we keep it?
    # Remember to back up this directory regularly!
    backup_dir = "/etc/lvm/backup"

    # Should we maintain an archive of old metadata configurations?
    # Use 1 for Yes; 0 for No.
    # On by default.  Think very hard before turning this off.
    archive = 1

    # Where should archived files go?
    # Remember to back up this directory regularly!
    archive_dir = "/etc/lvm/archive"

    # What is the minimum number of archive files you wish to keep?
    retain_min = 10

    # What is the minimum time you wish to keep an archive file for?
    retain_days = 30
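
    # Example: archived metadata can be listed and restored with
    # vgcfgrestore, e.g. (VG and archive file names hypothetical):
    #   vgcfgrestore --list vg0
    #   vgcfgrestore -f /etc/lvm/archive/vg0_00001-1234567890.vg vg0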
}

# Settings for running LVM2 in shell (readline) mode.
shell {

    # Number of lines of history to store in ~/.lvm_history
    history_size = 100
}


# Miscellaneous global LVM2 settings
global {
    # The file creation mask for any files and directories created.
    # Interpreted as octal if the first digit is zero.
    umask = 077

    # Allow other users to read the files:
    # umask = 022

    # Enabling test mode means that no changes to the on-disk metadata
    # will be made.  Equivalent to having the -t option on every
    # command.  Defaults to off.
    test = 0

    # Default value for the --units argument.
    units = "h"

    # Since version 2.02.54, the tools distinguish between powers of
    # 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
    # KB, MB, GB).
    # If you have scripts that depend on the old behaviour, set this to 0
    # temporarily until you update them.
    si_unit_consistency = 1

    # Whether or not to display a unit suffix for sizes. This setting has
    # no effect if the units are in human-readable form (global/units="h"),
    # in which case the suffix is always displayed.
    suffix = 1

    # Whether or not to communicate with the kernel device-mapper.
    # Set to 0 if you want to use the tools to manipulate LVM metadata
    # without activating any logical volumes.
    # If the device-mapper kernel driver is not present in your kernel,
    # setting this to 0 should suppress the error messages.
    activation = 1

    # If we can't communicate with device-mapper, should we try running
    # the LVM1 tools?
    # This option only applies to 2.4 kernels and is provided to help you
    # switch between device-mapper kernels and LVM1 kernels.
    # The LVM1 tools need to be installed with .lvm1 suffixes,
    # e.g. vgscan.lvm1, and they will stop working after you start using
    # the new lvm2 on-disk metadata format.
    # The default value is set when the tools are built.
    # fallback_to_lvm1 = 0

    # The default metadata format that commands should use - "lvm1" or "lvm2".
    # The command line override is -M1 or -M2.
    # Defaults to "lvm2".
    # format = "lvm2"

    # Location of the proc filesystem.
    proc = "/proc"

    # Type of locking to use. Defaults to local file-based locking (1).
    # Turn locking off by setting to 0 (dangerous: risks metadata corruption
    # if LVM2 commands get run concurrently).
    # Type 2 uses the external shared library locking_library.
    # Type 3 uses built-in clustered locking.
    # Type 4 uses read-only locking which forbids any operations that might
    # change metadata.
    # Type 5 offers dummy locking for tools that do not need any locks.
    # You should not need to set this directly: the tools will select when
    # to use it instead of the configured locking_type.  Do not use lvmetad or
    # the kernel device-mapper driver with this locking type.
    # It is used by the --readonly option that offers read-only access to
    # Volume Group metadata that cannot be locked safely because it belongs to
    # an inaccessible domain and might be in use, for example a virtual machine
    # image or a disk that is shared by a clustered machine.
    #
    # N.B. Don't use lvmetad with locking type 3 as lvmetad is not yet
    # supported in a clustered environment. If use_lvmetad=1 and locking_type=3
    # are set at the same time, LVM always issues a warning message about this
    # and then automatically disables lvmetad use.
    locking_type = 1

    # Set to 0 to fail when a lock request cannot be satisfied immediately.
    wait_for_locks = 1

    # If using external locking (type 2) and initialisation fails,
    # with this set to 1 an attempt will be made to use the built-in
    # clustered locking.
    # If you are using a customised locking_library you should set this to 0.
    fallback_to_clustered_locking = 1

    # If an attempt to initialise type 2 or type 3 locking failed, perhaps
    # because cluster components such as clvmd are not running, with this set
    # to 1 an attempt will be made to use local file-based locking (type 1).
    # If this succeeds, only commands against local volume groups will proceed.
    # Volume Groups marked as clustered will be ignored.
    fallback_to_local_locking = 1

    # Local non-LV directory that holds file-based locks while commands are
    # in progress.  A directory like /tmp that may get wiped on reboot is OK.
    locking_dir = "/var/lock/lvm"

    # Whenever there are competing read-only and read-write access requests for
    # a volume group's metadata, instead of always granting the read-only
    # requests immediately, delay them to allow the read-write requests to be
    # serviced.  Without this setting, write access may be stalled by a high
    # volume of read-only requests.
    # N.B. This option only affects locking_type = 1, i.e. local file-based
    # locking.
    prioritise_write_locks = 1

    # Other entries can go here to allow you to load shared libraries,
    # e.g. if support for LVM1 metadata was compiled as a shared library use
    #   format_libraries = "liblvm2format1.so"
    # Full pathnames can be given.

    # Search this directory first for shared libraries.
    #   library_dir = "/lib"

    # The external locking library to load if locking_type is set to 2.
    #   locking_library = "liblvm2clusterlock.so"

    # Treat any internal errors as fatal errors, aborting the process that
    # encountered the internal error. Please only enable for debugging.
    abort_on_internal_errors = 0

    # Check whether the CRC matches when a parsed VG is used multiple times.
    # This is useful to catch unexpected modification of the internally cached
    # volume group structure. Please only enable for debugging.
    detect_internal_vg_cache_corruption = 0

    # If set to 1, no operations that change on-disk metadata will be permitted.
    # Additionally, read-only commands that encounter metadata in need of repair
    # will still be allowed to proceed exactly as if the repair had been
    # performed (except for the unchanged vg_seqno).
    # Inappropriate use could mess up your system, so seek advice first!
    metadata_read_only = 0

    # 'mirror_segtype_default' defines which segtype will be used when the
    # shorthand '-m' option is used for mirroring.  The possible options are:
    #
    # "mirror" - The original RAID1 implementation provided by LVM2/DM.  It is
    #            characterized by a flexible log solution (core, disk, mirrored)
    #            and by the necessity to block I/O while reconfiguring in the
    #            event of a failure.
    #
    #            There is an inherent race in the dmeventd failure handling
    #            logic with snapshots of devices using this type of RAID1 that
    #            in the worst case could cause a deadlock.
    #              Ref: https://bugzilla.redhat.com/show_bug.cgi?id=817130#c10
    #
    # "raid1"  - This implementation leverages MD's RAID1 personality through
    #            device-mapper.  It is characterized by a lack of log options.
    #            (A log is always allocated for every device and they are placed
    #            on the same device as the image - no separate devices are
    #            required.)  This mirror implementation does not require I/O
    #            to be blocked in the kernel in the event of a failure.
    #            This mirror implementation is not cluster-aware and cannot be
    #            used in a shared (active/active) fashion in a cluster.
    #
    # Specify the '--type <mirror|raid1>' option to override this default
    # setting.
    mirror_segtype_default = "mirror"

    # 'raid10_segtype_default' determines the segment types used by default
    # when the '--stripes/-i' and '--mirrors/-m' arguments are both specified
    # during the creation of a logical volume.
    # Possible settings include:
    #
    # "raid10" - This implementation leverages MD's RAID10 personality through
    #            device-mapper.
    #
    # "mirror" - LVM will layer the 'mirror' and 'stripe' segment types.  It
    #            will do this by creating a mirror on top of striped sub-LVs;
    #            effectively creating a RAID 0+1 array.  This is suboptimal
    #            in terms of providing redundancy and performance. Changing to
    #            this setting is not advised.
    # Specify the '--type <raid10|mirror>' option to override this default
    # setting.
    raid10_segtype_default = "mirror"

    # The default format for displaying LV names in lvdisplay was changed
    # in version 2.02.89 to show the LV name and path separately.
    # Previously this was always shown as /dev/vgname/lvname even when that
    # was never a valid path in the /dev filesystem.
    # Set to 1 to reinstate the previous format.
    #
    # lvdisplay_shows_full_device_path = 0

    # Whether to use (trust) a running instance of lvmetad. If this is set to
    # 0, all commands fall back to the usual scanning mechanisms. When set to 1
    # *and* when lvmetad is running (automatically instantiated by making use of
    # systemd's socket-based service activation or run as an initscripts service
    # or run manually), the volume group metadata and PV state flags are obtained
    # from the lvmetad instance and no scanning is done by the individual
    # commands. In a setup with lvmetad, lvmetad udev rules *must* be set up for
    # LVM to work correctly. Without proper udev rules, all changes in block
    # device configuration will be *ignored* until a manual 'pvscan --cache'
    # is performed. These rules are installed by default.
    #
    # If lvmetad has been running while use_lvmetad was 0, it MUST be stopped
    # before changing use_lvmetad to 1 and started again afterwards.
    #
    # If using lvmetad, volume activation is also switched to automatic
    # event-based mode. In this mode, the volumes are activated based on
    # incoming udev events that automatically inform lvmetad about new PVs
    # that appear in the system. Once the VG is complete (all the PVs are
    # present), it is auto-activated. The activation/auto_activation_volume_list
    # setting controls which volumes are auto-activated (all by default).
    #
    # A note about device filtering while lvmetad is used:
    # When lvmetad is updated (either automatically based on udev events
    # or directly by a pvscan --cache <device> call), the devices/filter
    # is ignored and all devices are scanned by default. lvmetad always
    # keeps unfiltered information which is then provided to LVM commands,
    # and each LVM command then does the filtering based on the
    # devices/filter setting itself.
    # To prevent scanning devices completely, even when using lvmetad,
    # the devices/global_filter must be used.
    # N.B. Don't use lvmetad with locking type 3 as lvmetad is not yet
    # supported in a clustered environment. If use_lvmetad=1 and locking_type=3
    # are set at the same time, LVM always issues a warning message about this
    # and then automatically disables lvmetad use.
    use_lvmetad = 0

    # Full path of the utility called to check that a thin metadata device
    # is in a state that allows it to be used.
    # Each time a thin pool needs to be activated or after it is deactivated
    # this utility is executed. The activation will only proceed if the utility
    # has an exit status of 0.
    # Set to "" to skip this check.  (Not recommended.)
    # The thin tools are available as part of the device-mapper-persistent-data
    # package from https://github.com/jthornber/thin-provisioning-tools.
    #
    # thin_check_executable = "/usr/sbin/thin_check"

    # Array of string options passed to the thin_check command. By default,
    # the "-q" option is used for quiet output.
    # With thin_check version 2.1 or newer you can add "--ignore-non-fatal-errors"
    # to let it pass through ignorable errors and fix them later.
    # With thin_check version 3.2 or newer you should add
    # "--clear-needs-check-flag".
    #
    # thin_check_options = [ "-q", "--clear-needs-check-flag" ]

    # Full path of the utility called to repair a thin metadata device.
    # Each time a thin pool needs repair, this utility is executed.
    # See thin_check_executable for how to obtain the binaries.
    #
    # thin_repair_executable = "/usr/sbin/thin_repair"

    # Array of extra string options passed to the thin_repair command.
    # thin_repair_options = [ "" ]

    # Full path of the utility called to dump thin metadata content.
    # See thin_check_executable for how to obtain the binaries.
    #
    # thin_dump_executable = "/usr/sbin/thin_dump"

    # If set, the given features are not used by the thin driver.
    # This can be helpful not just for testing, but e.g. to avoid using
    # a problematic implementation of some thin feature.
    # Features:
    #   block_size
    #   discards
    #   discards_non_power_2
    #   external_origin
    #   metadata_resize
    #   external_origin_extend
    #
    # thin_disabled_features = [ "discards", "block_size" ]

    # Full path of the utility called to check that a cache metadata device
    # is in a state that allows it to be used.
    # Each time a cached LV needs to be used or after it is deactivated
    # this utility is executed. The activation will only proceed if the utility
    # has an exit status of 0.
    # Set to "" to skip this check.  (Not recommended.)
    # The cache tools are available as part of the device-mapper-persistent-data
    # package from https://github.com/jthornber/thin-provisioning-tools.
    #
    # cache_check_executable = "/usr/sbin/cache_check"

    # Array of string options passed to the cache_check command. By default,
    # the "-q" option is used for quiet output.
    #
    # cache_check_options = [ "-q" ]

    # Full path of the utility called to repair a cache metadata device.
    # Each time cache metadata needs repair, this utility is executed.
    # See cache_check_executable for how to obtain the binaries.
    #
    # cache_repair_executable = "/usr/sbin/cache_repair"

    # Array of extra string options passed to the cache_repair command.
    # cache_repair_options = [ "" ]

    # Full path of the utility called to dump cache metadata content.
    # See cache_check_executable for how to obtain the binaries.
    #
    # cache_dump_executable = "/usr/sbin/cache_dump"
}

activation {
    # Set to 1 to perform internal checks on the operations issued to
    # libdevmapper.  Useful for debugging problems with activation.
    # Some of the checks may be expensive, so it's best to use this
    # only when there seems to be a problem.
    checks = 0

    # Set to 0 to disable udev synchronisation (if compiled into the binaries).
    # Processes will not wait for notification from udev.
    # They will continue irrespective of any possible udev processing
    # in the background.  You should only use this if udev is not running
    # or has rules that ignore the devices LVM2 creates.
    # The command line argument --nodevsync takes precedence over this setting.
    # If set to 1 when udev is not running, and there are LVM2 processes
    # waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
    udev_sync = 1

    # Set to 0 to disable the udev rules installed by LVM2 (if built with
    # --enable-udev_rules). LVM2 will then manage the /dev nodes and symlinks
    # for active logical volumes directly itself.
    # N.B. Manual intervention may be required if this setting is changed
    # while any logical volumes are active.
    udev_rules = 1

    # Set to 1 for LVM2 to verify operations performed by udev. This turns on
    # additional checks (and if necessary, repairs) on entries in the device
    # directory after udev has completed processing its events.
    # Useful for diagnosing problems with LVM2/udev interactions.
    verify_udev_operations = 0

    # If set to 1 and if deactivation of an LV fails, perhaps because
    # a process run from a quick udev rule temporarily opened the device,
    # retry the operation for a few seconds before failing.
    retry_deactivation = 1

    # How to fill in missing stripes when activating an incomplete volume.
    # Using "error" will make inaccessible parts of the device return
    # I/O errors on access.  You can instead use a device path, in which
    # case that device will be used in place of missing stripes.
    # But note that using anything other than "error" with mirrored
    # or snapshotted volumes is likely to result in data corruption.
    missing_stripe_filler = "error"

    # The linear target is an optimised version of the striped target
    # that only handles a single stripe.  Set this to 0 to disable this
    # optimisation and always use the striped target.
    use_linear_target = 1

    # How much stack (in KiB) to reserve for use while devices are suspended.
    # Prior to version 2.02.89 this used to be set to 256KiB.
    reserved_stack = 64

    # How much memory (in KiB) to reserve for use while devices are suspended.
    reserved_memory = 8192

    # Nice value used while devices are suspended.
    process_priority = -18

    # If volume_list is defined, each LV is only activated if there is a
    # match against the list.
    #
    #   "vgname" and "vgname/lvname" are matched exactly.
    #   "@tag" matches any tag set in the LV or VG.
    #   "@*" matches if any tag defined on the host is also set in the LV or VG.
    #
    # If any host tags exist but volume_list is not defined, a default
    # single-entry list containing "@*" is assumed.
    #
    # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]

    # If auto_activation_volume_list is defined, each LV that is to be
    # activated with the autoactivation option (--activate ay/-a ay) is
    # first checked against the list. There are two scenarios in which
    # the autoactivation option is used:
    #
    #   - automatic activation of volumes based on incoming PVs. If all the
    #     PVs making up a VG are present in the system, the autoactivation
    #     is triggered. This requires lvmetad (global/use_lvmetad=1) and udev
    #     to be running. In this case, "pvscan --cache -aay" is called
    #     automatically without any user intervention while processing
    #     udev events. Please make sure you define auto_activation_volume_list
    #     properly so only the volumes you want and expect are autoactivated.
    #
    #   - direct activation on the command line with the autoactivation option.
    #     In this case, the user calls "vgchange --activate ay/-a ay" or
    #     "lvchange --activate ay/-a ay" directly.
    #
    # By default, auto_activation_volume_list is not defined and all
    # volumes will be activated either automatically or by using --activate ay/-a ay.
    #
    # N.B. The "activation/volume_list" is still honoured in all cases so even
    # if the VG/LV passes the auto_activation_volume_list, it still needs to
    # pass the volume_list for it to be activated in the end.

    # If auto_activation_volume_list is defined but empty, no volumes will be
    # activated automatically and --activate ay/-a ay will do nothing.
    #
    # auto_activation_volume_list = []

    # If auto_activation_volume_list is defined and not empty, only matching
    # volumes will be activated either automatically or by using --activate ay/-a ay.
    #
    #   "vgname" and "vgname/lvname" are matched exactly.
    #   "@tag" matches any tag set in the LV or VG.
    #   "@*" matches if any tag defined on the host is also set in the LV or VG.
    #
    # auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]

    # If read_only_volume_list is defined, each LV that is to be activated
    # is checked against the list, and if it matches, it is activated
    # in read-only mode.  (This overrides '--permission rw' stored in the
    # metadata.)
    #
    #   "vgname" and "vgname/lvname" are matched exactly.
    #   "@tag" matches any tag set in the LV or VG.
    #   "@*" matches if any tag defined on the host is also set in the LV or VG.
    #
    # read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]

    # Each LV can have an 'activation skip' flag stored persistently against it.
    # During activation, this flag is used to decide whether such an LV is skipped.
    # The 'activation skip' flag can be set during LV creation and by default it
    # is automatically set for thin snapshot LVs. The 'auto_set_activation_skip'
    # setting enables or disables this automatic setting of the flag while LVs
    # are created.
    # auto_set_activation_skip = 1

    # For RAID or 'mirror' segment types, 'raid_region_size' is the
    # size (in KiB) of each:
    # - synchronization operation when initializing
    # - copy operation when performing a 'pvmove' (using 'mirror' segtype)
    # This setting has replaced 'mirror_region_size' since version 2.02.99.
    raid_region_size = 512
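
    # The region size can also be set per LV at creation time with the
    # --regionsize option, e.g. (VG/LV names hypothetical):
    #   lvcreate --type raid1 -m 1 --regionsize 1024k -L 10G -n lv0 vg0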

    # Setting to use when there is no readahead value stored in the metadata.
    #
    # "none" - Disable readahead.
    # "auto" - Use default value chosen by kernel.
    readahead = "auto"

    # 'raid_fault_policy' defines how a device failure in a RAID logical
    # volume is handled.  This includes logical volumes that have the following
    # segment types: raid1, raid4, raid5*, and raid6*.
    #
    # In the event of a failure, the following policies will determine what
    # actions are performed during the automated response to failures (when
    # dmeventd is monitoring the RAID logical volume) and when 'lvconvert' is
    # called manually with the options '--repair' and '--use-policies'.
    #
    # "warn"     - Use the system log to warn the user that a device in the
    #              RAID logical volume has failed.  It is left to the user to
    #              run 'lvconvert --repair' manually to remove or replace the
    #              failed device.  As long as the number of failed devices does
    #              not exceed the redundancy of the logical volume (1 device
    #              for raid4/5, 2 for raid6, etc) the logical volume will
    #              remain usable.
    #
    # "allocate" - Attempt to use any extra physical volumes in the volume
    #              group as spares and replace faulty devices.
    #
    raid_fault_policy = "warn"
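
    # With "warn", the repair is run manually, optionally applying the
    # policies configured here, e.g. (VG/LV names hypothetical):
    #   lvconvert --repair --use-policies vg0/raidlv0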

    # 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
    # how a device failure affecting a mirror (of "mirror" segment type) is
    # handled.  A mirror is composed of mirror images (copies) and a log.
    # A disk log ensures that a mirror does not need to be re-synced
    # (all copies made the same) every time a machine reboots or crashes.
    #
    # In the event of a failure, the specified policy will be used to determine
    # what happens. This applies to automatic repairs (when the mirror is being
    # monitored by dmeventd) and to manual lvconvert --repair when
    # --use-policies is given.
    #
    # "remove" - Simply remove the faulty device and run without it.  If
    #            the log device fails, the mirror would convert to using
    #            an in-memory log.  This means the mirror will not
    #            remember its sync status across crashes/reboots and
    #            the entire mirror will be re-synced.  If a
    #            mirror image fails, the mirror will convert to a
    #            non-mirrored device if there is only one remaining good
    #            copy.
    #
    # "allocate" - Remove the faulty device and try to allocate space on
    #            a new device to be a replacement for the failed device.
    #            Using this policy for the log is fast and maintains the
    #            ability to remember sync state through crashes/reboots.
    #            Using this policy for a mirror device is slow, as it
    #            requires the mirror to resynchronize the devices, but it
    #            will preserve the mirror characteristic of the device.
    #            This policy acts like "remove" if no suitable device and
    #            space can be allocated for the replacement.
    #
    # "allocate_anywhere" - Not yet implemented. Useful to place the log device
    #            temporarily on the same physical volume as one of the mirror
    #            images. This policy is not recommended for mirror devices
    #            since it would break the redundant nature of the mirror. This
    #            policy acts like "remove" if no suitable device and space can
    #            be allocated for the replacement.

    mirror_log_fault_policy = "allocate"
    mirror_image_fault_policy = "remove"

    # 'snapshot_autoextend_threshold' and 'snapshot_autoextend_percent' define
    # how to handle automatic snapshot extension. The former defines when the
    # snapshot should be extended: when its space usage exceeds this many
    # percent. The latter defines how much extra space should be allocated for
    # the snapshot, in percent of its current size.
    #
    # For example, if you set snapshot_autoextend_threshold to 70 and
    # snapshot_autoextend_percent to 20, whenever a snapshot exceeds 70% usage,
    # it will be extended by another 20%. For a 1G snapshot, using up 700M will
    # trigger a resize to 1.2G. When the usage exceeds 840M, the snapshot will
    # be extended to 1.44G, and so on.
    #
    # Setting snapshot_autoextend_threshold to 100 disables automatic
    # extensions. The minimum value is 50 (a setting below 50 will be treated
    # as 50).

    snapshot_autoextend_threshold = 100
    snapshot_autoextend_percent = 20

    # 'thin_pool_autoextend_threshold' and 'thin_pool_autoextend_percent' define
    # how to handle automatic pool extension. The former defines when the
    # pool should be extended: when its space usage exceeds this many
    # percent. The latter defines how much extra space should be allocated for
    # the pool, in percent of its current size.
    #
    # For example, if you set thin_pool_autoextend_threshold to 70 and
    # thin_pool_autoextend_percent to 20, whenever a pool exceeds 70% usage,
    # it will be extended by another 20%. For a 1G pool, using up 700M will
    # trigger a resize to 1.2G. When the usage exceeds 840M, the pool will
    # be extended to 1.44G, and so on.
    #
    # Setting thin_pool_autoextend_threshold to 100 disables automatic
    # extensions. The minimum value is 50 (a setting below 50 will be treated
    # as 50).

    thin_pool_autoextend_threshold = 100
    thin_pool_autoextend_percent = 20

    # While activating devices, I/O to devices being (re)configured is
    # suspended, and as a precaution against deadlocks, LVM2 needs to pin
    # any memory it is using so it is not paged out.  Groups of pages that
    # are known not to be accessed during activation need not be pinned
    # into memory.  Each string listed in this setting is compared against
    # each line in /proc/self/maps, and the pages corresponding to any
    # lines that match are not pinned.  On some systems locale-archive was
    # found to make up over 80% of the memory used by the process.
    # mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]

    # Set to 1 to revert to the default behaviour prior to version 2.02.62
    # which used mlockall() to pin the whole process's memory while activating
    # devices.
    use_mlockall = 0

    # Monitoring is enabled by default when activating logical volumes.
    # Set to 0 to disable monitoring, or use the --ignoremonitoring option.
    monitoring = 1

    # When pvmove or lvconvert must wait for the kernel to finish
    # synchronising or merging data, they check and report progress
    # at intervals of this number of seconds.  The default is 15 seconds.
    # If this is set to 0 and there is only one thing to wait for, there
    # are no progress reports, but the process is awoken as soon as the
    # operation is complete.
    polling_interval = 15

    # 'activation_mode' determines how Logical Volumes are activated if
    # any devices are missing.  Possible settings are:
    #
    # "complete" - Only allow activation of an LV if all of the Physical
    #              Volumes it uses are present.  Other PVs in the Volume
    #              Group may be missing.
    #
    # "degraded" - Like "complete", but additionally RAID Logical Volumes of
    #              segment type raid1, raid4, raid5, raid6 and raid10 will
    #              be activated if there is no data loss, i.e. they have
    #              sufficient redundancy to present the entire addressable
    #              range of the Logical Volume.
    #
    # "partial"  - Allows the activation of any Logical Volume even if
    #              a missing or failed PV could cause data loss with a
    #              portion of the Logical Volume inaccessible.
    #              This setting should not normally be used, but may
    #              sometimes assist with data recovery.
    #
    # This setting was introduced in LVM version 2.02.108.  It corresponds
    # to the '--activationmode' option for lvchange and vgchange.
    activation_mode = "degraded"
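
    # The same mode can be requested per command via --activationmode,
    # e.g. (VG name hypothetical):
    #   vgchange -ay --activationmode degraded vg0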
4 - 1067
}
1068
 
9 - 1069
# Report settings.
1070
#
1071
# report {
1072
    # Align columns on report output.
1073
    # aligned=1
4 - 1074
 
9 - 1075
    # When buffered reporting is used, the report's content is appended
1076
    # incrementally to include each object being reported until the report
1077
    # is flushed to output which normally happens at the end of command
1078
    # execution. Otherwise, if buffering is not used, each object is
1079
    # reported as soon as its processing is finished.
1080
    # buffered=1
1081
 
1082
    # Show headings for columns on report.
1083
    # headings=1
1084
 
1085
    # A separator to use on report after each field.
1086
    # separator=" "
1087
 
1088
    # A separator to use for list items when reported.
1089
    # list_item_separator=","
1090
 
1091
    # Use a field name prefix for each field reported.
1092
    # prefixes=0
1093
 
1094
    # Quote field values when using field name prefixes.
1095
    # quoted=1
1096
 
1097
    # Output each column as a row. If set, this also implies report/prefixes=1.
1098
    # colums_as_rows=0
1099
 
1100
    # Use binary values "0" or "1" instead of descriptive literal values for
1101
    # columns that have exactly two valid values to report (not counting the
1102
    # "unknown" value which denotes that the value could not be determined).
1103
    #
1104
    # binary_values_as_numeric = 0
 
    # Comma separated list of columns to sort by when reporting 'lvm devtypes' command.
    # See 'lvm devtypes -o help' for the list of possible fields.
    # devtypes_sort="devtype_name"
 
    # Comma separated list of columns to report for 'lvm devtypes' command.
    # See 'lvm devtypes -o help' for the list of possible fields.
    # devtypes_cols="devtype_name,devtype_max_partitions,devtype_description"
 
    # Comma separated list of columns to report for 'lvm devtypes' command in verbose mode.
    # See 'lvm devtypes -o help' for the list of possible fields.
    # devtypes_cols_verbose="devtype_name,devtype_max_partitions,devtype_description"
 
    # Comma separated list of columns to sort by when reporting 'lvs' command.
    # See 'lvs -o help' for the list of possible fields.
    # lvs_sort="vg_name,lv_name"
 
    # Comma separated list of columns to report for 'lvs' command.
    # See 'lvs -o help' for the list of possible fields.
    # lvs_cols="lv_name,vg_name,lv_attr,lv_size,pool_lv,origin,data_percent,metadata_percent,move_pv,mirror_log,copy_percent,convert_lv"
 
    # Comma separated list of columns to report for 'lvs' command in verbose mode.
    # See 'lvs -o help' for the list of possible fields.
    # lvs_cols_verbose="lv_name,vg_name,seg_count,lv_attr,lv_size,lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,pool_lv,origin,data_percent,metadata_percent,move_pv,copy_percent,mirror_log,convert_lv"
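 
    # These defaults only set the built-in column lists; they can be
    # overridden per invocation (illustrative; 'vg00' is a placeholder):
    #   lvs -o lv_name,lv_size -O -lv_size vg00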
 
    # Comma separated list of columns to sort by when reporting 'vgs' command.
    # See 'vgs -o help' for the list of possible fields.
    # vgs_sort="vg_name"
 
    # Comma separated list of columns to report for 'vgs' command.
    # See 'vgs -o help' for the list of possible fields.
    # vgs_cols="vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free"
 
    # Comma separated list of columns to report for 'vgs' command in verbose mode.
    # See 'vgs -o help' for the list of possible fields.
    # vgs_cols_verbose="vg_name,vg_attr,vg_extent_size,pv_count,lv_count,snap_count,vg_size,vg_free,vg_uuid,vg_profile"
 
    # Comma separated list of columns to sort by when reporting 'pvs' command.
    # See 'pvs -o help' for the list of possible fields.
    # pvs_sort="pv_name"
 
    # Comma separated list of columns to report for 'pvs' command.
    # See 'pvs -o help' for the list of possible fields.
    # pvs_cols="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free"
 
    # Comma separated list of columns to report for 'pvs' command in verbose mode.
    # See 'pvs -o help' for the list of possible fields.
    # pvs_cols_verbose="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,dev_size,pv_uuid"
 
    # Comma separated list of columns to sort by when reporting 'lvs --segments' command.
    # See 'lvs --segments -o help' for the list of possible fields.
    # segs_sort="vg_name,lv_name,seg_start"
 
    # Comma separated list of columns to report for 'lvs --segments' command.
    # See 'lvs --segments -o help' for the list of possible fields.
    # segs_cols="lv_name,vg_name,lv_attr,stripes,segtype,seg_size"
 
    # Comma separated list of columns to report for 'lvs --segments' command in verbose mode.
    # See 'lvs --segments -o help' for the list of possible fields.
    # segs_cols_verbose="lv_name,vg_name,lv_attr,seg_start,seg_size,stripes,segtype,stripesize,chunksize"
 
    # Comma separated list of columns to sort by when reporting 'pvs --segments' command.
    # See 'pvs --segments -o help' for the list of possible fields.
    # pvsegs_sort="pv_name,pvseg_start"
 
    # Comma separated list of columns to report for 'pvs --segments' command.
    # See 'pvs --segments -o help' for the list of possible fields.
    # pvsegs_cols="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size"
 
    # Comma separated list of columns to report for 'pvs --segments' command in verbose mode.
    # See 'pvs --segments -o help' for the list of possible fields.
    # pvsegs_cols_verbose="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size,lv_name,seg_start_pe,segtype,seg_pe_ranges"
#}
 
####################
# Advanced section #
####################
 
# Metadata settings
#
# metadata {
    # Default number of copies of metadata to hold on each PV.  0, 1 or 2.
    # You might want to override it from the command line with 0
    # when running pvcreate on new PVs which are to be added to large VGs.
 
    # pvmetadatacopies = 1
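 
    # For example (illustrative; '/dev/sdb' is a placeholder device):
    #   pvcreate --pvmetadatacopies 0 /dev/sdb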
 
    # Default number of copies of metadata to maintain for each VG.
    # If set to a non-zero value, LVM automatically chooses which of
    # the available metadata areas to use to achieve the requested
    # number of copies of the VG metadata.  If you set a value larger
    # than the total number of metadata areas available then
    # metadata is stored in them all.
    # The default value of 0 ("unmanaged") disables this automatic
    # management and allows you to control which metadata areas
    # are used at the individual PV level using 'pvchange
    # --metadataignore y/n'.
 
    # vgmetadatacopies = 0
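 
    # For example (illustrative; 'vg00' and '/dev/sdb' are placeholders):
    #   vgchange --vgmetadatacopies 2 vg00
    #   pvchange --metadataignore y /dev/sdb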
 
    # Approximate default size of on-disk metadata areas in sectors.
    # You should increase this if you have large volume groups or
    # you want to retain a large on-disk history of your metadata changes.
 
    # pvmetadatasize = 255
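 
    # The same value can be given at PV creation time (illustrative;
    # '/dev/sdb' is a placeholder):
    #   pvcreate --metadatasize 2m /dev/sdb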
 
    # List of directories holding live copies of text format metadata.
    # These directories must not be on logical volumes!
    # It's possible to use LVM2 with a couple of directories here,
    # preferably on different (non-LV) filesystems, and with no other
    # on-disk metadata (pvmetadatacopies = 0). Or this can be in
    # addition to on-disk metadata areas.
    # The feature was originally added to simplify testing and is not
    # supported under low memory situations - the machine could lock up.
    #
    # Never edit any files in these directories by hand unless you
    # are absolutely sure you know what you are doing! Use
    # the supplied toolset to make changes (e.g. vgcfgrestore).
 
    # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
#}
 
# Event daemon
#
dmeventd {
    # mirror_library is the library used when monitoring a mirror device.
    #
    # "libdevmapper-event-lvm2mirror.so" attempts to recover from
    # failures.  It removes failed devices from a volume group and
    # reconfigures a mirror as necessary. If no mirror library is
    # provided, mirrors are not monitored through dmeventd.
 
    mirror_library = "libdevmapper-event-lvm2mirror.so"
 
    # snapshot_library is the library used when monitoring a snapshot device.
    #
    # "libdevmapper-event-lvm2snapshot.so" monitors the filling of
    # snapshots and emits a warning through syslog when the use of
    # the snapshot exceeds 80%. The warning is repeated when 85%, 90% and
    # 95% of the snapshot is filled.
 
    snapshot_library = "libdevmapper-event-lvm2snapshot.so"
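 
    # Monitoring can also be toggled per LV (illustrative; 'vg00/snap0'
    # is a placeholder name):
    #   lvchange --monitor y vg00/snap0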
 
    # thin_library is the library used when monitoring a thin device.
    #
    # "libdevmapper-event-lvm2thin.so" monitors the filling of
    # the pool and emits a warning through syslog when the use of
    # the pool exceeds 80%. The warning is repeated when 85%, 90% and
    # 95% of the pool is filled.
 
    thin_library = "libdevmapper-event-lvm2thin.so"
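 
    # Pool usage can also be checked manually (illustrative; 'vg00' is a
    # placeholder):
    #   lvs -o lv_name,data_percent,metadata_percent vg00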
 
    # Full path of the dmeventd binary.
    #
    # executable = "/sbin/dmeventd"
}