# This is an example configuration file for the LVM2 system.
2
# It contains the default settings that would be used if there was no
3
# /etc/lvm/lvm.conf file.
4
#
5
# Refer to 'man lvm.conf' for further information including the file layout.
6
#
7
# To put this file in a different directory and override /etc/lvm set
8
# the environment variable LVM_SYSTEM_DIR before running the tools.
9
#
10
# N.B. Take care that each setting only appears once if uncommenting
11
# example settings in this file.
12
 
13
# This section allows you to set the way the configuration settings are handled.
14
config {
15
 
16
    # If enabled, any LVM2 configuration mismatch is reported.
17
    # This implies checking that the configuration key is understood
18
    # by LVM2 and that the value of the key is of a proper type.
19
    # If disabled, any configuration mismatch is ignored and default
20
    # value is used instead without any warning (a message about the
21
    # configuration key not being found is issued in verbose mode only).
22
    checks = 1
23
 
24
    # If enabled, any configuration mismatch aborts the LVM2 process.
25
    abort_on_errors = 0
26
 
27
    # Directory where LVM looks for configuration profiles.
28
    profile_dir = "/etc/lvm/profile"
29
}
30
 
31
# This section allows you to configure which block devices should
32
# be used by the LVM system.
33
devices {
34
 
35
    # Where do you want your volume groups to appear ?
36
    dir = "/dev"
37
 
38
    # An array of directories that contain the device nodes you wish
39
    # to use with LVM2.
40
    scan = [ "/dev" ]
41
 
42
    # If set, the cache of block device nodes with all associated symlinks
43
    # will be constructed out of the existing udev database content.
44
    # This avoids using and opening any inapplicable non-block devices or
45
    # subdirectories found in the device directory. This setting is applied
46
    # to udev-managed device directory only, other directories will be scanned
47
    # fully. LVM2 needs to be compiled with udev support for this setting to
48
    # take effect. N.B. Any device node or symlink not managed by udev in
49
    # udev directory will be ignored with this setting on.
50
    obtain_device_list_from_udev = 0
51
 
52
    # If several entries in the scanned directories correspond to the
53
    # same block device and the tools need to display a name for device,
54
    # all the pathnames are matched against each item in the following
55
    # list of regular expressions in turn and the first match is used.
56
    # preferred_names = [ ]
57
 
58
    # Try to avoid using undescriptive /dev/dm-N names, if present.
59
    preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
60
 
61
    # A filter that tells LVM2 to only use a restricted set of devices.
62
    # The filter consists of an array of regular expressions.  These
63
    # expressions can be delimited by a character of your choice, and
64
    # prefixed with either an 'a' (for accept) or 'r' (for reject).
65
    # The first expression found to match a device name determines if
66
    # the device will be accepted or rejected (ignored).  Devices that
67
    # don't match any patterns are accepted.
68
 
69
    # Be careful if there are symbolic links or multiple filesystem
70
    # entries for the same device as each name is checked separately against
71
    # the list of patterns.  The effect is that if the first pattern in the
72
    # list to match a name is an 'a' pattern for any of the names, the device
73
    # is accepted; otherwise if the first pattern in the list to match a name
74
    # is an 'r' pattern for any of the names it is rejected; otherwise it is
75
    # accepted.
76
 
77
    # Don't have more than one filter line active at once: only one gets used.
78
 
79
    # Run vgscan after you change this parameter to ensure that
80
    # the cache file gets regenerated (see below).
81
    # If it doesn't do what you expect, check the output of 'vgscan -vvvv'.
82
 
83
 
84
    # By default we accept every block device:
85
    filter = [ "a/.*/" ]
86
 
87
    # Exclude the cdrom drive
88
    # filter = [ "r|/dev/cdrom|" ]
89
 
90
    # When testing I like to work with just loopback devices:
91
    # filter = [ "a/loop/", "r/.*/" ]
92
 
93
    # Or maybe all loops and ide drives except hdc:
94
    # filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
95
 
96
    # Use anchors if you want to be really specific
97
    # filter = [ "a|^/dev/hda8$|", "r/.*/" ]
98
 
99
    # Since "filter" is often overridden from command line, it is not suitable
100
    # for system-wide device filtering (udev rules, lvmetad). To hide devices
101
    # from LVM-specific udev processing and/or from lvmetad, you need to set
102
    # global_filter. The syntax is the same as for normal "filter"
103
    # above. Devices that fail the global_filter are not even opened by LVM.
104
 
105
    # global_filter = []
106
 
107
    # The results of the filtering are cached on disk to avoid
108
    # rescanning dud devices (which can take a very long time).
109
    # By default this cache is stored in the /etc/lvm/cache directory
110
    # in a file called '.cache'.
111
    # It is safe to delete the contents: the tools regenerate it.
112
    # (The old setting 'cache' is still respected if neither of
113
    # these new ones is present.)
114
    # N.B. If obtain_device_list_from_udev is set to 1 the list of
115
    # devices is instead obtained from udev and any existing .cache
116
    # file is removed.
117
    cache_dir = "/etc/lvm/cache"
118
    cache_file_prefix = ""
119
 
120
    # You can turn off writing this cache file by setting this to 0.
121
    write_cache_state = 1
122
 
123
    # Advanced settings.
124
 
125
    # List of pairs of additional acceptable block device types found
126
    # in /proc/devices with maximum (non-zero) number of partitions.
127
    # types = [ "fd", 16 ]
128
 
129
    # If sysfs is mounted (2.6 kernels) restrict device scanning to
130
    # the block devices it believes are valid.
131
    # 1 enables; 0 disables.
132
    sysfs_scan = 1
133
 
134
    # By default, LVM2 will ignore devices used as component paths
135
    # of device-mapper multipath devices.
136
    # 1 enables; 0 disables.
137
    multipath_component_detection = 1
138
 
139
    # By default, LVM2 will ignore devices used as components of
140
    # software RAID (md) devices by looking for md superblocks.
141
    # 1 enables; 0 disables.
142
    md_component_detection = 1
143
 
144
    # By default, if a PV is placed directly upon an md device, LVM2
145
    # will align its data blocks with the md device's stripe-width.
146
    # 1 enables; 0 disables.
147
    md_chunk_alignment = 1
148
 
149
    # Default alignment of the start of a data area in MB.  If set to 0,
150
    # a value of 64KB will be used.  Set to 1 for 1MiB, 2 for 2MiB, etc.
151
    # default_data_alignment = 1
152
 
153
    # By default, the start of a PV's data area will be a multiple of
154
    # the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
155
    # - minimum_io_size - the smallest request the device can perform
156
    #   w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
157
    # - optimal_io_size - the device's preferred unit of receiving I/O
158
    #   (e.g. MD's stripe width)
159
    # minimum_io_size is used if optimal_io_size is undefined (0).
160
    # If md_chunk_alignment is enabled, that detects the optimal_io_size.
161
    # This setting takes precedence over md_chunk_alignment.
162
    # 1 enables; 0 disables.
163
    data_alignment_detection = 1
164
 
165
    # Alignment (in KB) of start of data area when creating a new PV.
166
    # md_chunk_alignment and data_alignment_detection are disabled if set.
167
    # Set to 0 for the default alignment (see: data_alignment_default)
168
    # or page size, if larger.
169
    data_alignment = 0
170
 
171
    # By default, the start of the PV's aligned data area will be shifted by
172
    # the 'alignment_offset' exposed in sysfs.  This offset is often 0 but
173
    # may be non-zero; e.g.: certain 4KB sector drives that compensate for
174
    # windows partitioning will have an alignment_offset of 3584 bytes
175
    # (sector 7 is the lowest aligned logical block, the 4KB sectors start
176
    # at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
177
    # But note that pvcreate --dataalignmentoffset will skip this detection.
178
    # 1 enables; 0 disables.
179
    data_alignment_offset_detection = 1
180
 
181
    # If, while scanning the system for PVs, LVM2 encounters a device-mapper
182
    # device that has its I/O suspended, it waits for it to become accessible.
183
    # Set this to 1 to skip such devices.  This should only be needed
184
    # in recovery situations.
185
    ignore_suspended_devices = 0
186
 
187
    # ignore_lvm_mirrors:  Introduced in version 2.02.104
188
    # This setting determines whether logical volumes of "mirror" segment
189
    # type are scanned for LVM labels.  This affects the ability of
190
    # mirrors to be used as physical volumes.  If 'ignore_lvm_mirrors'
191
    # is set to '1', it becomes impossible to create volume groups on top
192
    # of mirror logical volumes - i.e. to stack volume groups on mirrors.
193
    #
194
    # Allowing mirror logical volumes to be scanned (setting the value to '0')
195
    # can potentially cause LVM processes and I/O to the mirror to become
196
    # blocked.  This is due to the way that the "mirror" segment type handles
197
    # failures.  In order for the hang to manifest itself, an LVM command must
198
    # be run just after a failure and before the automatic LVM repair process
199
    # takes place OR there must be failures in multiple mirrors in the same
200
    # volume group at the same time with write failures occurring moments
201
    # before a scan of the mirror's labels.
202
    #
203
    # Note that these scanning limitations do not apply to the LVM RAID
204
    # types, like "raid1".  The RAID segment types handle failures in a
205
    # different way and are not subject to possible process or I/O blocking.
206
    #
207
    # It is encouraged that users set 'ignore_lvm_mirrors' to 1 if they
208
    # are using the "mirror" segment type.  Users that require volume group
209
    # stacking on mirrored logical volumes should consider using the "raid1"
210
    # segment type.  The "raid1" segment type is not available for
211
    # active/active clustered volume groups.
212
    #
213
    # Set to 1 to disallow stacking and thereby avoid a possible deadlock.
214
    ignore_lvm_mirrors = 0
215
 
216
    # During each LVM operation errors received from each device are counted.
217
    # If the counter of a particular device exceeds the limit set here, no
218
    # further I/O is sent to that device for the remainder of the respective
219
    # operation. Setting the parameter to 0 disables the counters altogether.
220
    disable_after_error_count = 0
221
 
222
    # Allow use of pvcreate --uuid without requiring --restorefile.
223
    require_restorefile_with_uuid = 1
224
 
225
    # Minimum size (in KB) of block devices which can be used as PVs.
226
    # In a clustered environment all nodes must use the same value.
227
    # Any value smaller than 512KB is ignored.
228
 
229
    # Ignore devices smaller than 2MB such as floppy drives.
230
    pv_min_size = 2048
231
 
232
    # The original built-in setting was 512 up to and including version 2.02.84.
233
    # pv_min_size = 512
234
 
235
    # Issue discards to a logical volume's underlying physical volume(s) when
236
    # the logical volume is no longer using the physical volumes' space (e.g.
237
    # lvremove, lvreduce, etc).  Discards inform the storage that a region is
238
    # no longer in use.  Storage that supports discards advertise the protocol
239
    # specific way discards should be issued by the kernel (TRIM, UNMAP, or
240
    # WRITE SAME with UNMAP bit set).  Not all storage will support or benefit
241
    # from discards but SSDs and thinly provisioned LUNs generally do.  If set
242
    # to 1, discards will only be issued if both the storage and kernel provide
243
    # support.
244
    # 1 enables; 0 disables.
245
    issue_discards = 0
246
}
247
 
248
# This section allows you to configure the way in which LVM selects
249
# free space for its Logical Volumes.
250
allocation {
251
 
252
    # When searching for free space to extend an LV, the "cling"
253
    # allocation policy will choose space on the same PVs as the last
254
    # segment of the existing LV.  If there is insufficient space and a
255
    # list of tags is defined here, it will check whether any of them are
256
    # attached to the PVs concerned and then seek to match those PV tags
257
    # between existing extents and new extents.
258
    # Use the special tag "@*" as a wildcard to match any PV tag.
259
 
260
    # Example: LVs are mirrored between two sites within a single VG.
261
    # PVs are tagged with either @site1 or @site2 to indicate where
262
    # they are situated.
263
 
264
    # cling_tag_list = [ "@site1", "@site2" ]
265
    # cling_tag_list = [ "@*" ]
266
 
267
    # Changes made in version 2.02.85 extended the reach of the 'cling'
268
    # policies to detect more situations where data can be grouped
269
    # onto the same disks.  Set this to 0 to revert to the previous
270
    # algorithm.
271
    maximise_cling = 1
272
 
273
    # Set to 1 to guarantee that mirror logs will always be placed on
274
    # different PVs from the mirror images.  This was the default
275
    # until version 2.02.85.
276
    mirror_logs_require_separate_pvs = 0
277
 
278
    # Set to 1 to guarantee that thin pool metadata will always
279
    # be placed on different PVs from the pool data.
280
    thin_pool_metadata_require_separate_pvs = 0
281
 
282
    # Specify the minimal chunk size (in KB) for thin pool volumes.
283
    # Use of the larger chunk size may improve performance for plain
284
    # thin volumes, however using them for snapshot volumes is less efficient,
285
    # as it consumes more space and takes extra time for copying.
286
    # When unset, lvm tries to estimate chunk size starting from 64KB
287
    # Supported values are in range from 64 to 1048576.
288
    # thin_pool_chunk_size = 64
289
 
290
    # Specify discards behavior of the thin pool volume.
291
    # Select one of "ignore", "nopassdown", "passdown"
292
    # thin_pool_discards = "passdown"
293
 
294
    # Set to 0, to disable zeroing of thin pool data chunks before their
295
    # first use.
296
    # N.B. zeroing larger thin pool chunk size degrades performance.
297
    # thin_pool_zero = 1
298
}
299
 
300
# This section allows you to configure the nature of the
301
# information that LVM2 reports.
302
log {
303
 
304
    # Controls the messages sent to stdout or stderr.
305
    # There are three levels of verbosity, 3 being the most verbose.
306
    verbose = 0
307
 
308
    # Set to 1 to suppress all non-essential messages from stdout.
309
    # This has the same effect as -qq.
310
    # When this is set, the following commands still produce output:
311
    # dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
312
    # pvs, version, vgcfgrestore -l, vgdisplay, vgs.
313
    # Non-essential messages are shifted from log level 4 to log level 5
314
    # for syslog and lvm2_log_fn purposes.
315
    # Any 'yes' or 'no' questions not overridden by other arguments
316
    # are suppressed and default to 'no'.
317
    silent = 0
318
 
319
    # Should we send log messages through syslog?
320
    # 1 is yes; 0 is no.
321
    syslog = 1
322
 
323
    # Should we log error and debug messages to a file?
324
    # By default there is no log file.
325
    #file = "/var/log/lvm2.log"
326
 
327
    # Should we overwrite the log file each time the program is run?
328
    # By default we append.
329
    overwrite = 0
330
 
331
    # What level of log messages should we send to the log file and/or syslog?
332
    # There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
333
    # 7 is the most verbose (LOG_DEBUG).
334
    level = 0
335
 
336
    # Format of output messages
337
    # Whether or not (1 or 0) to indent messages according to their severity
338
    indent = 1
339
 
340
    # Whether or not (1 or 0) to display the command name on each line output
341
    command_names = 0
342
 
343
    # A prefix to use before the message text (but after the command name,
344
    # if selected).  Default is two spaces, so you can see/grep the severity
345
    # of each message.
346
    prefix = "  "
347
 
348
    # To make the messages look similar to the original LVM tools use:
349
    #   indent = 0
350
    #   command_names = 1
351
    #   prefix = " -- "
352
 
353
    # Set this if you want log messages during activation.
354
    # Don't use this in low memory situations (can deadlock).
355
    # activation = 0
356
 
357
    # Some debugging messages are assigned to a class and only appear
358
    # in debug output if the class is listed here.
359
    # Classes currently available:
360
    #   memory, devices, activation, allocation, lvmetad, metadata, cache,
361
    #   locking
362
    # Use "all" to see everything.
363
    debug_classes = [ "memory", "devices", "activation", "allocation",
364
		      "lvmetad", "metadata", "cache", "locking" ]
365
}
366
 
367
# Configuration of metadata backups and archiving.  In LVM2 when we
368
# talk about a 'backup' we mean making a copy of the metadata for the
369
# *current* system.  The 'archive' contains old metadata configurations.
370
# Backups are stored in a human readable text format.
371
backup {
372
 
373
    # Should we maintain a backup of the current metadata configuration ?
374
    # Use 1 for Yes; 0 for No.
375
    # Think very hard before turning this off!
376
    backup = 1
377
 
378
    # Where shall we keep it ?
379
    # Remember to back up this directory regularly!
380
    backup_dir = "/etc/lvm/backup"
381
 
382
    # Should we maintain an archive of old metadata configurations.
383
    # Use 1 for Yes; 0 for No.
384
    # On by default.  Think very hard before turning this off.
385
    archive = 1
386
 
387
    # Where should archived files go ?
388
    # Remember to back up this directory regularly!
389
    archive_dir = "/etc/lvm/archive"
390
 
391
    # What is the minimum number of archive files you wish to keep ?
392
    retain_min = 10
393
 
394
    # What is the minimum time you wish to keep an archive file for ?
395
    retain_days = 30
396
}
397
 
398
# Settings for the running LVM2 in shell (readline) mode.
399
shell {
400
 
401
    # Number of lines of history to store in ~/.lvm_history
402
    history_size = 100
403
}
404
 
405
 
406
# Miscellaneous global LVM2 settings
407
global {
408
    # The file creation mask for any files and directories created.
409
    # Interpreted as octal if the first digit is zero.
410
    umask = 077
411
 
412
    # Allow other users to read the files
413
    #umask = 022
414
 
415
    # Enabling test mode means that no changes to the on disk metadata
416
    # will be made.  Equivalent to having the -t option on every
417
    # command.  Defaults to off.
418
    test = 0
419
 
420
    # Default value for --units argument
421
    units = "h"
422
 
423
    # Since version 2.02.54, the tools distinguish between powers of
424
    # 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
425
    # KB, MB, GB).
426
    # If you have scripts that depend on the old behaviour, set this to 0
427
    # temporarily until you update them.
428
    si_unit_consistency = 1
429
 
430
    # Whether or not to communicate with the kernel device-mapper.
431
    # Set to 0 if you want to use the tools to manipulate LVM metadata
432
    # without activating any logical volumes.
433
    # If the device-mapper kernel driver is not present in your kernel
434
    # setting this to 0 should suppress the error messages.
435
    activation = 1
436
 
437
    # If we can't communicate with device-mapper, should we try running
438
    # the LVM1 tools?
439
    # This option only applies to 2.4 kernels and is provided to help you
440
    # switch between device-mapper kernels and LVM1 kernels.
441
    # The LVM1 tools need to be installed with .lvm1 suffixes
442
    # e.g. vgscan.lvm1 and they will stop working after you start using
443
    # the new lvm2 on-disk metadata format.
444
    # The default value is set when the tools are built.
445
    # fallback_to_lvm1 = 0
446
 
447
    # The default metadata format that commands should use - "lvm1" or "lvm2".
448
    # The command line override is -M1 or -M2.
449
    # Defaults to "lvm2".
450
    # format = "lvm2"
451
 
452
    # Location of proc filesystem
453
    proc = "/proc"
454
 
455
    # Type of locking to use. Defaults to local file-based locking (1).
456
    # Turn locking off by setting to 0 (dangerous: risks metadata corruption
457
    # if LVM2 commands get run concurrently).
458
    # Type 2 uses the external shared library locking_library.
459
    # Type 3 uses built-in clustered locking.
460
    # Type 4 uses read-only locking which forbids any operations that might
461
    # change metadata.
462
    locking_type = 1
463
 
464
    # Set to 0 to fail when a lock request cannot be satisfied immediately.
465
    wait_for_locks = 1
466
 
467
    # If using external locking (type 2) and initialisation fails,
468
    # with this set to 1 an attempt will be made to use the built-in
469
    # clustered locking.
470
    # If you are using a customised locking_library you should set this to 0.
471
    fallback_to_clustered_locking = 1
472
 
473
    # If an attempt to initialise type 2 or type 3 locking failed, perhaps
474
    # because cluster components such as clvmd are not running, with this set
475
    # to 1 an attempt will be made to use local file-based locking (type 1).
476
    # If this succeeds, only commands against local volume groups will proceed.
477
    # Volume Groups marked as clustered will be ignored.
478
    fallback_to_local_locking = 1
479
 
480
    # Local non-LV directory that holds file-based locks while commands are
481
    # in progress.  A directory like /tmp that may get wiped on reboot is OK.
482
    locking_dir = "/var/lock/lvm"
483
 
484
    # Whenever there are competing read-only and read-write access requests for
485
    # a volume group's metadata, instead of always granting the read-only
486
    # requests immediately, delay them to allow the read-write requests to be
487
    # serviced.  Without this setting, write access may be stalled by a high
488
    # volume of read-only requests.
489
    # NB. This option only affects locking_type = 1 viz. local file-based
490
    # locking.
491
    prioritise_write_locks = 1
492
 
493
    # Other entries can go here to allow you to load shared libraries
494
    # e.g. if support for LVM1 metadata was compiled as a shared library use
495
    #   format_libraries = "liblvm2format1.so"
496
    # Full pathnames can be given.
497
 
498
    # Search this directory first for shared libraries.
499
    #   library_dir = "/lib"
500
 
501
    # The external locking library to load if locking_type is set to 2.
502
    #   locking_library = "liblvm2clusterlock.so"
503
 
504
    # Treat any internal errors as fatal errors, aborting the process that
505
    # encountered the internal error. Please only enable for debugging.
506
    abort_on_internal_errors = 0
507
 
508
    # Check whether CRC is matching when parsed VG is used multiple times.
509
    # This is useful to catch unexpected internal cached volume group
510
    # structure modification. Please only enable for debugging.
511
    detect_internal_vg_cache_corruption = 0
512
 
513
    # If set to 1, no operations that change on-disk metadata will be permitted.
514
    # Additionally, read-only commands that encounter metadata in need of repair
515
    # will still be allowed to proceed exactly as if the repair had been
516
    # performed (except for the unchanged vg_seqno).
517
    # Inappropriate use could mess up your system, so seek advice first!
518
    metadata_read_only = 0
519
 
520
    # 'mirror_segtype_default' defines which segtype will be used when the
521
    # shorthand '-m' option is used for mirroring.  The possible options are:
522
    #
523
    # "mirror" - The original RAID1 implementation provided by LVM2/DM.  It is
524
    # 	         characterized by a flexible log solution (core, disk, mirrored)
525
    #		 and by the necessity to block I/O while reconfiguring in the
526
    #		 event of a failure.
527
    #
528
    #		 There is an inherent race in the dmeventd failure handling
529
    #		 logic with snapshots of devices using this type of RAID1 that
530
    #		 in the worst case could cause a deadlock.
531
    #		   Ref: https://bugzilla.redhat.com/show_bug.cgi?id=817130#c10
532
    #
533
    # "raid1"  - This implementation leverages MD's RAID1 personality through
534
    # 	       	 device-mapper.  It is characterized by a lack of log options.
535
    #		 (A log is always allocated for every device and they are placed
536
    #		 on the same device as the image - no separate devices are
537
    #		 required.)  This mirror implementation does not require I/O
538
    #		 to be blocked in the kernel in the event of a failure.
539
    #		 This mirror implementation is not cluster-aware and cannot be
540
    #		 used in a shared (active/active) fashion in a cluster.
541
    #
542
    # Specify the '--type <mirror|raid1>' option to override this default
543
    # setting.
544
    mirror_segtype_default = "mirror"
545
 
546
    # 'raid10_segtype_default' determines the segment types used by default
547
    # when the '--stripes/-i' and '--mirrors/-m' arguments are both specified
548
    # during the creation of a logical volume.
549
    # Possible settings include:
550
    #
551
    # "raid10" - This implementation leverages MD's RAID10 personality through
552
    #            device-mapper.
553
    #
554
    # "mirror" - LVM will layer the 'mirror' and 'stripe' segment types.  It
555
    #            will do this by creating a mirror on top of striped sub-LVs;
556
    #            effectively creating a RAID 0+1 array.  This is suboptimal
557
    #            in terms of providing redundancy and performance.  Changing to
558
    #            this setting is not advised.
559
    # Specify the '--type <raid10|mirror>' option to override this default
560
    # setting.
561
    raid10_segtype_default = "mirror"
562
 
563
    # The default format for displaying LV names in lvdisplay was changed
564
    # in version 2.02.89 to show the LV name and path separately.
565
    # Previously this was always shown as /dev/vgname/lvname even when that
566
    # was never a valid path in the /dev filesystem.
567
    # Set to 1 to reinstate the previous format.
568
    #
569
    # lvdisplay_shows_full_device_path = 0
570
 
571
    # Whether to use (trust) a running instance of lvmetad. If this is set to
572
    # 0, all commands fall back to the usual scanning mechanisms. When set to 1
573
    # *and* when lvmetad is running (automatically instantiated by making use of
574
    # systemd's socket-based service activation or run as an initscripts service
575
    # or run manually), the volume group metadata and PV state flags are obtained
576
    # from the lvmetad instance and no scanning is done by the individual
577
    # commands. In a setup with lvmetad, lvmetad udev rules *must* be set up for
578
    # LVM to work correctly. Without proper udev rules, all changes in block
579
    # device configuration will be *ignored* until a manual 'pvscan --cache'
580
    # is performed. These rules are installed by default.
581
    #
582
    # If lvmetad has been running while use_lvmetad was 0, it MUST be stopped
583
    # before changing use_lvmetad to 1 and started again afterwards.
584
    #
585
    # If using lvmetad, the volume activation is also switched to automatic
586
    # event-based mode. In this mode, the volumes are activated based on
587
    # incoming udev events that automatically inform lvmetad about new PVs
588
    # that appear in the system. Once the VG is complete (all the PVs are
589
    # present), it is auto-activated. The activation/auto_activation_volume_list
590
    # setting controls which volumes are auto-activated (all by default).
591
    use_lvmetad = 0
592
 
593
    # Full path of the utility called to check that a thin metadata device
594
    # is in a state that allows it to be used.
595
    # Each time a thin pool needs to be activated or after it is deactivated
596
    # this utility is executed. The activation will only proceed if the utility
597
    # has an exit status of 0.
598
    # Set to "" to skip this check.  (Not recommended.)
599
    # The thin tools are available as part of the device-mapper-persistent-data
600
    # package from https://github.com/jthornber/thin-provisioning-tools.
601
    #
602
    # thin_check_executable = "/usr/sbin/thin_check"
603
 
604
    # Array of string options passed with thin_check command. By default,
605
    # option "-q" is for quiet output.
606
    # With thin_check version 2.1 or newer you can add "--ignore-non-fatal-errors"
607
    # to let it pass through ignorable errors and fix them later.
608
    #
609
    # thin_check_options = [ "-q" ]
610
 
611
    # Full path of the utility called to repair a thin metadata device
612
    # is in a state that allows it to be used.
613
    # Each time a thin pool needs repair this utility is executed.
614
    # See thin_check_executable how to obtain binaries.
615
    #
616
    # thin_repair_executable = "/usr/sbin/thin_repair"
617
 
618
    # Array of extra string options passed with thin_repair command.
619
    # thin_repair_options = [ "" ]
620
 
621
    # Full path of the utility called to dump thin metadata content.
622
    # See thin_check_executable how to obtain binaries.
623
    #
624
    # thin_dump_executable = "/usr/sbin/thin_dump"
625
 
626
    # If set, given features are not used by thin driver.
627
    # This can be helpful not just for testing, but i.e. allows to avoid
628
    # using problematic implementation of some thin feature.
629
    # Features:
630
    #   block_size
631
    #   discards
632
    #   discards_non_power_2
633
    #   external_origin
634
    #   metadata_resize
635
    #
636
    # thin_disabled_features = [ "discards", "block_size" ]
637
}
638
 
639
activation {
640
    # Set to 1 to perform internal checks on the operations issued to
641
    # libdevmapper.  Useful for debugging problems with activation.
642
    # Some of the checks may be expensive, so it's best to use this
643
    # only when there seems to be a problem.
644
    checks = 0
645
 
646
    # Set to 0 to disable udev synchronisation (if compiled into the binaries).
647
    # Processes will not wait for notification from udev.
648
    # They will continue irrespective of any possible udev processing
649
    # in the background.  You should only use this if udev is not running
650
    # or has rules that ignore the devices LVM2 creates.
651
    # The command line argument --nodevsync takes precedence over this setting.
652
    # If set to 1 when udev is not running, and there are LVM2 processes
653
    # waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
654
    udev_sync = 1
655
 
656
    # Set to 0 to disable the udev rules installed by LVM2 (if built with
    # --enable-udev_rules). LVM2 will then manage the /dev nodes and symlinks
    # for active logical volumes directly itself.
    # N.B. Manual intervention may be required if this setting is changed
    # while any logical volumes are active.
    udev_rules = 1

    # Set to 1 for LVM2 to verify operations performed by udev. This turns on
    # additional checks (and if necessary, repairs) on entries in the device
    # directory after udev has completed processing its events.
    # Useful for diagnosing problems with LVM2/udev interactions.
    verify_udev_operations = 0

    # If set to 1 and if deactivation of an LV fails, perhaps because
    # a process run from a quick udev rule temporarily opened the device,
    # retry the operation for a few seconds before failing.
    retry_deactivation = 1

    # How to fill in missing stripes if activating an incomplete volume.
    # Using "error" will make inaccessible parts of the device return
    # I/O errors on access.  You can instead use a device path, in which
    # case that device will be used in place of missing stripes.
    # But note that using anything other than "error" with mirrored
    # or snapshotted volumes is likely to result in data corruption.
    missing_stripe_filler = "error"

    # The linear target is an optimised version of the striped target
    # that only handles a single stripe.  Set this to 0 to disable this
    # optimisation and always use the striped target.
    use_linear_target = 1

    # How much stack (in KB) to reserve for use while devices are suspended.
    # Prior to version 2.02.89 this used to be set to 256KB.
    reserved_stack = 64

    # How much memory (in KB) to reserve for use while devices are suspended.
    reserved_memory = 8192

    # Nice value used while devices are suspended.
    process_priority = -18

    # If volume_list is defined, each LV is only activated if there is a
    # match against the list.
    #
    #   "vgname" and "vgname/lvname" are matched exactly.
    #   "@tag" matches any tag set in the LV or VG.
    #   "@*" matches if any tag defined on the host is also set in the LV or VG.
    #
    # If any host tags exist but volume_list is not defined, a default
    # single-entry list containing "@*" is assumed.
    #
    # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]

    # If auto_activation_volume_list is defined, each LV that is to be
    # activated with the autoactivation option (--activate ay/-a ay) is
    # first checked against the list. There are two scenarios in which
    # the autoactivation option is used:
    #
    #   - automatic activation of volumes based on incoming PVs. If all the
    #     PVs making up a VG are present in the system, the autoactivation
    #     is triggered. This requires lvmetad (global/use_lvmetad=1) and udev
    #     to be running. In this case, "pvscan --cache -aay" is called
    #     automatically without any user intervention while processing
    #     udev events. Please make sure you define auto_activation_volume_list
    #     properly so only the volumes you want and expect are autoactivated.
    #
    #   - direct activation on command line with the autoactivation option.
    #     In this case, the user calls "vgchange --activate ay/-a ay" or
    #     "lvchange --activate ay/-a ay" directly.
    #
    # By default, the auto_activation_volume_list is not defined and all
    # volumes will be activated either automatically or by using --activate ay/-a ay.
    #
    # N.B. The "activation/volume_list" is still honoured in all cases so even
    # if the VG/LV passes the auto_activation_volume_list, it still needs to
    # pass the volume_list for it to be activated in the end.

    # If auto_activation_volume_list is defined but empty, no volumes will be
    # activated automatically and --activate ay/-a ay will do nothing.
    #
    # auto_activation_volume_list = []

    # If auto_activation_volume_list is defined and it's not empty, only matching
    # volumes will be activated either automatically or by using --activate ay/-a ay.
    #
    #   "vgname" and "vgname/lvname" are matched exactly.
    #   "@tag" matches any tag set in the LV or VG.
    #   "@*" matches if any tag defined on the host is also set in the LV or VG.
    #
    # auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]

    # If read_only_volume_list is defined, each LV that is to be activated
    # is checked against the list, and if it matches, it is activated
    # in read-only mode.  (This overrides '--permission rw' stored in the
    # metadata.)
    #
    #   "vgname" and "vgname/lvname" are matched exactly.
    #   "@tag" matches any tag set in the LV or VG.
    #   "@*" matches if any tag defined on the host is also set in the LV or VG.
    #
    # read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]

    # Each LV can have an 'activation skip' flag stored persistently against it.
    # During activation, this flag is used to decide whether such an LV is skipped.
    # The 'activation skip' flag can be set during LV creation and by default it
    # is automatically set for thin snapshot LVs. The 'auto_set_activation_skip'
    # enables or disables this automatic setting of the flag while LVs are created.
    # auto_set_activation_skip = 1

    # For RAID or 'mirror' segment types, 'raid_region_size' is the
    # size (in kiB) of each:
    # - synchronization operation when initializing
    # - copy operation when performing a 'pvmove' (using 'mirror' segtype)
    # This setting has replaced 'mirror_region_size' since version 2.02.99.
    raid_region_size = 512

    # Setting to use when there is no readahead value stored in the metadata.
    #
    # "none" - Disable readahead.
    # "auto" - Use default value chosen by kernel.
    readahead = "auto"

    # 'raid_fault_policy' defines how a device failure in a RAID logical
    # volume is handled.  This includes logical volumes that have the following
    # segment types: raid1, raid4, raid5*, and raid6*.
    #
    # In the event of a failure, the following policies will determine what
    # actions are performed during the automated response to failures (when
    # dmeventd is monitoring the RAID logical volume) and when 'lvconvert' is
    # called manually with the options '--repair' and '--use-policies'.
    #
    # "warn"     - Use the system log to warn the user that a device in the RAID
    #              logical volume has failed.  It is left to the user to run
    #              'lvconvert --repair' manually to remove or replace the failed
    #              device.  As long as the number of failed devices does not
    #              exceed the redundancy of the logical volume (1 device for
    #              raid4/5, 2 for raid6, etc) the logical volume will remain
    #              usable.
    #
    # "allocate" - Attempt to use any extra physical volumes in the volume
    #              group as spares and replace faulty devices.
    #
    raid_fault_policy = "warn"

    # 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
    # how a device failure affecting a mirror (of "mirror" segment type) is
    # handled.  A mirror is composed of mirror images (copies) and a log.
    # A disk log ensures that a mirror does not need to be re-synced
    # (all copies made the same) every time a machine reboots or crashes.
    #
    # In the event of a failure, the specified policy will be used to determine
    # what happens. This applies to automatic repairs (when the mirror is being
    # monitored by dmeventd) and to manual lvconvert --repair when
    # --use-policies is given.
    #
    # "remove" - Simply remove the faulty device and run without it.  If
    #            the log device fails, the mirror would convert to using
    #            an in-memory log.  This means the mirror will not
    #            remember its sync status across crashes/reboots and
    #            the entire mirror will be re-synced.  If a
    #            mirror image fails, the mirror will convert to a
    #            non-mirrored device if there is only one remaining good
    #            copy.
    #
    # "allocate" - Remove the faulty device and try to allocate space on
    #            a new device to be a replacement for the failed device.
    #            Using this policy for the log is fast and maintains the
    #            ability to remember sync state through crashes/reboots.
    #            Using this policy for a mirror device is slow, as it
    #            requires the mirror to resynchronize the devices, but it
    #            will preserve the mirror characteristic of the device.
    #            This policy acts like "remove" if no suitable device and
    #            space can be allocated for the replacement.
    #
    # "allocate_anywhere" - Not yet implemented. Useful to place the log device
    #            temporarily on the same physical volume as one of the mirror
    #            images. This policy is not recommended for mirror devices
    #            since it would break the redundant nature of the mirror. This
    #            policy acts like "remove" if no suitable device and space can
    #            be allocated for the replacement.

    mirror_log_fault_policy = "allocate"
    mirror_image_fault_policy = "remove"

    # 'snapshot_autoextend_threshold' and 'snapshot_autoextend_percent' define
    # how to handle automatic snapshot extension. The former defines when the
    # snapshot should be extended: when its space usage exceeds this many
    # percent. The latter defines how much extra space should be allocated for
    # the snapshot, in percent of its current size.
    #
    # For example, if you set snapshot_autoextend_threshold to 70 and
    # snapshot_autoextend_percent to 20, whenever a snapshot exceeds 70% usage,
    # it will be extended by another 20%. For a 1G snapshot, using up 700M will
    # trigger a resize to 1.2G. When the usage exceeds 840M, the snapshot will
    # be extended to 1.44G, and so on.
    #
    # Setting snapshot_autoextend_threshold to 100 disables automatic
    # extensions. The minimum value is 50 (a setting below 50 will be treated
    # as 50).

    snapshot_autoextend_threshold = 100
    snapshot_autoextend_percent = 20

    # 'thin_pool_autoextend_threshold' and 'thin_pool_autoextend_percent' define
    # how to handle automatic pool extension. The former defines when the
    # pool should be extended: when its space usage exceeds this many
    # percent. The latter defines how much extra space should be allocated for
    # the pool, in percent of its current size.
    #
    # For example, if you set thin_pool_autoextend_threshold to 70 and
    # thin_pool_autoextend_percent to 20, whenever a pool exceeds 70% usage,
    # it will be extended by another 20%. For a 1G pool, using up 700M will
    # trigger a resize to 1.2G. When the usage exceeds 840M, the pool will
    # be extended to 1.44G, and so on.
    #
    # Setting thin_pool_autoextend_threshold to 100 disables automatic
    # extensions. The minimum value is 50 (a setting below 50 will be treated
    # as 50).

    thin_pool_autoextend_threshold = 100
    thin_pool_autoextend_percent = 20

    # While activating devices, I/O to devices being (re)configured is
    # suspended, and as a precaution against deadlocks, LVM2 needs to pin
    # any memory it is using so it is not paged out.  Groups of pages that
    # are known not to be accessed during activation need not be pinned
    # into memory.  Each string listed in this setting is compared against
    # each line in /proc/self/maps, and the pages corresponding to any
    # lines that match are not pinned.  On some systems locale-archive was
    # found to make up over 80% of the memory used by the process.
    # mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]

    # Set to 1 to revert to the default behaviour prior to version 2.02.62
    # which used mlockall() to pin the whole process's memory while activating
    # devices.
    use_mlockall = 0

    # Monitoring is enabled by default when activating logical volumes.
    # Set to 0 to disable monitoring or use the --ignoremonitoring option.
    monitoring = 1

    # When pvmove or lvconvert must wait for the kernel to finish
    # synchronising or merging data, they check and report progress
    # at intervals of this number of seconds.  The default is 15 seconds.
    # If this is set to 0 and there is only one thing to wait for, there
    # are no progress reports, but the process is awoken immediately once
    # the operation is complete.
    polling_interval = 15
}


####################
# Advanced section #
####################

# Metadata settings
#
# metadata {
    # Default number of copies of metadata to hold on each PV.  0, 1 or 2.
    # You might want to override it from the command line with 0
    # when running pvcreate on new PVs which are to be added to large VGs.

    # pvmetadatacopies = 1

    # Default number of copies of metadata to maintain for each VG.
    # If set to a non-zero value, LVM automatically chooses which of
    # the available metadata areas to use to achieve the requested
    # number of copies of the VG metadata.  If you set a value larger
    # than the total number of metadata areas available then
    # metadata is stored in them all.
    # The default value of 0 ("unmanaged") disables this automatic
    # management and allows you to control which metadata areas
    # are used at the individual PV level using 'pvchange
    # --metadataignore y/n'.

    # vgmetadatacopies = 0

    # Approximate default size of on-disk metadata areas in sectors.
    # You should increase this if you have large volume groups or
    # you want to retain a large on-disk history of your metadata changes.

    # pvmetadatasize = 255

    # List of directories holding live copies of text format metadata.
    # These directories must not be on logical volumes!
    # It's possible to use LVM2 with a couple of directories here,
    # preferably on different (non-LV) filesystems, and with no other
    # on-disk metadata (pvmetadatacopies = 0). Or this can be in
    # addition to on-disk metadata areas.
    # The feature was originally added to simplify testing and is not
    # supported under low memory situations - the machine could lock up.
    #
    # Never edit any files in these directories by hand unless you
    # are absolutely sure you know what you are doing! Use
    # the supplied toolset to make changes (e.g. vgcfgrestore).

    # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
#}

# Event daemon
#
dmeventd {
    # mirror_library is the library used when monitoring a mirror device.
    #
    # "libdevmapper-event-lvm2mirror.so" attempts to recover from
    # failures.  It removes failed devices from a volume group and
    # reconfigures a mirror as necessary. If no mirror library is
    # provided, mirrors are not monitored through dmeventd.

    mirror_library = "libdevmapper-event-lvm2mirror.so"

    # snapshot_library is the library used when monitoring a snapshot device.
    #
    # "libdevmapper-event-lvm2snapshot.so" monitors the filling of
    # snapshots and emits a warning through syslog when the use of
    # the snapshot exceeds 80%. The warning is repeated when 85%, 90% and
    # 95% of the snapshot is filled.

    snapshot_library = "libdevmapper-event-lvm2snapshot.so"

    # thin_library is the library used when monitoring a thin device.
    #
    # "libdevmapper-event-lvm2thin.so" monitors the filling of
    # the pool and emits a warning through syslog when the use of
    # the pool exceeds 80%. The warning is repeated when 85%, 90% and
    # 95% of the pool is filled.

    thin_library = "libdevmapper-event-lvm2thin.so"

    # Full path of the dmeventd binary.
    #
    # executable = "/sbin/dmeventd"
}