diff --git a/blivet/__init__.py b/blivet/__init__.py
index 9a9131a..2c2c37d 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -539,6 +539,18 @@ class Blivet(object):
         return lvs
 
     @property
+    def thinlvs(self):
+        thin = self.devicetree.getDevicesByType("lvmthinlv")
+        thin.sort(key=lambda d: d.name)
+        return thin
+
+    @property
+    def thinpools(self):
+        pools = self.devicetree.getDevicesByType("lvmthinpool")
+        pools.sort(key=lambda d: d.name)
+        return pools
+
+    @property
     def pvs(self):
         """ A list of the LVM Physical Volumes in the device tree.
 
@@ -1071,6 +1083,8 @@ class Blivet(object):
 
     def newLV(self, *args, **kwargs):
        """ Return a new LVMLogicalVolumeDevice instance. """
+        thin_volume = kwargs.pop("thin_volume", False)
+        thin_pool = kwargs.pop("thin_pool", False)
         vg = kwargs.get("parents", [None])[0]
         mountpoint = kwargs.pop("mountpoint", None)
         if kwargs.has_key("fmt_type"):
@@ -1094,14 +1108,27 @@ class Blivet(object):
             swap = True
         else:
             swap = False
+
+        prefix = ""
+        if thin_pool:
+            prefix = "pool"
+
         name = self.suggestDeviceName(parent=vg,
                                       swap=swap,
-                                      mountpoint=mountpoint)
+                                      mountpoint=mountpoint,
+                                      prefix=prefix)
 
         if "%s-%s" % (vg.name, name) in self.names:
             raise ValueError("name already in use")
 
-        return LVMLogicalVolumeDevice(name, *args, **kwargs)
+        if thin_pool:
+            device_class = LVMThinPoolDevice
+        elif thin_volume:
+            device_class = LVMThinLogicalVolumeDevice
+        else:
+            device_class = LVMLogicalVolumeDevice
+
+        return device_class(name, *args, **kwargs)
 
     def newBTRFS(self, *args, **kwargs):
         """ Return a new BTRFSVolumeDevice or BRFSSubVolumeDevice. """
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 1b118df..f236a39 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -49,10 +49,12 @@ DEVICE_TYPE_MD = 1
 DEVICE_TYPE_PARTITION = 2
 DEVICE_TYPE_BTRFS = 3
 DEVICE_TYPE_DISK = 4
+DEVICE_TYPE_LVM_THINP = 5
 
 def get_device_type(device):
     device_types = {"partition": DEVICE_TYPE_PARTITION,
                     "lvmlv": DEVICE_TYPE_LVM,
+                    "lvmthinlv": DEVICE_TYPE_LVM_THINP,
                     "btrfs subvolume": DEVICE_TYPE_BTRFS,
                     "btrfs volume": DEVICE_TYPE_BTRFS,
                     "mdarray": DEVICE_TYPE_MD}
@@ -95,6 +97,7 @@ def get_device_factory(blivet, device_type, size, **kwargs):
                    DEVICE_TYPE_BTRFS: BTRFSFactory,
                    DEVICE_TYPE_PARTITION: PartitionFactory,
                    DEVICE_TYPE_MD: MDFactory,
+                   DEVICE_TYPE_LVM_THINP: LVMThinPFactory,
                    DEVICE_TYPE_DISK: DeviceFactory}
 
     factory_class = class_table[device_type]
@@ -430,6 +433,10 @@ class DeviceFactory(object):
         """ Type-specific container device instantiation. """
         pass
 
+    def _check_container_size(self):
+        """ Raise an exception if the container cannot hold its devices. """
+        pass
+
     def _reconfigure_container(self):
         """ Reconfigure a defined container required by this factory device.
""" if getattr(self.container, "exists", False): @@ -438,6 +445,10 @@ class DeviceFactory(object): self._set_container_members() self._set_container_raid_level() + # check that the container is still large enough to contain whatever + # other devices it previously contained + self._check_container_size() + def _set_container_members(self): if not self.child_factory: return @@ -692,6 +703,7 @@ class DeviceFactory(object): self._configure() except Exception as e: log.error("failed to configure device factory: %s" % e) + raise if self.parent_factory is None: # only do the backup/restore error handling at the top-level self._revert_devicetree() @@ -1040,7 +1052,7 @@ class LVMFactory(DeviceFactory): size_set_class = TotalSizeSet def __init__(self, *args, **kwargs): - super(LVMFactory, self).__init__(self, *args, **kwargs) + super(LVMFactory, self).__init__(*args, **kwargs) if self.container_raid_level: self.child_factory_class = MDFactory @@ -1148,6 +1160,12 @@ class LVMFactory(DeviceFactory): def _get_new_container(self, *args, **kwargs): return self.storage.newVG(*args, **kwargs) + def _check_container_size(self): + """ Raise an exception if the container cannot hold its devices. """ + if self.container and self.container.freeSpace < 0: + raise DeviceFactoryError("container changes impossible due to " + "the devices it already contains") + # # methods to configure the factory's device # @@ -1218,6 +1236,166 @@ class LVMFactory(DeviceFactory): super(LVMFactory, self)._configure() +class LVMThinPFactory(LVMFactory): + """ Factory for creating LVM using thin provisioning. + + This class will be very similar to LVMFactory except that there are two + layers of container: vg and thin pool (lv). We could make a separate + factory class for creating and managing the thin pool, but we haven't + used a separate factory for any of the other classes' containers. + + pv(s) + vg + pool + thinlv(s) + + This is problematic in that there are two containers in this stack: + the vg and thin pool. + + The thin pool does not need to be large enough to contain all of the + thin lvs, so that check/adjust piece must be overridden/skipped here. + + XXX We aren't going to allow overcommitting initially, so that'll + simplify things somewhat. That means we can manage the thin pool + size automatically. We will need to handle overcommit in + existing thinp setups in anaconda's UI. + + Because of the argument-passing madness that would ensue from being able + to pass specs for two separate containers, the initial version of this + class will only support auto-sized pools. + + Also, the initial version will only support one thin pool per vg. + + In summary: + + - one thin pool per vg + - pools are auto-sized by anaconda/blivet + - thinp setups created by the installer will not overcommit + + Where to manage the pool: + + - the pool will need to be adjusted on device removal, which means + pool management must not be hidden in device management routines + """ + def __init__(self, *args, **kwargs): + # pool name is for identification -- not renaming + self.pool_name = kwargs.pop("pool_name", None) + super(LVMThinPFactory, self).__init__(*args, **kwargs) + + self.pool = None + + # + # methods related to device size and disk space requirements + # + def _get_device_size(self): + # calculate device size based on space in the pool + pool_size = self.pool.size + pool_used = sum(self.container.align(l.size, roundup=True) + for l in self.pool.lvs) + free = pool_size - pool_used # no padding/correction? 
+        size = self.size
+        if free < size:
+            log.info("adjusting size from %.2f to %.2f so it fits "
+                     "in pool %s" % (size, free, self.pool.name))
+            size = free
+
+        return size
+
+    def _get_device_space(self):
+        space = super(LVMThinPFactory, self)._get_device_space()
+        log.debug("calculated total disk space prior to padding: %s" % space)
+        # this is a large pad for pool metadata (space used in the vg)
+        space *= 1.25
+        space += 10 * len(self.disks)
+        log.debug("total disk space needed: %s" % space)
+        return space
+
+    @property
+    def pool_list(self):
+        return self.storage.thinpools
+
+    def get_pool(self):
+        if not self.container:
+            return None
+
+        # We're looking for a new pool in our vg to use. If there aren't any,
+        # we're using one of the existing pools. Would it be better to always
+        # create a new pool to allocate new devices from? Probably not, since
+        # that would prevent users from setting up custom pools on tty2.
+        pool = None
+        pools = [p for p in self.pool_list if p.vg == self.container]
+        if pools:
+            new_pools = [p for p in pools if not p.exists]
+            if new_pools:
+                pool = new_pools[0]
+            else:
+                pool = pools[0]
+
+        return pool
+
+    def _get_new_pool(self, *args, **kwargs):
+        kwargs["thin_pool"] = True
+        return super(LVMThinPFactory, self)._get_new_device(*args, **kwargs)
+
+    def _get_pool_size(self):
+        size = self.size
+        free = self.container.freeSpace
+        if self.device:
+            free += self.raw_device.size
+        # THINP
+        if free < size:
+            log.info("adjusting size from %.2f to %.2f so it fits "
+                     "in container %s" % (size, free, self.container.name))
+            size = free
+
+        corrected_size = size * 0.8
+        log.info("adjusting size from %.2f to %.2f to account for thin pool "
+                 "metadata" % (size, corrected_size))
+        size = corrected_size
+
+        return size
+
+    def _set_pool_size(self):
+        new_size = self._get_pool_size()
+        self.pool.size = new_size
+
+    def _reconfigure_pool(self):
+        """ Adjust the pool according to the set of devices it will contain. """
+        self._set_pool_size()
+
+    def _create_pool(self):
+        """ Create a pool large enough to contain the new device. """
+        if self.size == 0:
+            return
+
+        size = self._get_pool_size()
+        self.pool = self._get_new_pool(size=size, parents=[self.container])
+        self.storage.createDevice(self.pool)
+
+    #
+    # methods to configure the factory's container (both vg and pool)
+    #
+    def _reconfigure_container(self):
+        """ Reconfigure a defined container required by this factory device. """
+        super(LVMThinPFactory, self)._reconfigure_container()
+        self.pool = self.get_pool()
+        if self.pool:
+            self._reconfigure_pool()
+        else:
+            self._create_pool()
+
+    def _create_container(self):
+        """ Create the container device required by this factory device. """
+        super(LVMThinPFactory, self)._create_container()
+        self._create_pool()
+
+    #
+    # methods to configure the factory's device
+    #
+    def _get_new_device(self, *args, **kwargs):
+        """ Create and return the factory device as a StorageDevice. """
+        kwargs["parents"] = [self.pool]     # XXX consider passing vg and pool?
+        kwargs["thin_volume"] = True
+        return super(LVMThinPFactory, self)._get_new_device(*args, **kwargs)
+
 class MDFactory(DeviceFactory):
     """ Factory for creating MD RAID devices. """
     child_factory_class = PartitionSetFactory
@@ -1275,6 +1453,12 @@ class BTRFSFactory(DeviceFactory):
         else:
             self.size_set_class = SameSizeSet
 
+    def _handle_no_size(self):
+        """ Set device size so that it grows to the largest size possible.
""" + super(BTRFSFactory, self)._handle_no_size() + if self.container and self.container.exists: + self.size = self.container.size + def _get_total_space(self): """ Return the total space needed for the specified container. """ size = 0 diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py index eaafad1..8385814 100644 --- a/blivet/devicelibs/lvm.py +++ b/blivet/devicelibs/lvm.py @@ -404,3 +404,35 @@ def lvdeactivate(vg_name, lv_name): except LVMError as msg: raise LVMError("lvdeactivate failed for %s: %s" % (lv_name, msg)) +def thinpoolcreate(vg_name, lv_name, size): + args = ["lvcreate", "--thinpool", "%s/%s" % (vg_name, lv_name), + "--size", "%dm" % size] + _getConfigArgs() + + try: + lvm(args) + except LVMError as msg: + raise LVMError("lvcreate failed for %s/%s: %s" % (vg_name, lv_name, msg)) + +def thinlvcreate(vg_name, pool_name, lv_name, size): + args = ["lvcreate", "--thinpool", "%s/%s" % (vg_name, pool_name), + "--virtualsize", "%dm" % size, "-n", lv_name] + \ + _getConfigArgs() + + try: + lvm(args) + except LVMError as msg: + raise LVMError("lvcreate failed for %s/%s: %s" % (vg_name, lv_name, msg)) + +def thinlvpoolname(vg_name, lv_name): + args = ["lvs", "--noheadings", "-o", "pool_lv"] + \ + _getConfigArgs(read_only_locking=True) + \ + ["%s/%s" % (vg_name, lv_name)] + + buf = util.capture_output(["lvm"] + args) + + try: + pool = buf.splitlines()[0].strip() + except IndexError: + pool = '' + + return pool diff --git a/blivet/devices.py b/blivet/devices.py index 2bb7289..a88e0dc 100644 --- a/blivet/devices.py +++ b/blivet/devices.py @@ -786,6 +786,7 @@ class StorageDevice(Device): try: self._create() except Exception as e: + raise raise DeviceCreateError(str(e), self.name) else: self._postCreate() @@ -2228,11 +2229,16 @@ class LVMVolumeGroupDevice(DMDevice): # verify we have the space, then add it # do not verify for growing vg (because of ks) - if not lv.exists and not self.growable and lv.size > self.freeSpace: + # FIXME: add a "isthin" property and/or "ispool"? + if not lv.exists and not self.growable and \ + not isinstance(lv, LVMThinLogicalVolumeDevice) and \ + lv.size > self.freeSpace: raise DeviceError("new lv is too large to fit in free space", self.name) log.debug("Adding %s/%dMB to %s" % (lv.name, lv.size, self.name)) self._lvs.append(lv) + if hasattr(lv, "pool") and lv.pool in self.lvs: + lv.pool._addLogVol(lv) def _removeLogVol(self, lv): """ Remove an LV from this VG. """ @@ -2240,6 +2246,8 @@ class LVMVolumeGroupDevice(DMDevice): raise ValueError("specified lv is not part of this vg") self._lvs.remove(lv) + if hasattr(lv, "pool") and lv.pool in self.lvs: + lv.pool._removeLogVol(lv) def _addPV(self, pv): """ Add a PV to this VG. """ @@ -2378,6 +2386,14 @@ class LVMVolumeGroupDevice(DMDevice): return self._lvs[:] # we don't want folks changing our list @property + def thinpools(self): + return [l for l in self._lvs if isinstance(l, LVMThinPoolDevice)] + + @property + def thinlvs(self): + return [l for l in self._lvs if isinstance(l, LVMThinLogicalVolumeDevice)] + + @property def complete(self): """Check if the vg has all its pvs in the system Return True if complete. 
@@ -2426,13 +2442,14 @@ class LVMLogicalVolumeDevice(DMDevice):
                     percent -- percent of VG space to take
 
         """
-        if isinstance(parents, list):
-            if len(parents) != 1:
-                raise ValueError("constructor requires a single LVMVolumeGroupDevice instance")
-            elif not isinstance(parents[0], LVMVolumeGroupDevice):
+        if self.__class__.__name__ == "LVMLogicalVolumeDevice":
+            if isinstance(parents, list):
+                if len(parents) != 1:
+                    raise ValueError("constructor requires a single LVMVolumeGroupDevice instance")
+                elif not isinstance(parents[0], LVMVolumeGroupDevice):
+                    raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
+            elif not isinstance(parents, LVMVolumeGroupDevice):
                 raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
-        elif not isinstance(parents, LVMVolumeGroupDevice):
-            raise ValueError("constructor requires a LVMVolumeGroupDevice instance")
 
         DMDevice.__init__(self, name, size=size, format=format,
                           sysfsPath=sysfsPath, parents=parents, exists=exists)
@@ -2474,7 +2491,7 @@ class LVMLogicalVolumeDevice(DMDevice):
             raise SinglePhysicalVolumeError(self.singlePVerr)
 
         # here we go with the circular references
-        self.vg._addLogVol(self)
+        self.container._addLogVol(self)
 
     def __repr__(self):
         s = DMDevice.__repr__(self)
@@ -2533,6 +2550,11 @@ class LVMLogicalVolumeDevice(DMDevice):
         return self.parents[0]
 
     @property
+    def container(self):
+        """ This Logical Volume's container (thin pool or vg). """
+        return self.parents[0]
+
+    @property
     def mapName(self):
         """ This device's device-mapper map name """
         # Thank you lvm for this lovely hack.
@@ -2625,15 +2647,6 @@ class LVMLogicalVolumeDevice(DMDevice):
 
     def resize(self):
         log_method_call(self, self.name, status=self.status)
         self._preDestroy()
-
-        # Setup VG parents (in case they are dmraid partitions for example)
-        self.vg.setupParents(orig=True)
-
-        if self.originalFormat.exists:
-            self.originalFormat.teardown()
-        if self.format.exists:
-            self.format.teardown()
-        udev_settle()
 
         lvm.lvresize(self.vg.name, self._name, self.size)
@@ -2661,6 +2674,167 @@ class LVMLogicalVolumeDevice(DMDevice):
             return -1
         return 0
 
+
+class LVMThinPoolDevice(LVMLogicalVolumeDevice):
+    """ An LVM Thin Pool """
+    _type = "lvmthinpool"
+    _resizable = False
+
+    def __init__(self, name, parents=None, size=None, uuid=None,
+                 format=None, exists=False, sysfsPath='',
+                 grow=None, maxsize=None, percent=None):
+        """ Create an LVMThinPoolDevice instance.
+
+            Arguments:
+
+                name -- the device name (generally a device node's basename)
+                parents -- vg (LVMVolumeGroupDevice instance)
+
+            Keyword Arguments:
+
+                size -- the device's size (in MB)
+                uuid -- the device's UUID
+                sysfsPath -- sysfs device path
+                format -- a DeviceFormat instance
+                exists -- indicates whether this is an existing device
+
+                For new (non-existent) LVs only:
+
+                    grow -- whether to grow this LV
+                    maxsize -- maximum size for growable LV (in MB)
+                    percent -- percent of VG space to take
+
+        """
+        # thin pool checks go here
+
+        super(LVMThinPoolDevice, self).__init__(name, parents=parents,
+                                                size=size, uuid=uuid,
+                                                format=format, exists=exists,
+                                                sysfsPath=sysfsPath, grow=grow,
+                                                maxsize=maxsize,
+                                                percent=percent)
+
+        self._lvs = []
+
+    def _addLogVol(self, lv):
+        """ Add an LV to this pool. """
+        if lv in self._lvs:
+            raise ValueError("lv is already part of this pool")
+
+        # TODO: add some checking to prevent overcommit?
+
+        log.debug("Adding %s/%dMB to %s" % (lv.name, lv.size, self.name))
+        self._lvs.append(lv)
+
+    def _removeLogVol(self, lv):
+        """ Remove an LV from this pool.
""" + if lv not in self._lvs: + raise ValueError("specified lv is not part of this vg") + + self._lvs.remove(lv) + + @property + def lvs(self): + """ A list of this pool's LVs """ + return self._lvs[:] # we don't want folks changing our list + + def _setSize(self, size): + """ + bs = self.vg.peSize * 20 + if size % bs != 0: + size = ((size / bs) + 1) * bs + log.debug("aligning new pool size to a multiple of %d (%d)" + % (bs, size)) + """ + + super(LVMThinPoolDevice, self)._setSize(size) + + size = property(StorageDevice._getSize, _setSize) + + @property + def vgSpaceUsed(self): + space = super(LVMThinPoolDevice, self).vgSpaceUsed + space += self.vg.peSize * len(self.lvs) + space += self.vg.align(10 * len(self.vg.pvs), roundup=True) + return space + + def _create(self): + """ Create the device. """ + log_method_call(self, self.name, status=self.status) + # TODO: chunk size, data/metadata split --> profile + lvm.thinpoolcreate(self.vg.name, self.lvname, self.size) + + @property + def isleaf(self): + return len(self._lvs) == 0 and super(LVMThinPoolDevice, self).isleaf + + +class LVMThinLogicalVolumeDevice(LVMLogicalVolumeDevice): + """ An LVM Thin Logical Volume """ + _type = "lvmthinlv" + _resizable = True + + def __init__(self, name, parents=None, size=None, uuid=None, + format=None, exists=False, sysfsPath='', singlePV=False, + grow=None, maxsize=None, percent=None): + """ Create a LVMThinLogicalVolumeDevice instance. + + Arguments: + + name -- the device name (generally a device node's basename) + parents -- thin pool (LVMThinPoolDevice instance) + + Keyword Arguments: + + size -- the device's size (in MB) + uuid -- the device's UUID + sysfsPath -- sysfs device path + format -- a DeviceFormat instance + exists -- indicates whether this is an existing device + + For new (non-existent) LVs only: + + grow -- whether to grow this LV + maxsize -- maximum size for growable LV (in MB) + percent -- percent of VG space to take + + """ + # thin pool checks go here + + # XXX Should the pool be the parent, or should the vg? + self.pool = parents[0] + parents = self.pool.parents + + super(LVMThinLogicalVolumeDevice, self).__init__(name, parents=parents, + size=size, uuid=uuid, + format=format, exists=exists, + sysfsPath=sysfsPath, grow=grow, + maxsize=maxsize, + percent=percent) + + def dependsOn(self, dep): + """ Return True if this device depends on dep. """ + return (self.pool == dep or Device.dependsOn(self, dep)) + + @property + def vgSpaceUsed(self): + # FIXME: at least account for used space in existing vols + #return self.vg.align(1, roundup=True) + return 0 + + def _setSize(self, size): + size = self.vg.align(util.numeric_type(size)) + self._size = size + self.targetSize = size + + size = property(StorageDevice._getSize, _setSize) + + def _create(self): + """ Create the device. """ + log_method_call(self, self.name, status=self.status) + lvm.thinlvcreate(self.vg.name, self.pool.lvname, self.lvname, + self.size) + + class MDRaidArrayDevice(StorageDevice): """ An mdraid (Linux RAID) device. 
""" _type = "mdarray" diff --git a/blivet/devicetree.py b/blivet/devicetree.py index 691f1b2..c0edef2 100644 --- a/blivet/devicetree.py +++ b/blivet/devicetree.py @@ -1211,6 +1211,10 @@ class DeviceTree(object): return -1 elif a[0] in snapshot_chars and b[0] not in snapshot_chars: return 1 + elif a[0] == 't' and b[0] == 'V': + return -1 + elif a[0] == 'V' and b[0] == 't': + return 1 else: return 0 @@ -1220,7 +1224,9 @@ class DeviceTree(object): indices.sort(key=lambda i: lv_attr[i], cmp=lv_attr_cmp) mirrors = {} for index in indices: + lv_class = LVMLogicalVolumeDevice lv_name = lv_names[index] + lv_parents = [vg_device] name = "%s-%s" % (vg_name, lv_name) if lv_attr[index][0] in 'Ss': log.info("found lvm snapshot volume '%s'" % name) @@ -1265,16 +1271,25 @@ class DeviceTree(object): mirrors[name] = {"stripes": 0, "log": 0} mirrors[name]["log"] = lv_sizes[index] + elif lv_attr[index][0] == 't': + # thin pool + lv_class = LVMThinPoolDevice + elif lv_attr[index][0] == 'V': + # thin volume + pool_name = devicelibs.lvm.thinlvpoolname(vg_name, lv_name) + pool_device = self.getDeviceByName("%s-%s" % (vg_name, + pool_name)) + if pool_device is None: + raise DeviceTreeError("failed to look up thin pool") + lv_class = LVMThinLogicalVolumeDevice + lv_parents = [pool_device] lv_dev = self.getDeviceByUuid(lv_uuids[index]) if lv_dev is None: lv_uuid = lv_uuids[index] lv_size = lv_sizes[index] - lv_device = LVMLogicalVolumeDevice(lv_name, - vg_device, - uuid=lv_uuid, - size=lv_size, - exists=True) + lv_device = lv_class(lv_name, parents=lv_parents, + uuid=lv_uuid, size=lv_size, exists=True) self._addDevice(lv_device) if flags.installer_mode: lv_device.setup() diff --git a/blivet/partitioning.py b/blivet/partitioning.py index 8660969..f1aee9c 100644 --- a/blivet/partitioning.py +++ b/blivet/partitioning.py @@ -66,7 +66,7 @@ def _scheduleImplicitPartitions(storage, disks): devs = [] # only schedule the partitions if either lvm or btrfs autopart was chosen - if storage.autoPartType not in (AUTOPART_TYPE_LVM, AUTOPART_TYPE_BTRFS): + if storage.autoPartType == AUTOPART_TYPE_PLAIN: return devs for disk in disks: @@ -77,7 +77,7 @@ def _scheduleImplicitPartitions(storage, disks): "escrow_cert": storage.autoPartEscrowCert, "add_backup_passphrase": storage.autoPartAddBackupPassphrase} else: - if storage.autoPartType == AUTOPART_TYPE_LVM: + if storage.autoPartType in [AUTOPART_TYPE_LVM, AUTOPART_TYPE_LVM_THINP]: fmt_type = "lvmpv" else: fmt_type = "btrfs" @@ -122,8 +122,10 @@ def _schedulePartitions(storage, disks): # First pass is for partitions only. We'll do LVs later. # for request in storage.autoPartitionRequests: - if (request.lv and storage.autoPartType == AUTOPART_TYPE_LVM) or \ - (request.btr and storage.autoPartType == AUTOPART_TYPE_BTRFS): + if ((request.lv and + storage.autoPartType in [AUTOPART_TYPE_LVM, + AUTOPART_TYPE_LVM_THINP]) or + (request.btr and storage.autoPartType == AUTOPART_TYPE_BTRFS)): continue if request.requiredSpace and request.requiredSpace > free: @@ -199,7 +201,7 @@ def _scheduleVolumes(storage, devs): if not devs: return - if storage.autoPartType == AUTOPART_TYPE_LVM: + if storage.autoPartType in [AUTOPART_TYPE_LVM, AUTOPART_TYPE_LVM_THINP]: new_container = storage.newVG new_volume = storage.newLV format_name = "lvmpv" @@ -229,15 +231,22 @@ def _scheduleVolumes(storage, devs): # schedule them for creation. # # Second pass, for LVs only. 
+    pool = None
     for request in storage.autoPartitionRequests:
         btr = storage.autoPartType == AUTOPART_TYPE_BTRFS and request.btr
         lv = storage.autoPartType == AUTOPART_TYPE_LVM and request.lv
-
-        if not btr and not lv:
+        thinlv = (storage.autoPartType == AUTOPART_TYPE_LVM_THINP and
+                  request.lv and request.thin)
+        if thinlv and pool is None:
+            # create a single thin pool in the vg
+            pool = storage.newLV(parents=[container], thin_pool=True, grow=True)
+            storage.createDevice(pool)
+
+        if not btr and not lv and not thinlv:
             continue
 
         # required space isn't relevant on btrfs
-        if lv and \
+        if (lv or thinlv) and \
            request.requiredSpace and request.requiredSpace > container.size:
             continue
 
@@ -250,11 +259,17 @@ def _scheduleVolumes(storage, devs):
         kwargs = {"mountpoint": request.mountpoint,
                   "fmt_type": request.fstype}
-        if lv:
-            kwargs.update({"parents": [container],
+        if lv or thinlv:
+            if thinlv:
+                parents = [pool]
+            else:
+                parents = [container]
+
+            kwargs.update({"parents": parents,
                            "grow": request.grow,
                            "maxsize": request.maxSize,
                            "size": request.size,
+                           "thin_volume": thinlv,
                            "singlePV": request.singlePV})
         else:
             kwargs.update({"parents": [container],
@@ -1493,6 +1508,10 @@ class VGChunk(Chunk):
         self.vg = vg
         self.path = vg.path
         usable_extents = vg.extents - (vg.reservedSpace / vg.peSize)
+        if vg.thinpools:
+            # leave space for thin pool metadata
+            usable_extents = int(usable_extents * 0.8)
+
         super(VGChunk, self).__init__(usable_extents, requests=requests)
 
     def addRequest(self, req):
@@ -1544,6 +1563,28 @@ class VGChunk(Chunk):
         super(VGChunk, self).growRequests()
 
 
+class ThinPoolChunk(VGChunk):
+    """ A free region in an LVM thin pool from which LVs will be allocated """
+    def __init__(self, pool, requests=None):
+        """ Create a ThinPoolChunk instance.
+
+            Arguments:
+
+                pool -- an LVMThinPoolDevice within which this chunk resides
+
+
+            Keyword Arguments:
+
+                requests -- list of Request instances allocated from this chunk
+
+        """
+        vg = pool.vg
+        self.vg = vg            # only used for align, &c
+        self.path = pool.path
+        usable_extents = (pool.size / vg.peSize) * 0.8
+        # Skip VGChunk's constructor.
+        super(VGChunk, self).__init__(usable_extents, requests=requests)
+
 def getDiskChunks(disk, partitions, free):
     """ Return a list of Chunk instances representing a disk.
 
@@ -1921,7 +1962,16 @@ def lvCompare(lv1, lv2):
         return ret
 
 def growLVM(storage):
-    """ Grow LVs according to the sizes of the PVs. """
+    """ Grow LVs according to the sizes of the PVs.
+
+        Strategy for growth involving thin pools:
+            - Applies to device factory class as well.
+            - Overcommit is not allowed.
+            - Pool lv's base size includes sizes of thin lvs within it.
+            - Pool is grown along with other non-thin lvs.
+            - Thin lvs within each pool are grown separately using the
+              ThinPoolChunk class.
+ """ for vg in storage.vgs: total_free = vg.freeSpace if total_free < 0: @@ -1935,15 +1985,34 @@ def growLVM(storage): log.debug("vg %s: %dMB free ; lvs: %s" % (vg.name, total_free, [l.lvname for l in vg.lvs])) - chunk = VGChunk(vg, requests=[LVRequest(l) for l in vg.lvs]) - chunk.growRequests() + # don't include thin lvs in the vg's growth calculation + fatlvs = [lv for lv in vg.lvs if lv not in vg.thinlvs] + requests = [] + for lv in fatlvs: + if lv in vg.thinpools: + # make sure the pool's base size is at least the sum of its lvs' + base = sum(l.size + vg.peSize for l in lv.lvs) + lv.req_size = max(lv.req_size, base) + + def apply_chunk_growth(chunk): + # grow the lvs by the amounts the VGChunk calculated + for req in chunk.requests: + if not req.device.req_grow: + continue - # now grow the lvs by the amounts we've calculated above - for req in chunk.requests: - if not req.device.req_grow: - continue + # Base is pe, which means potentially rounded up by as much as + # pesize-1. As a result, you can't just add the growth to the + # initial size. + req.device.size = chunk.lengthToSize(req.base + req.growth) - # Base is in pe, which means potentially rounded up by as much as - # pesize-1. As a result, you can't just add the growth to the - # initial size. - req.device.size = chunk.lengthToSize(req.base + req.growth) + # grow regular lvs + chunk = VGChunk(vg, requests=[LVRequest(l) for l in fatlvs]) + chunk.growRequests() + apply_chunk_growth(chunk) + + # now, grow thin lv requests within their respective pools + for pool in vg.thinpools: + requests = [LVRequest(l) for l in pool.lvs] + thin_chunk = ThinPoolChunk(pool, requests) + thin_chunk.growRequests() + apply_chunk_growth(thin_chunk) diff --git a/blivet/partspec.py b/blivet/partspec.py index 97cb33e..a8ebb6f 100644 --- a/blivet/partspec.py +++ b/blivet/partspec.py @@ -21,8 +21,8 @@ class PartSpec(object): def __init__(self, mountpoint=None, fstype=None, size=None, maxSize=None, - grow=False, btr=False, lv=False, singlePV=False, weight=0, - requiredSpace=0, encrypted=False): + grow=False, btr=False, lv=False, singlePV=False, thin=False, + weight=0, requiredSpace=0, encrypted=False): """ Create a new storage specification. These are used to specify the default partitioning layout as an object before we have the storage system up and running. The attributes are obvious @@ -60,6 +60,7 @@ class PartSpec(object): self.lv = lv self.btr = btr self.singlePV = singlePV + self.thin = thin self.weight = weight self.requiredSpace = requiredSpace self.encrypted = encrypted