From 55a2e8ddacf1ed21fbc320d8c6ef97baf7f623d3 Mon Sep 17 00:00:00 2001
From: Achilleas Koutsou
Date: Mon, 25 Apr 2022 13:50:11 +0200
Subject: [PATCH] disk: add function for resizing Entities based on dirs

New function that ensures that a partition can hold the total sum of all
the required sizes of specific directories on the partition. The
function sums the required directory sizes grouped by their mountpoint
and then resizes the entity path of that Mountable.
---
 internal/disk/disk_test.go       | 110 +++++++++++++++++++++++++++++++
 internal/disk/partition_table.go |  33 ++++++++++
 2 files changed, 143 insertions(+)

diff --git a/internal/disk/disk_test.go b/internal/disk/disk_test.go
index 9b5fa451a..f3249098d 100644
--- a/internal/disk/disk_test.go
+++ b/internal/disk/disk_test.go
@@ -573,3 +573,113 @@ func TestFindDirectoryPartition(t *testing.T) {
 		assert.Nil(pt.findDirectoryEntityPath("/var"))
 	}
 }
+
+func TestEnsureDirectorySizes(t *testing.T) {
+	assert := assert.New(t)
+
+	GiB := 1024 * 1024 * 1024
+	varSizes := map[string]uint64{
+		"/var/lib":         uint64(3 * GiB),
+		"/var/cache":       uint64(2 * GiB),
+		"/var/log/journal": uint64(2 * GiB),
+	}
+
+	varAndHomeSizes := map[string]uint64{
+		"/var/lib":         uint64(3 * GiB),
+		"/var/cache":       uint64(2 * GiB),
+		"/var/log/journal": uint64(2 * GiB),
+		"/home/user/data":  uint64(10 * GiB),
+	}
+
+	{
+		pt := testPartitionTables["plain"]
+		pt = *pt.Clone().(*PartitionTable) // don't modify the original test data
+
+		{
+			// make sure we have the correct volume
+			// guard against changes in the test pt
+			rootPart := pt.Partitions[3]
+			rootPayload := rootPart.Payload.(*Filesystem)
+
+			assert.Equal("/", rootPayload.Mountpoint)
+			assert.Equal(uint64(0), rootPart.Size)
+		}
+
+		{
+			// add requirements for /var subdirs that are > 5 GiB
+			pt.EnsureDirectorySizes(varSizes)
+			rootPart := pt.Partitions[3]
+			assert.Equal(uint64(7*GiB), rootPart.Size)
+
+			// invalid
+			assert.Panics(func() { pt.EnsureDirectorySizes(map[string]uint64{"invalid": uint64(300)}) })
+		}
+	}
+
+	{
+		pt := testPartitionTables["luks+lvm"]
+		pt = *pt.Clone().(*PartitionTable) // don't modify the original test data
+
+		{
+			// make sure we have the correct volume
+			// guard against changes in the test pt
+			rootPart := pt.Partitions[3]
+			rootLUKS := rootPart.Payload.(*LUKSContainer)
+			rootVG := rootLUKS.Payload.(*LVMVolumeGroup)
+			rootLV := rootVG.LogicalVolumes[0]
+			rootFS := rootLV.Payload.(*Filesystem)
+			homeLV := rootVG.LogicalVolumes[1]
+			homeFS := homeLV.Payload.(*Filesystem)
+
+			assert.Equal(uint64(5*GiB), rootPart.Size)
+			assert.Equal("/", rootFS.Mountpoint)
+			assert.Equal(uint64(2*GiB), rootLV.Size)
+			assert.Equal("/home", homeFS.Mountpoint)
+			assert.Equal(uint64(2*GiB), homeLV.Size)
+		}
+
+		{
+			// add requirements for /var subdirs that are > 5 GiB
+			pt.EnsureDirectorySizes(varAndHomeSizes)
+			rootPart := pt.Partitions[3]
+			rootLUKS := rootPart.Payload.(*LUKSContainer)
+			rootVG := rootLUKS.Payload.(*LVMVolumeGroup)
+			rootLV := rootVG.LogicalVolumes[0]
+			homeLV := rootVG.LogicalVolumes[1]
+			assert.Equal(uint64(17*GiB)+rootVG.MetadataSize(), rootPart.Size)
+			assert.Equal(uint64(7*GiB), rootLV.Size)
+			assert.Equal(uint64(10*GiB), homeLV.Size)
+
+			// invalid
+			assert.Panics(func() { pt.EnsureDirectorySizes(map[string]uint64{"invalid": uint64(300)}) })
+		}
+	}
+
+	{
+		pt := testPartitionTables["btrfs"]
+		pt = *pt.Clone().(*PartitionTable) // don't modify the original test data
+
+		{
+			// make sure we have the correct volume
+			// guard against changes in the test pt
+			rootPart := pt.Partitions[3]
+			rootPayload := rootPart.Payload.(*Btrfs)
+			assert.Equal("/", rootPayload.Subvolumes[0].Mountpoint)
+			assert.Equal(uint64(0), rootPayload.Subvolumes[0].Size)
+			assert.Equal("/var", rootPayload.Subvolumes[1].Mountpoint)
+			assert.Equal(uint64(5*GiB), rootPayload.Subvolumes[1].Size)
+		}
+
+		{
+			// add requirements for /var subdirs that are > 5 GiB
+			pt.EnsureDirectorySizes(varSizes)
+			rootPart := pt.Partitions[3]
+			rootPayload := rootPart.Payload.(*Btrfs)
+			assert.Equal(uint64(7*GiB), rootPayload.Subvolumes[1].Size)
+
+			// invalid
+			assert.Panics(func() { pt.EnsureDirectorySizes(map[string]uint64{"invalid": uint64(300)}) })
+		}
+	}
+
+}
diff --git a/internal/disk/partition_table.go b/internal/disk/partition_table.go
index 9a94bcad2..f420cfde4 100644
--- a/internal/disk/partition_table.go
+++ b/internal/disk/partition_table.go
@@ -177,6 +177,39 @@ func (pt *PartitionTable) findDirectoryEntityPath(dir string) []Entity {
 	return pt.findDirectoryEntityPath(parent)
 }
 
+// EnsureDirectorySizes takes a mapping of directory paths to sizes (in bytes)
+// and resizes the appropriate partitions such that they are at least the size
+// of the sum of their subdirectories.
+// The function will panic if any of the directory paths are invalid.
+func (pt *PartitionTable) EnsureDirectorySizes(dirSizeMap map[string]uint64) {
+
+	type mntSize struct {
+		entPath []Entity
+		newSize uint64
+	}
+
+	// add up the required size for each directory grouped by their mountpoints
+	mntSizeMap := make(map[string]*mntSize)
+	for dir, size := range dirSizeMap {
+		entPath := pt.findDirectoryEntityPath(dir)
+		if entPath == nil {
+			panic(fmt.Sprintf("EnsureDirectorySizes: invalid dir path %q", dir))
+		}
+		mnt := entPath[0].(Mountable)
+		mountpoint := mnt.GetMountpoint()
+		if _, ok := mntSizeMap[mountpoint]; !ok {
+			mntSizeMap[mountpoint] = &mntSize{entPath, 0}
+		}
+		es := mntSizeMap[mountpoint]
+		es.newSize += size
+	}
+
+	// resize all the entities in the map
+	for _, es := range mntSizeMap {
+		resizeEntityBranch(es.entPath, es.newSize)
+	}
+}
+
 func (pt *PartitionTable) CreateMountpoint(mountpoint string, size uint64) (Entity, error) {
 	filesystem := Filesystem{
 		Type: "xfs",
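
Note (not part of the patch): the grouping step the commit message describes, summing the required directory sizes per mountpoint before each mountpoint's entity path is resized once, can be sketched as a small standalone program. The mountpointFor helper and the hard-coded mountpoint set below are hypothetical stand-ins for PartitionTable.findDirectoryEntityPath and a real partition table; only the per-mountpoint summing performed by EnsureDirectorySizes is mirrored.

package main

import (
	"fmt"
	"path/filepath"
)

// mountpointFor is a hypothetical stand-in for findDirectoryEntityPath: it
// walks the directory up towards the root until it reaches a known mountpoint.
func mountpointFor(dir string, mountpoints map[string]bool) string {
	for {
		if mountpoints[dir] {
			return dir
		}
		parent := filepath.Dir(dir)
		if parent == dir {
			return "" // no mountpoint found; the real function panics in this case
		}
		dir = parent
	}
}

func main() {
	mountpoints := map[string]bool{"/": true, "/home": true}
	dirSizes := map[string]uint64{
		"/var/lib":        3 << 30, // 3 GiB
		"/var/cache":      2 << 30,
		"/home/user/data": 10 << 30,
	}

	// Sum the required sizes grouped by mountpoint; EnsureDirectorySizes does
	// this before resizing each mountpoint's entity path.
	required := make(map[string]uint64)
	for dir, size := range dirSizes {
		required[mountpointFor(dir, mountpoints)] += size
	}

	fmt.Println(required) // map[/:5368709120 /home:10737418240]
}

Grouping first means each Mountable is resized a single time with the full summed requirement, rather than once per directory.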