disk: add function for resizing Entities based on dirs
Add a function that ensures a partition can hold the sum of the required sizes of specific directories on that partition. The function groups the required directory sizes by mountpoint, sums each group, and then resizes the entity path of the corresponding Mountable.
This commit is contained in:
parent
ce5a28c113
commit
55a2e8ddac
2 changed files with 143 additions and 0 deletions
|
|
@ -573,3 +573,113 @@ func TestFindDirectoryPartition(t *testing.T) {
|
|||
assert.Nil(pt.findDirectoryEntityPath("/var"))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureDirectorySizes(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
GiB := 1024 * 1024 * 1024
|
||||
varSizes := map[string]uint64{
|
||||
"/var/lib": uint64(3 * GiB),
|
||||
"/var/cache": uint64(2 * GiB),
|
||||
"/var/log/journal": uint64(2 * GiB),
|
||||
}
|
||||
|
||||
varAndHomeSizes := map[string]uint64{
|
||||
"/var/lib": uint64(3 * GiB),
|
||||
"/var/cache": uint64(2 * GiB),
|
||||
"/var/log/journal": uint64(2 * GiB),
|
||||
"/home/user/data": uint64(10 * GiB),
|
||||
}
|
||||
|
||||
{
|
||||
pt := testPartitionTables["plain"]
|
||||
pt = *pt.Clone().(*PartitionTable) // don't modify the original test data
|
||||
|
||||
{
|
||||
// make sure we have the correct volume
|
||||
// guard against changes in the test pt
|
||||
rootPart := pt.Partitions[3]
|
||||
rootPayload := rootPart.Payload.(*Filesystem)
|
||||
|
||||
assert.Equal("/", rootPayload.Mountpoint)
|
||||
assert.Equal(uint64(0), rootPart.Size)
|
||||
}
|
||||
|
||||
{
|
||||
// add requirements for /var subdirs that are > 5 GiB
|
||||
pt.EnsureDirectorySizes(varSizes)
|
||||
rootPart := pt.Partitions[3]
|
||||
assert.Equal(uint64(7*GiB), rootPart.Size)
|
||||
|
||||
// invalid
|
||||
assert.Panics(func() { pt.EnsureDirectorySizes(map[string]uint64{"invalid": uint64(300)}) })
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
pt := testPartitionTables["luks+lvm"]
|
||||
pt = *pt.Clone().(*PartitionTable) // don't modify the original test data
|
||||
|
||||
{
|
||||
// make sure we have the correct volume
|
||||
// guard against changes in the test pt
|
||||
rootPart := pt.Partitions[3]
|
||||
rootLUKS := rootPart.Payload.(*LUKSContainer)
|
||||
rootVG := rootLUKS.Payload.(*LVMVolumeGroup)
|
||||
rootLV := rootVG.LogicalVolumes[0]
|
||||
rootFS := rootLV.Payload.(*Filesystem)
|
||||
homeLV := rootVG.LogicalVolumes[1]
|
||||
homeFS := homeLV.Payload.(*Filesystem)
|
||||
|
||||
assert.Equal(uint64(5*GiB), rootPart.Size)
|
||||
assert.Equal("/", rootFS.Mountpoint)
|
||||
assert.Equal(uint64(2*GiB), rootLV.Size)
|
||||
assert.Equal("/home", homeFS.Mountpoint)
|
||||
assert.Equal(uint64(2*GiB), homeLV.Size)
|
||||
}
|
||||
|
||||
{
|
||||
// add requirements for /var subdirs that are > 5 GiB
|
||||
pt.EnsureDirectorySizes(varAndHomeSizes)
|
||||
rootPart := pt.Partitions[3]
|
||||
rootLUKS := rootPart.Payload.(*LUKSContainer)
|
||||
rootVG := rootLUKS.Payload.(*LVMVolumeGroup)
|
||||
rootLV := rootVG.LogicalVolumes[0]
|
||||
homeLV := rootVG.LogicalVolumes[1]
|
||||
assert.Equal(uint64(17*GiB)+rootVG.MetadataSize(), rootPart.Size)
|
||||
assert.Equal(uint64(7*GiB), rootLV.Size)
|
||||
assert.Equal(uint64(10*GiB), homeLV.Size)
|
||||
|
||||
// invalid
|
||||
assert.Panics(func() { pt.EnsureDirectorySizes(map[string]uint64{"invalid": uint64(300)}) })
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
pt := testPartitionTables["btrfs"]
|
||||
pt = *pt.Clone().(*PartitionTable) // don't modify the original test data
|
||||
|
||||
{
|
||||
// make sure we have the correct volume
|
||||
// guard against changes in the test pt
|
||||
rootPart := pt.Partitions[3]
|
||||
rootPayload := rootPart.Payload.(*Btrfs)
|
||||
assert.Equal("/", rootPayload.Subvolumes[0].Mountpoint)
|
||||
assert.Equal(uint64(0), rootPayload.Subvolumes[0].Size)
|
||||
assert.Equal("/var", rootPayload.Subvolumes[1].Mountpoint)
|
||||
assert.Equal(uint64(5*GiB), rootPayload.Subvolumes[1].Size)
|
||||
}
|
||||
|
||||
{
|
||||
// add requirements for /var subdirs that are > 5 GiB
|
||||
pt.EnsureDirectorySizes(varSizes)
|
||||
rootPart := pt.Partitions[3]
|
||||
rootPayload := rootPart.Payload.(*Btrfs)
|
||||
assert.Equal(uint64(7*GiB), rootPayload.Subvolumes[1].Size)
|
||||
|
||||
// invalid
|
||||
assert.Panics(func() { pt.EnsureDirectorySizes(map[string]uint64{"invalid": uint64(300)}) })
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -177,6 +177,39 @@ func (pt *PartitionTable) findDirectoryEntityPath(dir string) []Entity {
|
|||
return pt.findDirectoryEntityPath(parent)
|
||||
}
|
||||
|
||||
// EnsureDirectorySizes takes a mapping of directory paths to sizes (in bytes)
|
||||
// and resizes the appropriate partitions such that they are at least the size
|
||||
// of the sum of their subdirectories.
|
||||
// The function will panic if any of the directory paths are invalid.
|
||||
func (pt *PartitionTable) EnsureDirectorySizes(dirSizeMap map[string]uint64) {
|
||||
|
||||
type mntSize struct {
|
||||
entPath []Entity
|
||||
newSize uint64
|
||||
}
|
||||
|
||||
// add up the required size for each directory grouped by their mountpoints
|
||||
mntSizeMap := make(map[string]*mntSize)
|
||||
for dir, size := range dirSizeMap {
|
||||
entPath := pt.findDirectoryEntityPath(dir)
|
||||
if entPath == nil {
|
||||
panic(fmt.Sprintf("EnsureDirectorySizes: invalid dir path %q", dir))
|
||||
}
|
||||
mnt := entPath[0].(Mountable)
|
||||
mountpoint := mnt.GetMountpoint()
|
||||
if _, ok := mntSizeMap[mountpoint]; !ok {
|
||||
mntSizeMap[mountpoint] = &mntSize{entPath, 0}
|
||||
}
|
||||
es := mntSizeMap[mountpoint]
|
||||
es.newSize += size
|
||||
}
|
||||
|
||||
// resize all the entities in the map
|
||||
for _, es := range mntSizeMap {
|
||||
resizeEntityBranch(es.entPath, es.newSize)
|
||||
}
|
||||
}
|
||||
|
||||
func (pt *PartitionTable) CreateMountpoint(mountpoint string, size uint64) (Entity, error) {
|
||||
filesystem := Filesystem{
|
||||
Type: "xfs",
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue