diff --git a/proxmox/nodes/containers/containers.go b/proxmox/nodes/containers/containers.go
index 239209919..30f2d33da 100644
--- a/proxmox/nodes/containers/containers.go
+++ b/proxmox/nodes/containers/containers.go
@@ -475,3 +475,13 @@ func (c *Client) WaitForContainerConfigUnlock(ctx context.Context, ignoreErrorRe
 	return nil
 }
+
+// ResizeContainerDisk resizes a container disk.
+func (c *Client) ResizeContainerDisk(ctx context.Context, d *ResizeRequestBody) error {
+	err := c.DoRequest(ctx, http.MethodPut, c.ExpandPath("resize"), d, nil)
+	if err != nil {
+		return fmt.Errorf("error resizing container disk: %w", err)
+	}
+
+	return nil
+}
diff --git a/proxmox/nodes/containers/containers_types.go b/proxmox/nodes/containers/containers_types.go
index 347c6a884..998c1ec1c 100644
--- a/proxmox/nodes/containers/containers_types.go
+++ b/proxmox/nodes/containers/containers_types.go
@@ -285,6 +285,11 @@ type ShutdownRequestBody struct {
 	Timeout *int `json:"timeout,omitempty" url:"timeout,omitempty"`
 }
 
+// ResizeRequestBody contains the data for a container disk resize request.
+type ResizeRequestBody struct {
+	Disk string `json:"disk" url:"disk"`
+	Size string `json:"size" url:"size"`
+}
+
 // UpdateRequestBody contains the data for an user update request.
 type UpdateRequestBody CreateRequestBody
diff --git a/proxmoxtf/resource/container/container.go b/proxmoxtf/resource/container/container.go
index 76ca2ea41..dd0f9b179 100644
--- a/proxmoxtf/resource/container/container.go
+++ b/proxmoxtf/resource/container/container.go
@@ -10,7 +10,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"reflect"
 	"regexp"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -336,7 +338,6 @@ func Container() *schema.Resource {
 			Type:        schema.TypeList,
 			Description: "The disks",
 			Optional:    true,
-			ForceNew:    true,
 			DefaultFunc: func() (any, error) {
 				return []any{
 					map[string]any{
@@ -377,7 +378,6 @@ func Container() *schema.Resource {
 						Type:             schema.TypeInt,
 						Description:      "The rootfs size in gigabytes",
 						Optional:         true,
-						ForceNew:         true,
 						Default:          dvDiskSize,
 						ValidateDiagFunc: validation.ToDiagFunc(validation.IntAtLeast(0)),
 					},
@@ -666,6 +666,7 @@ func Container() *schema.Resource {
 			Type:        schema.TypeList,
 			Description: "A mount point",
 			Optional:    true,
+			ForceNew:    true,
 			Elem: &schema.Resource{
 				Schema: map[string]*schema.Schema{
 					mkMountPointACL: {
@@ -725,12 +726,14 @@ func Container() *schema.Resource {
 							Description:      "Volume size (only used for volume mount points)",
 							Optional:         true,
 							Default:          dvMountPointSize,
+							ForceNew:         true,
 							ValidateDiagFunc: validators.FileSize(),
 						},
 						mkMountPointVolume: {
 							Type:        schema.TypeString,
 							Description: "Volume, device or directory to mount into the container",
 							Required:    true,
+							ForceNew:    true,
 							DiffSuppressFunc: func(_, oldVal, newVal string, _ *schema.ResourceData) bool {
 								// For *new* volume mounts PVE returns an actual volume ID which is saved in the state,
 								// so on reapply the provider will try to override it:
@@ -1053,6 +1056,96 @@ func Container() *schema.Resource {
 				return strconv.Itoa(newValue.(int)) != d.Id()
 			},
 		),
+		// a customdiff that checks each mount point and forces replacement when one changes
+		customdiff.ForceNewIf(
+			mkMountPoint,
+			func(_ context.Context, d *schema.ResourceDiff, _ interface{}) bool {
+				oldRaw, newRaw := d.GetChange(mkMountPoint)
+				// compare all oldRaw and newRaw entries
+				oldList, _ := oldRaw.([]interface{})
+				newList, _ := newRaw.([]interface{})
+
+				if oldList == nil {
+					oldList = []interface{}{}
+				}
+				if newList == nil {
+					newList = []interface{}{}
+				}
+
+				for i := 0; i < len(oldList); i++ {
+					if i >= len(newList) {
+						return true
+					}
+					// compare old and new list entries and call ForceNew on the corresponding attributes
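+					// Entries are compared pairwise by index, so reordering
+					// mount points is treated the same as changing them and
+					// forces replacement.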
+					// make a deep comparison
+					oldMap, _ := oldList[i].(map[string]interface{})
+					// the volume entry of oldMap contains the storage volume plus an identifier, which we have to strip
+					volumeEntry, ok := oldMap["volume"]
+					if ok {
+						volumeParts := strings.Split(volumeEntry.(string), ":")
+						if len(volumeParts) >= 1 {
+							oldMap["volume"] = volumeParts[0]
+						}
+					}
+
+					newMap, _ := newList[i].(map[string]interface{})
+					// deep compare
+					if !reflect.DeepEqual(oldMap, newMap) {
+						// the entry differs: call ForceNew on each of its attributes
+						for k := range oldMap {
+							_ = d.ForceNew(fmt.Sprintf("%s.%d.%s", mkMountPoint, i, k))
+						}
+
+						return true
+					}
+				}
+
+				return false
+			},
+		),
+		customdiff.ForceNewIf(
+			mkDisk,
+			func(_ context.Context, d *schema.ResourceDiff, _ interface{}) bool {
+				oldRaw, newRaw := d.GetChange(mkDisk)
+				oldList, _ := oldRaw.([]interface{})
+				newList, _ := newRaw.([]interface{})
+
+				if oldList == nil {
+					oldList = []interface{}{}
+				}
+				if newList == nil {
+					newList = []interface{}{}
+				}
+
+				minDrives := min(len(oldList), len(newList))
+
+				for i := range minDrives {
+					oldSize := dvDiskSize
+					newSize := dvDiskSize
+
+					if i < len(oldList) && oldList[i] != nil {
+						if om, ok := oldList[i].(map[string]interface{}); ok {
+							if v, ok := om[mkDiskSize].(int); ok {
+								oldSize = v
+							}
+						}
+					}
+
+					if i < len(newList) && newList[i] != nil {
+						if nm, ok := newList[i].(map[string]interface{}); ok {
+							if v, ok := nm[mkDiskSize].(int); ok {
+								newSize = v
+							}
+						}
+					}
+
+					// only shrinking forces replacement; growing is resized in place during update
+					if oldSize > newSize {
+						_ = d.ForceNew(fmt.Sprintf("%s.%d.%s", mkDisk, i, mkDiskSize))
+
+						return true
+					}
+				}
+
+				return false
+			},
+		),
 	),
 	Importer: &schema.ResourceImporter{
 		StateContext: func(_ context.Context, d *schema.ResourceData, _ any) ([]*schema.ResourceData, error) {
@@ -3019,7 +3112,23 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m any) diag.Di
 		mountOptions := diskBlock[mkDiskMountOptions].([]any)
 		quota := types.CustomBool(diskBlock[mkDiskQuota].(bool))
 		replicate := types.CustomBool(diskBlock[mkDiskReplicate].(bool))
+
+		oldSize := containerConfig.RootFS.Size
 		size := types.DiskSizeFromGigabytes(int64(diskBlock[mkDiskSize].(int)))
+		// We should never reach this point: the plan forces replacement of the container, rather than an update, when the new size is smaller.
+		if oldSize != nil && *oldSize > *size {
+			return diag.Errorf("the new disk size (%s) must not be smaller than the current disk size (%s)", size, oldSize)
+		}
+
+		if !ptr.Eq(oldSize, size) {
+			err = containerAPI.ResizeContainerDisk(ctx, &containers.ResizeRequestBody{
+				Disk: "rootfs",
+				Size: size.String(),
+			})
+			if err != nil {
+				return diag.FromErr(err)
+			}
+		}
 
 		rootFS.ACL = &acl
 		rootFS.Quota = &quota
 		rootFS.Replicate = &replicate
@@ -3029,15 +3138,26 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m any) diag.Di
 		mountOptionsStrings := make([]string, 0, len(mountOptions))
 
 		for _, option := range mountOptions {
-			mountOptionsStrings = append(mountOptionsStrings, option.(string))
+			optionString := option.(string)
+			mountOptionsStrings = append(mountOptionsStrings, optionString)
 		}
 
-		// Always set, including empty, to allow clearing mount options
 		rootFS.MountOptions = &mountOptionsStrings
 
-		updateBody.RootFS = rootFS
+		// To compare contents regardless of order, sort both sides.
+		// The schema already uses a suppress func that ignores order, so this stays consistent with it.
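+		// Note: rootFS.MountOptions points at this same slice, so sorting it
+		// below also means the update request will send the options in sorted
+		// order rather than config order.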
+		sort.Strings(mountOptionsStrings)
+
+		currentMountOptions := containerConfig.RootFS.MountOptions
+		currentMountOptionsSorted := []string{}
+		if currentMountOptions != nil {
+			currentMountOptionsSorted = append(currentMountOptionsSorted, *currentMountOptions...)
+		}
+		sort.Strings(currentMountOptionsSorted)
+
+		if !slices.Equal(mountOptionsStrings, currentMountOptionsSorted) {
+			rebootRequired = true
+		}
 
-		rebootRequired = true
+		updateBody.RootFS = rootFS
 	}
 
 	if d.HasChange(mkFeatures) {
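
For reference, a minimal sketch of how the new client call composes. Only
ResizeContainerDisk and ResizeRequestBody come from this patch; the helper
name, the module import path (inferred from the file layout), and the "%dG"
size format are illustrative assumptions:

	package example

	import (
		"context"
		"fmt"

		"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/containers"
	)

	// resizeRootFS grows a container's root filesystem to an absolute size in
	// gigabytes via the resize call added in this patch. Obtaining a configured
	// *containers.Client is left to the caller.
	func resizeRootFS(ctx context.Context, c *containers.Client, gigabytes int64) error {
		return c.ResizeContainerDisk(ctx, &containers.ResizeRequestBody{
			Disk: "rootfs",                      // same disk identifier the provider uses for the root volume
			Size: fmt.Sprintf("%dG", gigabytes), // assumed size format; the provider itself sends size.String()
		})
	}

This mirrors what containerUpdate does for rootfs above, where the size string
comes from types.DiskSizeFromGigabytes(...).String().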