package main
import (
"context"
"flag"
"log"
"terraform-provider-infomaniak/internal/provider"
"terraform-provider-infomaniak/internal/services/dbaas"
"terraform-provider-infomaniak/internal/services/domain"
"terraform-provider-infomaniak/internal/services/kaas"
"github.com/hashicorp/terraform-plugin-framework/providerserver"
)
var (
	// these will be set by the goreleaser configuration
	// to appropriate values for the compiled binary.
	// version defaults to "dev" for local builds and is overridden at
	// release time via -ldflags.
	version string = "dev"
	// goreleaser can pass other information to the main package, such as the specific commit
	// https://goreleaser.com/cookbooks/using-main.version/
)
func main() {
var debug bool
flag.BoolVar(&debug, "debug", false, "set to true to run the kaas with support for debuggers like delve")
flag.Parse()
opts := providerserver.ServeOpts{
Address: "registry.terraform.io/Infomaniak/infomaniak",
Debug: debug,
}
// Register resources
kaas.Register()
domain.Register()
dbaas.Register()
err := providerserver.Serve(context.Background(), provider.New(version), opts)
if err != nil {
log.Fatal(err.Error())
}
}
package apis
import (
"terraform-provider-infomaniak/internal/apis/dbaas"
implem_dbaas "terraform-provider-infomaniak/internal/apis/dbaas/implementation"
"terraform-provider-infomaniak/internal/apis/domain"
"terraform-provider-infomaniak/internal/apis/kaas"
implem_kaas "terraform-provider-infomaniak/internal/apis/kaas/implementation"
mock_kaas "terraform-provider-infomaniak/internal/apis/kaas/mock"
implem_domain "terraform-provider-infomaniak/internal/apis/domain/implementation"
)
// Client aggregates the per-service API clients used by the provider.
type Client struct {
	Kaas   kaas.Api
	Domain domain.Api
	DBaas  dbaas.Api
}

// NewMockClient defines the mock client for Infomaniak's API,
// It is used for testing or dryrunning
// NOTE(review): only Kaas is mocked; Domain and DBaas stay nil here, so
// callers must not exercise those services against the mock client — confirm
// this is intentional.
func NewMockClient() *Client {
	return &Client{
		Kaas: mock_kaas.New(),
	}
}

// NewClient defines the client for Infomaniak's API
// baseUri and token configure the HTTP clients; version is embedded in the
// User-Agent header of every request.
func NewClient(baseUri, token, version string) *Client {
	return &Client{
		Kaas:   implem_kaas.New(baseUri, token, version),
		DBaas:  implem_dbaas.New(baseUri, token, version),
		Domain: implem_domain.New(baseUri, token, version),
	}
}
package implementation
import (
"fmt"
"strconv"
"strings"
"terraform-provider-infomaniak/internal/apis/dbaas"
"terraform-provider-infomaniak/internal/apis/helpers"
"resty.dev/v3"
)
// Ensure that our client implements Api
var (
	_ dbaas.Api = (*Client)(nil)
)

// Client is the HTTP implementation of the DBaaS API, backed by a resty
// client pre-configured with base URL, auth token and User-Agent.
type Client struct {
	resty *resty.Client
}

// New builds a DBaaS API client for the given endpoint and bearer token;
// version is embedded in the User-Agent header.
func New(baseUri, token, version string) *Client {
	return &Client{
		resty: resty.New().
			SetBaseURL(baseUri).
			SetAuthToken(token).
			SetHeader("User-Agent", helpers.GetUserAgent(version)),
	}
}
// FindPack looks up a single DBaaS pack by database type and exact name.
//
// The backend filter may match more loosely than an exact name, so the
// single returned pack is also checked for an exact name match; any other
// outcome is reported as not found.
func (client *Client) FindPack(dbType string, name string) (*dbaas.DBaaSPack, error) {
	var result helpers.NormalizedApiResponse[[]*dbaas.DBaaSPack]
	resp, err := client.resty.R().
		SetResult(&result).
		SetError(&result).
		SetQueryParam("filter[type]", dbType).
		SetQueryParam("filter[names][]", name).
		Get(EndpointPacks)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	data := result.Data
	if len(data) != 1 || data[0].Name != name {
		// Include the search terms so the caller can tell which lookup failed.
		return nil, fmt.Errorf("pack %q of type %q not found", name, dbType)
	}
	return data[0], nil
}
// GetDBaaS fetches a single DBaaS instance, expanding packs, projects, tags
// and connection details in the response.
func (client *Client) GetDBaaS(publicCloudId int64, publicCloudProjectId int64, dbaasId int64) (*dbaas.DBaaS, error) {
	var result helpers.NormalizedApiResponse[*dbaas.DBaaS]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("dbaas_id", fmt.Sprint(dbaasId)).
		SetQueryParam("with", "packs,projects,tags,connection").
		SetResult(&result).
		SetError(&result).
		Get(EndpointDatabase)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// CreateDBaaS creates a new DBaaS instance in the project referenced by
// input.Project and returns the creation info (id, root credentials).
func (client *Client) CreateDBaaS(input *dbaas.DBaaS) (*dbaas.DBaaSCreateInfo, error) {
	var result helpers.NormalizedApiResponse[*dbaas.DBaaSCreateInfo]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(input.Project.PublicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(input.Project.ProjectId)).
		SetBody(input).
		SetResult(&result).
		SetError(&result).
		Post(EndpointDatabases)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// UpdateDBaaS patches an existing DBaaS instance identified by input.Id and
// reports whether the update was accepted.
func (client *Client) UpdateDBaaS(input *dbaas.DBaaS) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(input.Project.PublicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(input.Project.ProjectId)).
		SetPathParam("dbaas_id", fmt.Sprint(input.Id)).
		SetBody(input).
		SetResult(&result).
		SetError(&result).
		Patch(EndpointDatabase)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}

// DeleteDBaaS deletes a DBaaS instance and reports whether the deletion was
// accepted.
func (client *Client) DeleteDBaaS(publicCloudId int64, publicCloudProjectId int64, dbaasId int64) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("dbaas_id", fmt.Sprint(dbaasId)).
		SetResult(&result).
		SetError(&result).
		Delete(EndpointDatabase)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}

// PatchIpFilters replaces the instance's IP allow-list with filters (PUT is
// a full replacement, not a merge).
func (client *Client) PatchIpFilters(publicCloudId int64, publicCloudProjectId int64, dbaasId int64, filters dbaas.AllowedCIDRs) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("dbaas_id", fmt.Sprint(dbaasId)).
		SetBody(filters).
		SetResult(&result).
		SetError(&result).
		Put(EndpointDatabaseIpFilter)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}

// GetIpFilters returns the instance's current IP allow-list.
func (client *Client) GetIpFilters(publicCloudId int64, publicCloudProjectId int64, dbaasId int64) ([]string, error) {
	var result helpers.NormalizedApiResponse[[]string]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("dbaas_id", fmt.Sprint(dbaasId)).
		SetResult(&result).
		SetError(&result).
		Get(EndpointDatabaseIpFilter)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// CreateDBaasScheduleBackup creates a backup schedule on the instance and
// returns the new schedule's id.
func (client *Client) CreateDBaasScheduleBackup(publicCloudId int64, publicCloudProjectId int64, dbaasId int64, backupSchedules *dbaas.DBaasBackupSchedule) (int64, error) {
	var result helpers.NormalizedApiResponse[int64]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("dbaas_id", fmt.Sprint(dbaasId)).
		SetBody(backupSchedules).
		SetResult(&result).
		SetError(&result).
		Post(EndpointDatabaseBackupSchedules)
	if err != nil {
		return 0, err
	}
	if resp.IsError() {
		return 0, result.Error
	}
	return result.Data, nil
}

// UpdateDBaasScheduleBackup patches the backup schedule with the given id
// and reports whether the update was accepted.
func (client *Client) UpdateDBaasScheduleBackup(publicCloudId int64, publicCloudProjectId int64, dbaasId int64, id int64, backupSchedules *dbaas.DBaasBackupSchedule) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("dbaas_id", fmt.Sprint(dbaasId)).
		SetPathParam("schedule_id", fmt.Sprint(id)).
		SetBody(backupSchedules).
		SetResult(&result).
		SetError(&result).
		Patch(EndpointDatabaseBackupSchedule)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}

// GetDBaasScheduleBackup fetches a single backup schedule by id.
func (client *Client) GetDBaasScheduleBackup(publicCloudId int64, publicCloudProjectId int64, dbaasId int64, id int64) (*dbaas.DBaasBackupSchedule, error) {
	var result helpers.NormalizedApiResponse[*dbaas.DBaasBackupSchedule]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("dbaas_id", fmt.Sprint(dbaasId)).
		SetPathParam("schedule_id", fmt.Sprint(id)).
		SetResult(&result).
		SetError(&result).
		Get(EndpointDatabaseBackupSchedule)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// DeleteDBaasScheduleBackup deletes the backup schedule with the given id
// and reports whether the deletion was accepted.
func (client *Client) DeleteDBaasScheduleBackup(publicCloudId int64, publicCloudProjectId int64, dbaasId int64, id int64) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("dbaas_id", fmt.Sprint(dbaasId)).
		SetPathParam("schedule_id", fmt.Sprint(id)).
		SetResult(&result).
		SetError(&result).
		Delete(EndpointDatabaseBackupSchedule)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}

// GetDbaasRegions lists the regions where DBaaS can be deployed.
func (client *Client) GetDbaasRegions() ([]string, error) {
	var result helpers.NormalizedApiResponse[[]string]
	resp, err := client.resty.R().
		SetResult(&result).
		SetError(&result).
		Get(EndpointDbaasDataRegion)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// GetDbaasTypes lists the available database engines and their versions.
func (client *Client) GetDbaasTypes() ([]*dbaas.DbaasType, error) {
	var result helpers.NormalizedApiResponse[[]*dbaas.DbaasType]
	resp, err := client.resty.R().
		SetResult(&result).
		SetError(&result).
		Get(EndpointDbaasDataTypes)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}
// GetDbaasPack searches for exactly one pack matching the given filters.
//
// Only non-nil filter fields are sent as query parameters. An error is
// returned when no pack, or more than one pack, matches; in the latter case
// the matching pack names are listed so the user can refine the search.
func (client *Client) GetDbaasPack(params dbaas.PackFilter) (*dbaas.Pack, error) {
	var result helpers.NormalizedApiResponse[[]*dbaas.Pack]
	builder := client.resty.R().
		SetResult(&result).
		SetError(&result).
		SetQueryParam("filter[type]", params.DbType)
	if params.Name != nil {
		builder = builder.SetQueryParam("filter[names][]", *params.Name)
	}
	if params.Group != nil {
		builder = builder.SetQueryParam("filter[groups][]", *params.Group)
	}
	if params.Instances != nil {
		builder = builder.SetQueryParam("filter[instances]", strconv.FormatInt(*params.Instances, 10))
	}
	if params.Cpu != nil {
		builder = builder.SetQueryParam("filter[cpu]", strconv.FormatInt(*params.Cpu, 10))
	}
	if params.Ram != nil {
		builder = builder.SetQueryParam("filter[ram]", strconv.FormatInt(*params.Ram, 10))
	}
	if params.Storage != nil {
		builder = builder.SetQueryParam("filter[storage]", strconv.FormatInt(*params.Storage, 10))
	}
	resp, err := builder.Get(EndpointPacks)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	data := result.Data
	if len(data) == 0 {
		return nil, fmt.Errorf("pack not found")
	}
	if len(data) != 1 {
		// Join the names so the list has no trailing separator.
		names := make([]string, 0, len(data))
		for _, pack := range data {
			names = append(names, pack.Name)
		}
		return nil, fmt.Errorf("multiple packs found, please refine your search\nfound packs: %s", strings.Join(names, ", "))
	}
	return data[0], nil
}
package dbaas
import (
"encoding/json"
"fmt"
"strings"
)
// DBaaSPack identifies a DBaaS pack by id and name.
type DBaaSPack struct {
	Id   int64  `json:"id,omitempty"`
	Name string `json:"name,omitempty"`
}

// DbaasType is a database engine and the versions available for it.
type DbaasType struct {
	Name     string   `json:"name,omitempty"`
	Versions []string `json:"versions,omitempty"`
}

// PackFilter is the set of optional criteria used to search for a pack;
// nil fields are omitted from the query (see Client.GetDbaasPack).
type PackFilter struct {
	DbType    string
	Group     *string
	Name      *string
	Instances *int64
	Cpu       *int64
	Ram       *int64
	Storage   *int64
}

// Pack describes a DBaaS pack with its sizing and pricing.
type Pack struct {
	ID        int64  `json:"id,omitempty"`
	Type      string `json:"type,omitempty"`
	Group     string `json:"group,omitempty"`
	Name      string `json:"name,omitempty"`
	Instances int64  `json:"instances,omitempty"`
	CPU       int64  `json:"cpu,omitempty"`
	RAM       int64  `json:"ram,omitempty"`
	Storage   int64  `json:"storage,omitempty"`
	Rates     Rates  `json:"rates"`
}

// Rates holds per-currency pricing for a pack.
type Rates struct {
	CHF Pricing `json:"CHF"`
	EUR Pricing `json:"EUR"`
}

// Pricing is an hourly price with and without tax.
type Pricing struct {
	HourExclTax float64 `json:"hour_excl_tax,omitempty"`
	HourInclTax float64 `json:"hour_incl_tax,omitempty"`
}

// DBaaS is a managed database service instance.
type DBaaS struct {
	Id                   int64                `json:"id,omitempty"`
	Project              DBaaSProject         `json:"project,omitzero"`
	PackId               int64                `json:"pack_id,omitempty"`
	Pack                 *DBaaSPack           `json:"pack,omitempty"`
	Connection           *DBaaSConnectionInfo `json:"connection,omitempty"`
	Type                 string               `json:"type,omitempty"`
	Version              string               `json:"version,omitempty"`
	Name                 string               `json:"name,omitempty"`
	KubernetesIdentifier string               `json:"kube_identifier,omitempty"`
	Region               string               `json:"region,omitempty"`
	Status               string               `json:"status,omitempty"`
}

// AllowedCIDRs is the IP allow-list payload for a DBaaS instance.
type AllowedCIDRs struct {
	IpFilters []string `json:"ip_filters,omitempty"`
}
// UnmarshalJSON decodes a DBaaS payload while tolerating the backend
// returning "[]" (or null) for "connection" when connection info is not yet
// available; in those cases Connection is left nil instead of becoming a
// non-nil zero value or failing to decode.
func (d *DBaaS) UnmarshalJSON(data []byte) error {
	// Alias drops the custom UnmarshalJSON so the remaining fields decode
	// with the default rules, avoiding infinite recursion.
	type Alias DBaaS
	aux := &struct {
		Connection json.RawMessage `json:"connection,omitempty"`
		*Alias
	}{
		Alias: (*Alias)(d),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	if len(aux.Connection) > 0 {
		switch strings.TrimSpace(string(aux.Connection)) {
		case "[]", "null":
			// No connection info yet.
			d.Connection = nil
		default:
			d.Connection = &DBaaSConnectionInfo{}
			if err := json.Unmarshal(aux.Connection, d.Connection); err != nil {
				return err
			}
		}
	}
	return nil
}
// DBaasBackupSchedule describes a recurring backup schedule. Pointer fields
// allow a partial update to omit unchanged values.
type DBaasBackupSchedule struct {
	Id            *int64  `json:"id,omitempty"`
	Name          *string `json:"name,omitempty"`
	ScheduledAt   *string `json:"scheduled_at,omitempty"`
	Retention     *int64  `json:"retention,omitempty"`
	IsPitrEnabled *bool   `json:"is_pitr_enabled,omitempty"`
}

// DBaaSCreateInfo is returned when a DBaaS is created and carries the new
// service's id and initial root credentials.
type DBaaSCreateInfo struct {
	Id             int64  `json:"id"`
	RootPassword   string `json:"root_password"`
	KubeIdentifier string `json:"kube_identifier"`
}

// DBaaSConnectionInfo holds the parameters needed to connect to the database.
type DBaaSConnectionInfo struct {
	Host     string `json:"host"`
	Port     string `json:"port"`
	User     string `json:"user"`
	Password string `json:"password"`
	Ca       string `json:"ca"`
}

// DBaaSBackup is a single backup and its lifecycle timestamps.
// NOTE(review): CreatedAt/CompletedAt look like Unix timestamps — confirm
// the unit with the API.
type DBaaSBackup struct {
	Id          string `json:"id,omitempty"`
	Location    string `json:"location,omitempty"`
	CreatedAt   uint64 `json:"created_at,omitempty"`
	CompletedAt uint64 `json:"completed_at,omitempty"`
	Status      string `json:"status,omitempty"`
}

// DBaaSRestore is a restore operation; NewService is set when the restore
// produced a new service.
type DBaaSRestore struct {
	Id           string           `json:"id,omitempty"`
	BackupSource string           `json:"backup_source,omitempty"`
	CreatedAt    uint64           `json:"created_at,omitempty"`
	Status       string           `json:"status,omitempty"`
	NewService   *DBaaSCreateInfo `json:"new_service,omitempty"`
}
// Key returns a unique key for this DBaaS instance, combining the public
// cloud id, the project id and the service id.
// The receiver is named d for consistency with (*DBaaS).UnmarshalJSON.
func (d *DBaaS) Key() string {
	return fmt.Sprintf("%d-%d-%d", d.Project.PublicCloudId, d.Project.ProjectId, d.Id)
}

// DBaaSProject identifies the public cloud project a DBaaS belongs to.
type DBaaSProject struct {
	PublicCloudId int64 `json:"public_cloud_id,omitempty"`
	ProjectId     int64 `json:"id,omitempty"`
}
package implementation
import (
"fmt"
"strings"
"terraform-provider-infomaniak/internal/apis/domain"
"terraform-provider-infomaniak/internal/apis/helpers"
"resty.dev/v3"
)
// Ensure that our client implements Api
var (
	_ domain.Api = (*Client)(nil)
)

// Client is the HTTP implementation of the Domain API, backed by a resty
// client pre-configured with base URL, auth token and User-Agent.
type Client struct {
	resty *resty.Client
}

// New builds a Domain API client for the given endpoint and bearer token;
// version is embedded in the User-Agent header.
func New(baseUri, token, version string) *Client {
	return &Client{
		resty: resty.New().
			SetBaseURL(baseUri).
			SetAuthToken(token).
			SetHeader("User-Agent", helpers.GetUserAgent(version)),
	}
}
// GetZone fetches the DNS zone for the given fully qualified domain name,
// including its records and IDN form.
func (client *Client) GetZone(fqdn string) (*domain.Zone, error) {
	var result helpers.NormalizedApiResponse[*domain.Zone]
	resp, err := client.resty.R().
		// fqdn is already a string; no fmt.Sprint round-trip needed.
		SetPathParam("fqdn", fqdn).
		SetQueryParam("with", "records,idn").
		SetResult(&result).
		SetError(&result).
		Get(EndpointZone)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}
// CreateZone creates a managed DNS zone for the given fully qualified domain
// name and returns it with its records and IDN form.
func (client *Client) CreateZone(fqdn string) (*domain.Zone, error) {
	var result helpers.NormalizedApiResponse[*domain.Zone]
	resp, err := client.resty.R().
		// fqdn is already a string; no fmt.Sprint round-trip needed.
		SetPathParam("fqdn", fqdn).
		SetQueryParam("with", "records,idn").
		SetResult(&result).
		SetError(&result).
		Post(EndpointZone)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}
// DeleteZone deletes the DNS zone for the given fully qualified domain name
// and reports whether the deletion was accepted.
func (client *Client) DeleteZone(fqdn string) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		// fqdn is already a string; no fmt.Sprint round-trip needed.
		SetPathParam("fqdn", fqdn).
		SetResult(&result).
		SetError(&result).
		Delete(EndpointZone)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}
// GetRecord fetches a single DNS record by id within a zone. A trailing dot
// on the zone FQDN is stripped before building the path.
func (client *Client) GetRecord(zoneFqdn string, id int64) (*domain.Record, error) {
	var result helpers.NormalizedApiResponse[*domain.Record]
	resp, err := client.resty.R().
		SetPathParam("zone_fqdn", strings.TrimSuffix(zoneFqdn, ".")).
		SetPathParam("id", fmt.Sprint(id)).
		SetQueryParam("with", "idn,records_description").
		SetResult(&result).
		SetError(&result).
		Get(EndpointRecord)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// CreateRecordRequest is the request body for creating a record; it is also
// reused as the body of record updates (see UpdateRecord).
type CreateRecordRequest struct {
	Type   string `json:"type"`
	Source string `json:"source"`
	Target string `json:"target"`
	TTL    int64  `json:"ttl"`
}

// CreateRecord creates a DNS record in the given zone and returns the
// created record.
func (client *Client) CreateRecord(zoneFqdn, recordType, source, target string, ttl int64) (*domain.Record, error) {
	var result helpers.NormalizedApiResponse[*domain.Record]
	var input = CreateRecordRequest{
		Type:   recordType,
		Source: source,
		Target: target,
		TTL:    ttl,
	}
	resp, err := client.resty.R().
		SetPathParam("zone_fqdn", strings.TrimSuffix(zoneFqdn, ".")).
		SetQueryParam("with", "idn,records_description").
		SetResult(&result).
		SetBody(input).
		SetError(&result).
		Post(EndpointRecords)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// UpdateRecord replaces the record with the given id (PUT semantics) and
// returns the updated record.
func (client *Client) UpdateRecord(zoneFqdn string, id int64, recordType, source, target string, ttl int64) (*domain.Record, error) {
	var result helpers.NormalizedApiResponse[*domain.Record]
	var input = CreateRecordRequest{
		Type:   recordType,
		Source: source,
		Target: target,
		TTL:    ttl,
	}
	resp, err := client.resty.R().
		SetPathParam("zone_fqdn", strings.TrimSuffix(zoneFqdn, ".")).
		SetPathParam("id", fmt.Sprint(id)).
		SetQueryParam("with", "idn,records_description").
		SetResult(&result).
		SetBody(input).
		SetError(&result).
		Put(EndpointRecord)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// DeleteRecord deletes the record with the given id and reports whether the
// deletion was accepted.
func (client *Client) DeleteRecord(zoneFqdn string, id int64) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("zone_fqdn", strings.TrimSuffix(zoneFqdn, ".")).
		SetPathParam("id", fmt.Sprint(id)).
		SetResult(&result).
		SetError(&result).
		Delete(EndpointRecord)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}
package domain
import "slices"
// Zone is a DNS zone together with its records and nameservers.
type Zone struct {
	ID             int64      `json:"id,omitempty"`
	FQDN           string     `json:"fqdn,omitempty"`
	DNSSEC         ZoneDNSSEC `json:"dnssec,omitempty"`
	Nameservers    []string   `json:"nameservers,omitempty"`
	Records        []Record   `json:"records,omitempty"`
	ClusterRecords []Record   `json:"cluster_records,omitempty"`
}

// ZoneDNSSEC reports whether DNSSEC is enabled on a zone.
type ZoneDNSSEC struct {
	IsEnabled bool `json:"is_enabled,omitempty"`
}
// RecordType is a DNS record type in its textual form ("A", "MX", ...).
// It is an alias of string so values convert freely.
type RecordType = string

// The DNS record types known to the provider.
var (
	RecordA      RecordType = "A"
	RecordAAAA   RecordType = "AAAA"
	RecordCAA    RecordType = "CAA"
	RecordCNAME  RecordType = "CNAME"
	RecordDNAME  RecordType = "DNAME"
	RecordDS     RecordType = "DS"
	RecordMX     RecordType = "MX"
	RecordNS     RecordType = "NS"
	RecordSMIMEA RecordType = "SMIMEA"
	RecordSRV    RecordType = "SRV"
	RecordSSHFP  RecordType = "SSHFP"
	RecordTLSA   RecordType = "TLSA"
	RecordTXT    RecordType = "TXT"
)

// RecordTypes lists every record type accepted by IsValidRecordType.
var RecordTypes = []RecordType{RecordA, RecordAAAA, RecordCAA, RecordCNAME, RecordDNAME, RecordNS, RecordDS, RecordMX, RecordSMIMEA, RecordSRV, RecordSSHFP, RecordTLSA, RecordTXT}

// IsValidRecordType reports whether rt is one of the supported record types.
func IsValidRecordType(rt RecordType) bool {
	return slices.Contains(RecordTypes, rt)
}
// Record is a single DNS record within a zone.
type Record struct {
	ID        int64      `json:"id,omitempty"`
	Source    string     `json:"source,omitempty"`
	SourceIDN *string    `json:"source_idn,omitempty"`
	Type      RecordType `json:"type,omitempty"`
	TTL       int64      `json:"ttl,omitempty"`
	Target    string     `json:"target,omitempty"`
	DynDNSID  int64      `json:"dyndns_id,omitempty"`
	// Description string `json:"description,omitempty"`
}
// One unexported marker type per DNS record type; each wraps the textual
// form and gives record kinds distinct compile-time identities (used by
// RecordConstraint below).
type (
	recordTypeA      struct{ string }
	recordTypeAAAA   struct{ string }
	recordTypeCAA    struct{ string }
	recordTypeCNAME  struct{ string }
	recordTypeDNAME  struct{ string }
	recordTypeDS     struct{ string }
	recordTypeMX     struct{ string }
	recordTypeNS     struct{ string }
	recordTypeSMIMEA struct{ string }
	recordTypeSRV    struct{ string }
	recordTypeSSHFP  struct{ string }
	recordTypeTLSA   struct{ string }
	recordTypeTXT    struct{ string }
)

// Exported singleton values, one per record marker type.
var (
	RecordTypeA      = recordTypeA{"A"}
	RecordTypeAAAA   = recordTypeAAAA{"AAAA"}
	RecordTypeCAA    = recordTypeCAA{"CAA"}
	RecordTypeCNAME  = recordTypeCNAME{"CNAME"}
	RecordTypeDNAME  = recordTypeDNAME{"DNAME"}
	RecordTypeDS     = recordTypeDS{"DS"}
	RecordTypeMX     = recordTypeMX{"MX"}
	RecordTypeNS     = recordTypeNS{"NS"}
	RecordTypeSMIMEA = recordTypeSMIMEA{"SMIMEA"}
	RecordTypeSRV    = recordTypeSRV{"SRV"}
	RecordTypeSSHFP  = recordTypeSSHFP{"SSHFP"}
	RecordTypeTLSA   = recordTypeTLSA{"TLSA"}
	RecordTypeTXT    = recordTypeTXT{"TXT"}
)

// RecordConstraint is a generic constraint accepting any one of the typed
// record markers above.
type RecordConstraint interface {
	recordTypeA | recordTypeAAAA | recordTypeCAA | recordTypeCNAME | recordTypeDNAME | recordTypeDS | recordTypeMX | recordTypeSMIMEA | recordTypeSRV | recordTypeSSHFP | recordTypeTLSA | recordTypeTXT | recordTypeNS
}
package helpers
import (
"fmt"
"strings"
)
// NormalizedApiResponse is the standard envelope of Infomaniak API
// responses: a "result" status string, a typed "data" payload and an
// optional "error" set on failures.
type NormalizedApiResponse[K any] struct {
	Result string    `json:"result"`
	Data   K         `json:"data"`
	Error  *ApiError `json:"error"`
}
// ApiError is the error payload returned by Infomaniak's API. It may carry
// nested sub-errors and contextual values (e.g. the accepted values for an
// attribute).
type ApiError struct {
	Description string          `json:"description"`
	Errors      []*ApiError     `json:"errors"`
	Context     ApiErrorContext `json:"context"`
}

// ApiErrorContext names the attribute an error relates to and the values
// relevant to it.
type ApiErrorContext struct {
	Attribute string `json:"attribute"`
	Values    []any  `json:"values"`
}

// Error renders the error and all nested sub-errors as a human readable,
// indented multi-line message.
func (apiError *ApiError) Error() string {
	var b strings.Builder
	b.WriteString(apiError.Description)
	if len(apiError.Context.Values) > 0 {
		fmt.Fprintf(&b, " (possible values: %v)", apiError.Context.Values)
	}
	if len(apiError.Errors) > 0 {
		b.WriteString(":\n")
	}
	for _, sub := range apiError.Errors {
		// Indent nested messages by one extra space per level.
		b.WriteString(" " + strings.ReplaceAll(sub.Error(), "\n", "\n ") + "\n")
	}
	return strings.TrimSuffix(b.String(), "\n")
}
package helpers
import "fmt"
const (
	// userAgentFormat is the fmt template for the provider's User-Agent
	// header; the single %s receives the provider version.
	userAgentFormat = "terraform-provider-infomaniak/%s (resty; +https://github.com/Infomaniak/terraform-provider-infomaniak)"
)

// GetUserAgent builds the User-Agent header value advertised by the
// provider's HTTP clients for the given provider version.
func GetUserAgent(version string) string {
	userAgent := fmt.Sprintf(userAgentFormat, version)
	return userAgent
}
package implementation
import (
"fmt"
"terraform-provider-infomaniak/internal/apis/helpers"
"terraform-provider-infomaniak/internal/apis/kaas"
"resty.dev/v3"
)
// Ensure that our client implements Api
var (
	_ kaas.Api = (*Client)(nil)
)

// Client is the HTTP implementation of the KaaS API, backed by a resty
// client pre-configured with base URL, auth token and User-Agent.
type Client struct {
	resty *resty.Client
}

// New builds a KaaS API client for the given endpoint and bearer token;
// version is embedded in the User-Agent header.
func New(baseUri, token, version string) *Client {
	return &Client{
		resty: resty.New().
			SetBaseURL(baseUri).
			SetAuthToken(token).
			SetHeader("User-Agent", helpers.GetUserAgent(version)),
	}
}
// GetPacks lists the available KaaS packs.
func (client *Client) GetPacks() ([]*kaas.KaasPack, error) {
	var result helpers.NormalizedApiResponse[[]*kaas.KaasPack]
	resp, err := client.resty.R().
		SetResult(&result).
		SetError(&result).
		Get(EndpointPacks)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// GetVersions lists the Kubernetes versions offered by the service.
func (client *Client) GetVersions() ([]string, error) {
	var result helpers.NormalizedApiResponse[[]string]
	resp, err := client.resty.R().
		SetResult(&result).
		SetError(&result).
		Get(EndpointVersions)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// GetKaas fetches a single KaaS cluster, expanding packs, projects,
// instances and tags in the response.
func (client *Client) GetKaas(publicCloudId int64, publicCloudProjectId int64, kaasId int64) (*kaas.Kaas, error) {
	var result helpers.NormalizedApiResponse[*kaas.Kaas]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("kaas_id", fmt.Sprint(kaasId)).
		SetQueryParam("with", "packs,projects,instances,tags").
		SetResult(&result).
		SetError(&result).
		Get(EndpointKaas)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}

// GetKubeconfig returns the cluster's kubeconfig as a string.
func (client *Client) GetKubeconfig(publicCloudId int64, publicCloudProjectId int64, kaasId int64) (string, error) {
	var result helpers.NormalizedApiResponse[string]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("kaas_id", fmt.Sprint(kaasId)).
		SetResult(&result).
		SetError(&result).
		Get(EndpointKaasKubeconfig)
	if err != nil {
		return "", err
	}
	if resp.IsError() {
		return "", result.Error
	}
	return result.Data, nil
}

// CreateKaas creates a cluster in the project referenced by input.Project
// and returns the new cluster's id.
func (client *Client) CreateKaas(input *kaas.Kaas) (int64, error) {
	var result helpers.NormalizedApiResponse[int64]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(input.Project.PublicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(input.Project.ProjectId)).
		SetBody(input).
		SetResult(&result).
		SetError(&result).
		Post(EndpointKaases)
	if err != nil {
		return 0, err
	}
	if resp.IsError() {
		return 0, result.Error
	}
	return result.Data, nil
}

// UpdateKaas patches the cluster identified by input.Id and reports whether
// the update was accepted.
func (client *Client) UpdateKaas(input *kaas.Kaas) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(input.Project.PublicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(input.Project.ProjectId)).
		SetPathParam("kaas_id", fmt.Sprint(input.Id)).
		SetBody(input).
		SetResult(&result).
		SetError(&result).
		Patch(EndpointKaas)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}

// DeleteKaas deletes a cluster and reports whether the deletion was
// accepted.
func (client *Client) DeleteKaas(publicCloudId int64, publicCloudProjectId int64, kaasId int64) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("kaas_id", fmt.Sprint(kaasId)).
		SetResult(&result).
		SetError(&result).
		Delete(EndpointKaas)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}
// GetInstancePool fetches a single KaaS instance pool.
//
// When the API omits max_instances (zero value), it is defaulted to
// min_instances so callers always see a consistent range.
func (client *Client) GetInstancePool(publicCloudId int64, publicCloudProjectId int64, kaasId int64, instancePoolId int64) (*kaas.InstancePool, error) {
	var result helpers.NormalizedApiResponse[*kaas.InstancePool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("kaas_id", fmt.Sprint(kaasId)).
		SetPathParam("kaas_instance_pool_id", fmt.Sprint(instancePoolId)).
		SetResult(&result).
		SetError(&result).
		Get(EndpointInstancePool)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	// Default Max = Min; guard against a success response whose data payload
	// is null, which would otherwise panic on the dereference.
	if result.Data != nil && result.Data.MaxInstances == 0 {
		result.Data.MaxInstances = result.Data.MinInstances
	}
	return result.Data, nil
}
// CreateInstancePool creates an instance pool in the cluster referenced by
// input.KaasId and returns the new pool's id.
func (client *Client) CreateInstancePool(publicCloudId int64, publicCloudProjectId int64, input *kaas.InstancePool) (int64, error) {
	var result helpers.NormalizedApiResponse[int64]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("kaas_id", fmt.Sprint(input.KaasId)).
		SetBody(input).
		SetResult(&result).
		SetError(&result).
		Post(EndpointInstancePools)
	if err != nil {
		return 0, err
	}
	if resp.IsError() {
		return 0, result.Error
	}
	return result.Data, nil
}

// UpdateInstancePool patches the instance pool identified by input.Id and
// reports whether the update was accepted.
func (client *Client) UpdateInstancePool(publicCloudId int64, publicCloudProjectId int64, input *kaas.InstancePool) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("kaas_id", fmt.Sprint(input.KaasId)).
		SetPathParam("kaas_instance_pool_id", fmt.Sprint(input.Id)).
		SetBody(input).
		SetResult(&result).
		SetError(&result).
		Patch(EndpointInstancePool)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}

// DeleteInstancePool deletes an instance pool and reports whether the
// deletion was accepted.
func (client *Client) DeleteInstancePool(publicCloudId int64, publicCloudProjectId int64, kaasId int64, instancePoolId int64) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(publicCloudProjectId)).
		SetPathParam("kaas_id", fmt.Sprint(kaasId)).
		SetPathParam("kaas_instance_pool_id", fmt.Sprint(instancePoolId)).
		SetResult(&result).
		SetError(&result).
		Delete(EndpointInstancePool)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}

// PatchApiserverParams patches the cluster's apiserver parameters and
// reports whether the update was accepted.
func (client *Client) PatchApiserverParams(input *kaas.Apiserver, publicCloudId int64, projectId int64, kaasId int64) (bool, error) {
	var result helpers.NormalizedApiResponse[bool]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(projectId)).
		SetPathParam("kaas_id", fmt.Sprint(kaasId)).
		SetBody(input).
		SetResult(&result).
		SetError(&result).
		Patch(EndpointApiserver)
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, result.Error
	}
	return result.Data, nil
}

// GetApiserverParams fetches the cluster's current apiserver parameters.
func (client *Client) GetApiserverParams(publicCloudId int64, projectId int64, kaasId int64) (*kaas.Apiserver, error) {
	var result helpers.NormalizedApiResponse[*kaas.Apiserver]
	resp, err := client.resty.R().
		SetPathParam("public_cloud_id", fmt.Sprint(publicCloudId)).
		SetPathParam("public_cloud_project_id", fmt.Sprint(projectId)).
		SetPathParam("kaas_id", fmt.Sprint(kaasId)).
		SetResult(&result).
		SetError(&result).
		Get(EndpointApiserver)
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, result.Error
	}
	return result.Data, nil
}
package mock
import (
"bytes"
"encoding/gob"
"encoding/json"
"errors"
"os"
"path"
"terraform-provider-infomaniak/internal/apis/kaas"
"time"
)
// KaasObject constrains the mock cache to the known cacheable KaaS pointer
// types, each of which provides a unique Key().
type KaasObject interface {
	Key() string
	*kaas.Kaas | *kaas.InstancePool
}

var (
	// mockedApiStatePath is the on-disk JSON snapshot of the mock state,
	// stored in the OS temp dir so it survives across provider runs.
	mockedApiStatePath = path.Join(os.TempDir(), "terraform-provider-infomaniak-kaas")
	// mockedApiState maps object keys to their gob-encoded bytes.
	mockedApiState = make(map[string][]byte)
	ErrKeyNotFound = errors.New("key not found")
	ErrDuplicateKey = errors.New("duplicate key found")
)
// getFromCache gob-decodes the object stored under key, returning
// ErrKeyNotFound when the key is absent or the stored value decodes to nil.
func getFromCache[K KaasObject](key string) (K, error) {
	obj, found := mockedApiState[key]
	if !found {
		return nil, ErrKeyNotFound
	}
	var buff = bytes.NewBuffer(obj)
	var result K
	err := gob.NewDecoder(buff).Decode(&result)
	if err != nil {
		return nil, err
	}
	if result == nil {
		return nil, ErrKeyNotFound
	}
	return result, nil
}
// addToCache gob-encodes obj and stores it under its Key(), persisting the
// whole state to disk. Returns ErrDuplicateKey if the key already exists.
func addToCache[K KaasObject](obj K) error {
	key := obj.Key()
	_, found := mockedApiState[key]
	if found {
		return ErrDuplicateKey
	}
	var buff bytes.Buffer
	err := gob.NewEncoder(&buff).Encode(obj)
	if err != nil {
		return err
	}
	mockedApiState[key] = buff.Bytes()
	saveCache()
	return nil
}
// updateCache re-encodes obj and overwrites the entry stored under its
// Key(), persisting the whole state to disk. Returns ErrKeyNotFound when no
// entry exists yet.
func updateCache[K KaasObject](obj K) error {
	key := obj.Key()
	cachedObject, found := mockedApiState[key]
	if !found {
		return ErrKeyNotFound
	}
	// NOTE(review): the decoded result below is discarded; the decode only
	// surfaces an error when the cached bytes are corrupt — confirm this
	// validation step is intentional.
	var buff = bytes.NewBuffer(cachedObject)
	var result K
	err := gob.NewDecoder(buff).Decode(&result)
	if err != nil {
		return err
	}
	var newBuff bytes.Buffer
	err = gob.NewEncoder(&newBuff).Encode(obj)
	if err != nil {
		return err
	}
	mockedApiState[key] = newBuff.Bytes()
	saveCache()
	return nil
}
// removeFromCache drops the entry stored under obj's Key() and persists the
// whole state to disk. Returns ErrKeyNotFound when no entry exists.
func removeFromCache[K KaasObject](obj K) error {
	key := obj.Key()
	_, found := mockedApiState[key]
	if !found {
		return ErrKeyNotFound
	}
	delete(mockedApiState, key)
	saveCache()
	return nil
}
// init registers the cacheable gob types and loads the on-disk mock state.
// A snapshot older than 24h or that fails to parse is discarded; when no
// snapshot exists, an empty cache file is created.
func init() {
	// Gob register
	gob.Register(&kaas.Kaas{})
	gob.Register(&kaas.InstancePool{})
	// Check cache age
	stat, err := os.Stat(mockedApiStatePath)
	if err == nil {
		// DeleteKaas cache if old
		if time.Since(stat.ModTime()) > 24*time.Hour {
			os.Remove(mockedApiStatePath)
			return
		}
	}
	// Try to get cache
	bdy, err := os.ReadFile(mockedApiStatePath)
	if err == nil {
		// Cache found
		// The snapshot is a JSON map of base64-encoded gob entries; a parse
		// failure means it is corrupt, so drop it and start empty.
		err := json.Unmarshal(bdy, &mockedApiState)
		if err != nil {
			os.Remove(mockedApiStatePath)
		}
		return
	}
	// Create Kaas tmp file for caching
	_, err = os.Create(mockedApiStatePath)
	if err != nil {
		panic(err)
	}
}
// saveCache persists the in-memory mock state to the temp file as JSON.
// Failures are deliberately ignored: the cache is best-effort.
func saveCache() {
	data, err := json.Marshal(mockedApiState)
	if err != nil {
		return
	}
	//nolint:errcheck
	os.WriteFile(mockedApiStatePath, data, 0666)
}
// ResetCache empties the in-memory mock state.
// NOTE(review): the on-disk cache file is left untouched — confirm callers
// do not expect the persisted state to be cleared as well.
func ResetCache() {
	mockedApiState = make(map[string][]byte)
}
package mock
import (
"fmt"
"log"
"regexp"
"terraform-provider-infomaniak/internal/apis/kaas"
)
// Ensure that our client implements Api
var (
	_ kaas.Api = (*Client)(nil)
	// dnsRegexp validates RFC 1123 DNS-label style names (lowercase
	// alphanumerics and hyphens, no leading/trailing hyphen).
	dnsRegexp = regexp.MustCompile("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
	// kubeLabelRegexp validates "key: value" pairs shaped like Kubernetes labels.
	kubeLabelRegexp = regexp.MustCompile(`^[a-zA-Z0-9\-./]+:\s*[a-zA-Z0-9\-_.]{1,63}$`)
)
// Client is the in-memory mock implementation of kaas.Api.
type Client struct{}

// New returns a new mock KaaS API client.
func New() *Client {
	return &Client{}
}
// GetPacks returns the fixed set of cluster packs known to the mock.
func (c *Client) GetPacks() ([]*kaas.KaasPack, error) {
	packs := []*kaas.KaasPack{
		{Id: 1, Name: "standard", Description: "Standard Cluster"},
		{Id: 2, Name: "pro", Description: "Pro Cluster"},
	}
	return packs, nil
}
// MustGetPackFromId returns the pack with the given id, terminating the
// process via log.Fatalf if no such pack exists.
func (c *Client) MustGetPackFromId(id int64) *kaas.KaasPack {
	packs, _ := c.GetPacks()
	for i := range packs {
		if packs[i].Id == id {
			return packs[i]
		}
	}
	log.Fatalf("pack with id %d not found", id)
	return nil // unreachable: log.Fatalf exits the process
}
// GetVersions returns the Kubernetes versions the mock pretends to support.
func (c *Client) GetVersions() ([]string, error) {
	supported := []string{"1.29", "1.30", "1.31"}
	return supported, nil
}
// GetKaas looks the cluster up in the mock cache by its composite key.
// The mock always reports a ready ("Active") cluster.
func (c *Client) GetKaas(publicCloudId int64, publicCloudProjectId int64, kaasId int64) (*kaas.Kaas, error) {
	cacheKey := fmt.Sprintf("%d-%d-%d", publicCloudId, publicCloudProjectId, kaasId)
	cluster, err := getFromCache[*kaas.Kaas](cacheKey)
	if err != nil {
		return nil, err
	}
	cluster.Status = "Active"
	return cluster, nil
}
// GetKubeconfig returns a freshly generated dummy kubeconfig; the mock
// does not persist kubeconfigs per cluster.
// Receiver renamed from "client" to "c" for consistency with every other
// method on Client.
func (c *Client) GetKubeconfig(publicCloudId int64, publicCloudProjectId int64, kaasId int64) (string, error) {
	return genKubeconfig(), nil
}
// CreateKaas validates the request, assigns a random id, and stores the
// new cluster in the mock cache. It returns the generated id.
func (c *Client) CreateKaas(input *kaas.Kaas) (int64, error) {
	switch {
	case input.Project.PublicCloudId == 0:
		return 0, fmt.Errorf("kaas is missing public cloud project id")
	case input.Region == "":
		return 0, fmt.Errorf("kaas is missing region")
	case input.PackId == 0:
		return 0, fmt.Errorf("kaas is missing pack id")
	}
	cluster := kaas.Kaas{
		Project:           input.Project,
		Region:            input.Region,
		KubernetesVersion: input.KubernetesVersion,
		PackId:            input.PackId,
		Pack:              c.MustGetPackFromId(input.PackId),
		Name:              input.Name,
	}
	cluster.Id = genId()
	return cluster.Id, addToCache(&cluster)
}
// UpdateKaas validates the requested change and rewrites the cached
// cluster entry. Region changes are rejected, so input.Region is always
// empty by the time the replacement object is stored.
func (c *Client) UpdateKaas(input *kaas.Kaas) (bool, error) {
	switch {
	case input.Project.PublicCloudId == 0:
		return false, fmt.Errorf("kaas is missing public cloud project id")
	case input.Id == 0:
		return false, fmt.Errorf("kaas is missing kaas id")
	case input.PackId == 0:
		return false, fmt.Errorf("kaas is missing pack id")
	case input.Region != "":
		return false, fmt.Errorf("client cannot update region")
	}
	updated := kaas.Kaas{
		Id:                input.Id,
		Project:           input.Project,
		Name:              input.Name,
		Region:            input.Region,
		PackId:            input.PackId,
		Pack:              c.MustGetPackFromId(input.PackId),
		KubernetesVersion: input.KubernetesVersion,
	}
	return true, updateCache(&updated)
}
// DeleteKaas removes the cluster with the given ids from the mock cache.
func (c *Client) DeleteKaas(publicCloudId int64, publicCloudProjectId int64, kaasId int64) (bool, error) {
	// Only the key fields matter for removal.
	target := kaas.Kaas{
		Id: kaasId,
		Project: kaas.KaasProject{
			PublicCloudId: publicCloudId,
			ProjectId:     publicCloudProjectId,
		},
	}
	return true, removeFromCache(&target)
}
// GetInstancePool fetches a pool from the mock cache after verifying the
// parent cluster exists. The mock always reports an "Active" pool.
func (c *Client) GetInstancePool(publicCloudId int64, publicCloudProjectId int64, kaasId int64, instancePoolId int64) (*kaas.InstancePool, error) {
	if _, err := c.GetKaas(publicCloudId, publicCloudProjectId, kaasId); err != nil {
		return nil, err
	}
	cacheKey := fmt.Sprintf("%d-%d", kaasId, instancePoolId)
	pool, err := getFromCache[*kaas.InstancePool](cacheKey)
	if err != nil {
		return nil, err
	}
	pool.Status = "Active"
	return pool, nil
}
// CreateInstancePool validates the requested pool, ensures the parent
// KaaS cluster exists, and stores the new pool in the mock cache.
// It returns the generated pool id.
func (c *Client) CreateInstancePool(publicCloudId int64, publicCloudProjectId int64, input *kaas.InstancePool) (int64, error) {
	// Checks
	if publicCloudId == 0 {
		return 0, fmt.Errorf("instance pool is missing public cloud id")
	}
	if publicCloudProjectId == 0 {
		return 0, fmt.Errorf("instance pool is missing public cloud project id")
	}
	if input.KaasId == 0 {
		return 0, fmt.Errorf("instance pool is missing kaas id")
	}
	if !dnsRegexp.MatchString(input.Name) {
		return 0, fmt.Errorf("instance pool name should be a dns name according to RFC 1123")
	}
	if input.FlavorName == "" {
		return 0, fmt.Errorf("instance pool is missing flavor name")
	}
	if input.MinInstances < 0 {
		return 0, fmt.Errorf("instance pool min instances should be greater than 0")
	}
	// if input.MaxInstances < 0 {
	// 	return nil, fmt.Errorf("instance pool max instances should be greater than 0")
	// }
	// if input.MinInstances > input.MaxInstances {
	// 	return nil, fmt.Errorf("instance pool min instance should be lesser than (or equal) max")
	// }
	// Ranging over a nil/empty map is a no-op, so no length guard is needed.
	for key, label := range input.Labels {
		keyLabel := key + ": " + label
		if !kubeLabelRegexp.MatchString(keyLabel) {
			return 0, fmt.Errorf("instance pool label should be a kubernetes label")
		}
	}
	_, err := c.GetKaas(publicCloudId, publicCloudProjectId, input.KaasId)
	if err != nil {
		return 0, err
	}
	// The mock starts the pool at its minimum size.
	var obj = kaas.InstancePool{
		Id:                 genId(),
		KaasId:             input.KaasId,
		Name:               input.Name,
		FlavorName:         input.FlavorName,
		AvailabilityZone:   input.AvailabilityZone,
		MinInstances:       input.MinInstances,
		MaxInstances:       input.MaxInstances,
		TargetInstances:    input.MinInstances,
		AvailableInstances: input.MinInstances,
		Labels:             input.Labels,
	}
	return obj.Id, addToCache(&obj)
}
// UpdateInstancePool validates the requested changes, ensures both the
// parent cluster and the pool exist, then rewrites the cached pool.
func (c *Client) UpdateInstancePool(publicCloudId int64, publicCloudProjectId int64, input *kaas.InstancePool) (bool, error) {
	// Checks
	if publicCloudId == 0 {
		return false, fmt.Errorf("instance pool is missing public cloud id")
	}
	if publicCloudProjectId == 0 {
		return false, fmt.Errorf("instance pool is missing public cloud project id")
	}
	if input.KaasId == 0 {
		return false, fmt.Errorf("instance pool is missing kaas id")
	}
	if input.Id == 0 {
		// Message fixed: previously read "instance pool is instance pool id".
		return false, fmt.Errorf("instance pool is missing instance pool id")
	}
	if !dnsRegexp.MatchString(input.Name) {
		return false, fmt.Errorf("instance pool name should be a dns name according to RFC 1123")
	}
	if input.FlavorName == "" {
		return false, fmt.Errorf("instance pool is missing flavor name")
	}
	if input.MinInstances < 0 {
		return false, fmt.Errorf("instance pool min instances should be greater than 0")
	}
	// if input.MaxInstances < 0 {
	// 	return nil, fmt.Errorf("instance pool max instances should be greater than 0")
	// }
	// if input.MinInstances > input.MaxInstances {
	// 	return nil, fmt.Errorf("instance pool min instance should be lesser than (or equal) max")
	// }
	_, err := c.GetKaas(publicCloudId, publicCloudProjectId, input.KaasId)
	if err != nil {
		return false, err
	}
	_, err = c.GetInstancePool(publicCloudId, publicCloudProjectId, input.KaasId, input.Id)
	if err != nil {
		return false, err
	}
	var obj = kaas.InstancePool{
		KaasId:           input.KaasId,
		Id:               input.Id,
		Name:             input.Name,
		FlavorName:       input.FlavorName,
		AvailabilityZone: input.AvailabilityZone,
		MinInstances:     input.MinInstances,
		// Bug fix: MaxInstances previously copied input.MinInstances,
		// silently shrinking the pool's upper bound on every update.
		MaxInstances:       input.MaxInstances,
		TargetInstances:    input.MinInstances,
		AvailableInstances: input.MinInstances,
		// Consistency with CreateInstancePool: labels were previously
		// dropped on update.
		Labels: input.Labels,
	}
	return true, updateCache(&obj)
}
// DeleteInstancePool removes a pool from the mock cache after verifying
// that the parent cluster exists.
func (c *Client) DeleteInstancePool(publicCloudId int64, publicCloudProjectId int64, kaasId int64, instancePoolId int64) (bool, error) {
	if _, err := c.GetKaas(publicCloudId, publicCloudProjectId, kaasId); err != nil {
		return false, err
	}
	// Only the key fields matter for removal.
	target := kaas.InstancePool{
		KaasId: kaasId,
		Id:     instancePoolId,
	}
	return true, removeFromCache(&target)
}
// GetApiserverParams is a stub: the mock keeps no apiserver parameter state.
func (c *Client) GetApiserverParams(publicCloudId int64, projectId int64, kaasId int64) (*kaas.Apiserver, error) {
	return nil, nil
}

// PatchApiserverParams is a stub that always reports success.
func (c *Client) PatchApiserverParams(input *kaas.Apiserver, publicCloudId int64, projectId int64, kaasId int64) (bool, error) {
	return true, nil
}
package mock
import (
"bytes"
cryptorand "crypto/rand"
"encoding/base64"
"math/rand/v2"
)
// genId returns a random non-negative int64 used as a mock object id.
func genId() int64 {
	return rand.Int64()
}
// genKubeconfig returns 1024 random bytes, base64-encoded, standing in
// for a kubeconfig file in mock responses.
func genKubeconfig() string {
	var b = make([]byte, 1024)
	if _, err := cryptorand.Read(b); err != nil {
		panic(err)
	}
	// EncodeToString always flushes completely. The previous
	// base64.NewEncoder-based version never called Close(), so the final
	// partial input block (1024 % 3 = 1 byte) was silently dropped.
	return base64.StdEncoding.EncodeToString(b)
}
package kaas
import (
"encoding/json"
"fmt"
"maps"
)
// KaasPack describes a purchasable cluster pack (tier).
type KaasPack struct {
	Id          int64  `json:"kaas_pack_id,omitempty"`
	Name        string `json:"name,omitempty"`
	Description string `json:"description,omitempty"`
}
// Apiserver groups the configurable kube-apiserver settings.
// NonSpecificApiServerParams carries free-form flags that are merged into
// "apiserver_params" by the custom MarshalJSON below (hence json:"-").
type Apiserver struct {
	Params                     *ApiServerParams  `json:"apiserver_params"`
	NonSpecificApiServerParams map[string]string `json:"-"`
	OidcCa                     *string           `json:"oidc_ca"`
	AuditLogWebhook            *string           `json:"audit-webhook-config"`
	AuditLogPolicy             *string           `json:"audit-policy"`
}
var _ json.Marshaler = (*Apiserver)(nil)
// We can delete this once json v2 is out, so we can flatten everything without having to do this
func (a *Apiserver) MarshalJSON() ([]byte, error) {
paramBytes, err := json.Marshal(a.Params)
if err != nil {
paramBytes = []byte("{}")
}
paramsMap := make(map[string]string)
json.Unmarshal(paramBytes, ¶msMap)
nonSpecificMap := a.NonSpecificApiServerParams
res := make(map[string]string)
maps.Copy(res, paramsMap)
maps.Copy(res, nonSpecificMap)
result, err := json.Marshal(map[string]any{
"apiserver_params": res,
"oidc_ca": a.OidcCa,
"audit-policy": a.AuditLogPolicy,
"audit-webhook-config": a.AuditLogWebhook,
})
return result, err
}
// ApiServerParams holds the OIDC-related kube-apiserver flags; the JSON
// keys are the literal command-line flag names.
type ApiServerParams struct {
	IssuerUrl     *string `json:"--oidc-issuer-url,omitempty"`
	ClientId      *string `json:"--oidc-client-id,omitempty"`
	UsernameClaim *string `json:"--oidc-username-claim,omitempty"`
	UsernamePrefix *string `json:"--oidc-username-prefix,omitempty"`
	SigningAlgs   *string `json:"--oidc-signing-algs,omitempty"`
	GroupsClaim   *string `json:"--oidc-groups-claim,omitempty"`
	GroupsPrefix  *string `json:"--oidc-groups-prefix,omitempty"`
	RequiredClaim *string `json:"--oidc-required-claim,omitempty"`
}
// Kaas models a managed Kubernetes cluster.
type Kaas struct {
	Name              string      `json:"name,omitempty"`
	Id                int64       `json:"kaas_id,omitempty"`
	Project           KaasProject `json:"project,omitzero"`
	PackId            int64       `json:"kaas_pack_id,omitempty"`
	Pack              *KaasPack   `json:"pack,omitempty"`
	Region            string      `json:"region,omitempty"`
	KubernetesVersion string      `json:"kubernetes_version,omitempty"`
	Status            string      `json:"status,omitempty"`
}

// Key returns the cache key "<publicCloudId>-<projectId>-<kaasId>".
func (kaas *Kaas) Key() string {
	return fmt.Sprintf("%d-%d-%d", kaas.Project.PublicCloudId, kaas.Project.ProjectId, kaas.Id)
}
// KaasProject identifies the public cloud project a cluster belongs to.
type KaasProject struct {
	PublicCloudId int64 `json:"public_cloud_id,omitempty"`
	ProjectId     int64 `json:"id,omitempty"`
}
// InstancePool models a group of worker nodes attached to a cluster.
type InstancePool struct {
	KaasId           int64             `json:"kaas_id,omitempty"`
	Id               int64             `json:"instance_pool_id,omitempty"`
	Name             string            `json:"name,omitempty"`
	FlavorName       string            `json:"flavor,omitempty"`
	AvailabilityZone string            `json:"availability_zone,omitempty"`
	MinInstances     int64             `json:"minimum_instances,omitempty"`
	MaxInstances     int64             `json:"maximum_instances,omitempty"`
	Status           string            `json:"status,omitempty"`
	Labels           map[string]string `json:"labels,omitempty"`
	TargetInstances  int64             `json:"target_instances,omitempty"`
	AvailableInstances int64           `json:"available_instances,omitempty"`
	ErrorMessages    []string          `json:"error_messages,omitempty"`
}

// Key returns the cache key "<kaasId>-<instancePoolId>".
func (instancePool *InstancePool) Key() string {
	return fmt.Sprintf("%d-%d", instancePool.KaasId, instancePool.Id)
}
package provider
import (
"context"
"os"
"terraform-provider-infomaniak/internal/provider/registry"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/function"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/provider"
"github.com/hashicorp/terraform-plugin-framework/provider/schema"
"github.com/hashicorp/terraform-plugin-framework/providerserver"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-go/tfprotov6"
"github.com/hashicorp/terraform-plugin-log/tflog"
)
// Environment variables used by the provider
const (
	INFOMANIAK_TOKEN = "INFOMANIAK_TOKEN"
	INFOMANIAK_HOST  = "INFOMANIAK_HOST"
)

// Ensure IkProvider satisfies the provider framework interfaces.
var (
	_ provider.Provider              = &IkProvider{}
	_ provider.ProviderWithFunctions = &IkProvider{}
	// DefaultHost is used when neither the configuration nor
	// INFOMANIAK_HOST supplies an API host.
	DefaultHost = "https://api.infomaniak.com"
)
// IkProvider defines the provider implementation.
type IkProvider struct {
	// version is set to the provider version on release, "dev" when the
	// provider is built and ran locally, and "test" when running
	// acceptance testing.
	version string
	// ik caches the configured provider data after the first Configure call.
	ik *IkProviderData
}

// IkProviderData defines the data associated with the provider.
type IkProviderData struct {
	Version types.String `tfsdk:"version"`
	Data    *IkProviderModel
}

// IkProviderModel holds the user-supplied provider configuration.
type IkProviderModel struct {
	Host  types.String `tfsdk:"host"`
	Token types.String `tfsdk:"token"`
}
// Metadata sets the provider type name and version.
func (p *IkProvider) Metadata(ctx context.Context, req provider.MetadataRequest, resp *provider.MetadataResponse) {
	resp.TypeName = "infomaniak"
	resp.Version = p.version
}
// Schema declares the provider-level configuration attributes.
func (p *IkProvider) Schema(ctx context.Context, req provider.SchemaRequest, resp *provider.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: map[string]schema.Attribute{
			"host": schema.StringAttribute{
				Optional:            true,
				Description:         "The base endpoint for Infomaniak's API (including scheme).",
				MarkdownDescription: "The base endpoint for Infomaniak's API (including scheme).",
			},
			"token": schema.StringAttribute{
				// NOTE(review): Required/Optional are decided by the
				// environment at schema-build time — the token attribute
				// becomes optional when INFOMANIAK_TOKEN is set. Confirm
				// this is intentional.
				Required:            os.Getenv(INFOMANIAK_TOKEN) == "",
				Optional:            os.Getenv(INFOMANIAK_TOKEN) != "",
				Sensitive:           true,
				Description:         "The token used for authenticating against Infomaniak's API.",
				MarkdownDescription: "The token used for authenticating against Infomaniak's API.",
			},
		},
		Description:         "Infomaniak's provider.",
		MarkdownDescription: "Infomaniak's provider.",
	}
}
// Configure resolves the host and token (environment variables take
// precedence over configuration, with DefaultHost as fallback), validates
// them, and caches the resulting provider data for resources and data
// sources.
func (p *IkProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) {
	tflog.Debug(ctx, "Provider configuration started")
	if p.ik != nil {
		tflog.Debug(ctx, "Provider already present, skipping configuration")
		resp.DataSourceData = p.ik
		resp.ResourceData = p.ik
		return
	}
	var data IkProviderModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
	if resp.Diagnostics.HasError() {
		return
	}
	if data.Host.IsUnknown() {
		resp.Diagnostics.AddAttributeError(
			path.Root("host"),
			"Unknown Infomaniak API Host",
			"The provider cannot create the Infomaniak API client as there is an unknown configuration value for the Infomaniak API host. "+
				"Either target apply the source of the value first, set the value statically in the configuration, or use the INFOMANIAK_HOST environment variable.",
		)
	}
	if data.Token.IsUnknown() {
		resp.Diagnostics.AddAttributeError(
			path.Root("token"),
			"Unknown Infomaniak API Token",
			// Message fixed: previously said "username" for the token attribute.
			"The provider cannot create the Infomaniak API client as there is an unknown configuration value for the Infomaniak API token. "+
				"Either target apply the source of the value first, set the value statically in the configuration, or use the INFOMANIAK_TOKEN environment variable.",
		)
	}
	if resp.Diagnostics.HasError() {
		return
	}
	// Environment variables win over configuration values.
	host := os.Getenv(INFOMANIAK_HOST)
	token := os.Getenv(INFOMANIAK_TOKEN)
	if host == "" {
		if !data.Host.IsNull() {
			host = data.Host.ValueString()
		} else {
			host = DefaultHost
		}
	}
	if token == "" && !data.Token.IsNull() {
		token = data.Token.ValueString()
	}
	data.Host = types.StringValue(host)
	data.Token = types.StringValue(token)
	if token == "" {
		// Fixed: this diagnostic previously pointed at a nonexistent
		// "username" attribute and spoke of a username instead of the token.
		resp.Diagnostics.AddAttributeError(
			path.Root("token"),
			"Missing Infomaniak API Token",
			"The provider cannot create the Infomaniak API client as there is a missing or empty value for the Infomaniak API token. "+
				"Set the token value in the configuration or use the INFOMANIAK_TOKEN environment variable. "+
				"If either is already set, ensure the value is not empty.",
		)
	}
	if resp.Diagnostics.HasError() {
		return
	}
	p.ik = &IkProviderData{
		Version: types.StringValue(p.version),
		Data:    &data,
	}
	resp.DataSourceData = p.ik
	resp.ResourceData = p.ik
}
// Resources returns every resource factory the service packages registered.
func (p *IkProvider) Resources(ctx context.Context) []func() resource.Resource {
	return registry.GetResources()
}

// DataSources returns every data source factory the service packages registered.
func (p *IkProvider) DataSources(ctx context.Context) []func() datasource.DataSource {
	return registry.GetDataSources()
}

// Functions returns nil: the provider defines no provider functions.
func (p *IkProvider) Functions(ctx context.Context) []func() function.Function {
	return nil
}
// New returns a factory that produces IkProvider instances stamped with
// the given version string.
func New(version string) func() provider.Provider {
	return func() provider.Provider {
		p := IkProvider{version: version}
		return &p
	}
}
// ProtoV6ProviderFactories returns the provider factory map used by
// acceptance tests (version fixed to "test").
func ProtoV6ProviderFactories() map[string]func() (tfprotov6.ProviderServer, error) {
	return map[string]func() (tfprotov6.ProviderServer, error){
		"infomaniak": providerserver.NewProtocol6WithError(New("test")()),
	}
}
package registry
import (
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/resource"
)
// resources and datasources accumulate the factories registered by each
// service package before the provider is served.
var resources []func() resource.Resource
var datasources []func() datasource.DataSource

// RegisterResource adds a resource factory to the provider's registry.
// (Parameter renamed from unidiomatic uppercase "F".)
func RegisterResource(f func() resource.Resource) {
	resources = append(resources, f)
}

// RegisterDataSource adds a data source factory to the provider's registry.
func RegisterDataSource(f func() datasource.DataSource) {
	datasources = append(datasources, f)
}

// GetResources returns all registered resource factories.
func GetResources() []func() resource.Resource {
	return resources
}

// GetDataSources returns all registered data source factories.
func GetDataSources() []func() datasource.DataSource {
	return datasources
}
package provider
import (
"fmt"
"os"
"terraform-provider-infomaniak/internal/apis"
)
// GetApiClient converts the opaque provider data handed to resources and
// data sources into an API client. Dev builds can opt into the mocked
// API by setting TF_TESTS_MOCKED=true.
func GetApiClient(providerData any) (*apis.Client, error) {
	ikData, ok := providerData.(*IkProviderData)
	if !ok {
		return nil, fmt.Errorf("expected *provider.IkProviderData, got: %T", providerData)
	}
	mocked := os.Getenv("TF_TESTS_MOCKED")
	if ikData.Version.ValueString() == "dev" && mocked == "true" {
		return apis.NewMockClient(), nil
	}
	return apis.NewClient(ikData.Data.Host.ValueString(), ikData.Data.Token.ValueString(), ikData.Version.ValueString()), nil
}
package dbaas
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/apis/dbaas"
"terraform-provider-infomaniak/internal/provider"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Compile-time checks that the resource implements the framework interfaces.
var (
	_ resource.Resource                = &dbaasBackupScheduleResource{}
	_ resource.ResourceWithConfigure   = &dbaasBackupScheduleResource{}
	_ resource.ResourceWithImportState = &dbaasBackupScheduleResource{}
)

// NewDBaasBackupScheduleResource returns the backup-schedule resource factory.
func NewDBaasBackupScheduleResource() resource.Resource {
	return &dbaasBackupScheduleResource{}
}

// dbaasBackupScheduleResource manages DBaaS backup schedules via the API client.
type dbaasBackupScheduleResource struct {
	client *apis.Client
}
// DBaasBackupScheduleModel is the Terraform state/plan shape of a backup schedule.
type DBaasBackupScheduleModel struct {
	PublicCloudId        types.Int64  `tfsdk:"public_cloud_id"`
	PublicCloudProjectId types.Int64  `tfsdk:"public_cloud_project_id"`
	DbaasId              types.Int64  `tfsdk:"dbaas_id"`
	Id                   types.Int64  `tfsdk:"id"`
	Name                 types.String `tfsdk:"name"`
	ScheduledAt          types.String `tfsdk:"scheduled_at"`
	Retention            types.Int64  `tfsdk:"retention"`
	IsPitrEnabled        types.Bool   `tfsdk:"is_pitr_enabled"`
}
// fill copies the API representation of a backup schedule into the model.
// The identifying cloud/project/dbaas ids are left untouched.
func (model *DBaasBackupScheduleModel) fill(backupSchedule *dbaas.DBaasBackupSchedule) {
	model.Id = types.Int64PointerValue(backupSchedule.Id)
	model.Name = types.StringPointerValue(backupSchedule.Name)
	model.ScheduledAt = types.StringPointerValue(backupSchedule.ScheduledAt)
	model.Retention = types.Int64PointerValue(backupSchedule.Retention)
	model.IsPitrEnabled = types.BoolPointerValue(backupSchedule.IsPitrEnabled)
}
// Metadata sets the resource type name ("infomaniak_dbaas_backup_schedule").
func (r *dbaasBackupScheduleResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_dbaas_backup_schedule"
}
// Configure adds the provider configured client to the resource.
func (r *dbaasBackupScheduleResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// Add a nil check when handling ProviderData because Terraform
	// sets that data after it calls the ConfigureProvider RPC.
	if req.ProviderData == nil {
		return
	}
	client, err := provider.GetApiClient(req.ProviderData)
	if err != nil {
		resp.Diagnostics.AddError(
			"Unexpected Resource Configure Type",
			err.Error(),
		)
		return
	}
	r.client = client
}
// Schema returns the backup-schedule resource schema.
func (r *dbaasBackupScheduleResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = getDbaasBackupScheduleResourceSchema()
}
// Create provisions the backup schedule via the API, then re-reads it so
// computed fields (id, name) reflect server state before saving it.
func (r *dbaasBackupScheduleResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	var data DBaasBackupScheduleModel
	// Read Terraform plan data into the model
	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
	if resp.Diagnostics.HasError() {
		return
	}
	input := &dbaas.DBaasBackupSchedule{
		ScheduledAt:   data.ScheduledAt.ValueStringPointer(),
		Retention:     data.Retention.ValueInt64Pointer(),
		IsPitrEnabled: data.IsPitrEnabled.ValueBoolPointer(),
	}
	scheduleId, err := r.client.DBaas.CreateDBaasScheduleBackup(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.DbaasId.ValueInt64(),
		input,
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when creating Backup Schedule",
			err.Error(),
		)
		return
	}
	data.Id = types.Int64Value(scheduleId)
	// Fetch the freshly created schedule to populate computed attributes.
	scheduleBackup, err := r.client.DBaas.GetDBaasScheduleBackup(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.DbaasId.ValueInt64(),
		data.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when getting Backup Schedule",
			err.Error(),
		)
		return
	}
	data.fill(scheduleBackup)
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
// Update pushes the planned backup-schedule changes to the API, then
// re-reads the schedule so computed fields reflect server state.
func (r *dbaasBackupScheduleResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	var data DBaasBackupScheduleModel
	var state DBaasBackupScheduleModel
	// Read Terraform plan data into the model
	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	input := &dbaas.DBaasBackupSchedule{
		ScheduledAt:   data.ScheduledAt.ValueStringPointer(),
		Retention:     data.Retention.ValueInt64Pointer(),
		IsPitrEnabled: data.IsPitrEnabled.ValueBoolPointer(),
	}
	ok, err := r.client.DBaas.UpdateDBaasScheduleBackup(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.DbaasId.ValueInt64(),
		data.Id.ValueInt64(),
		input,
	)
	if !ok && err == nil {
		resp.Diagnostics.AddError("Unknown Backup Schedule error", "")
		// Bug fix: previously fell through and refreshed/saved state
		// despite having just recorded the failure.
		return
	}
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when updating Backup Schedule",
			err.Error(),
		)
		return
	}
	scheduleBackup, err := r.client.DBaas.GetDBaasScheduleBackup(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.DbaasId.ValueInt64(),
		data.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when getting Backup Schedule",
			err.Error(),
		)
		return
	}
	state.fill(scheduleBackup)
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
}
// Read refreshes the backup schedule from the API into Terraform state.
// NOTE(review): a "not found" API error is surfaced as a hard error; if
// the API distinguishes 404s, consider resp.State.RemoveResource instead.
func (r *dbaasBackupScheduleResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	var state DBaasBackupScheduleModel
	// Read Terraform prior state data into the model
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	scheduleBackup, err := r.client.DBaas.GetDBaasScheduleBackup(
		state.PublicCloudId.ValueInt64(),
		state.PublicCloudProjectId.ValueInt64(),
		state.DbaasId.ValueInt64(),
		state.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when getting Backup Schedule",
			err.Error(),
		)
		return
	}
	state.fill(scheduleBackup)
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
}
// Delete removes the backup schedule via the API.
func (r *dbaasBackupScheduleResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	var state DBaasBackupScheduleModel
	// Read Terraform prior state data into the model
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	// DeleteDBaas API call logic
	_, err := r.client.DBaas.DeleteDBaasScheduleBackup(
		state.PublicCloudId.ValueInt64(),
		state.PublicCloudProjectId.ValueInt64(),
		state.DbaasId.ValueInt64(),
		state.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when deleting DBaaS backup schedule",
			err.Error(),
		)
		return
	}
	// NOTE(review): the framework removes state automatically after a
	// successful Delete; re-setting the prior state here looks unnecessary
	// — confirm it does not prevent state removal.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
}
// ImportState parses an import id of the form
// "public_cloud_id,public_cloud_project_id,dbaas_id,id" and seeds state.
func (r *dbaasBackupScheduleResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	idParts := strings.Split(req.ID, ",")
	if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" {
		resp.Diagnostics.AddError(
			"Unexpected Import Identifier",
			// Message fixed: the expected format previously omitted dbaas_id
			// even though four parts are required.
			fmt.Sprintf("Expected import identifier with format: public_cloud_id,public_cloud_project_id,dbaas_id,id. Got: %q", req.ID),
		)
		return
	}
	// Parse all four parts, collecting any failure into one diagnostic.
	var errorList error
	publicCloudId, err := strconv.ParseInt(idParts[0], 10, 64)
	errorList = errors.Join(errorList, err)
	publicCloudProjectId, err := strconv.ParseInt(idParts[1], 10, 64)
	errorList = errors.Join(errorList, err)
	dbaasId, err := strconv.ParseInt(idParts[2], 10, 64)
	errorList = errors.Join(errorList, err)
	id, err := strconv.ParseInt(idParts[3], 10, 64)
	errorList = errors.Join(errorList, err)
	if errorList != nil {
		resp.Diagnostics.AddError(
			"Unexpected Import Identifier",
			fmt.Sprintf("Expected import identifier with format: public_cloud_id,public_cloud_project_id,dbaas_id,id. Got: %q", req.ID),
		)
		return
	}
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("public_cloud_id"), publicCloudId)...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("public_cloud_project_id"), publicCloudProjectId)...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("dbaas_id"), dbaasId)...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), id)...)
}
package dbaas
import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
)
// getDbaasBackupScheduleResourceSchema builds the schema for the
// dbaas_backup_schedule resource. The three parent ids force replacement
// when changed; id and name are computed by the API.
func getDbaasBackupScheduleResourceSchema() schema.Schema {
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			"public_cloud_id": schema.Int64Attribute{
				Required:            true,
				MarkdownDescription: "The id of the public cloud",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.RequiresReplace(),
				},
			},
			"public_cloud_project_id": schema.Int64Attribute{
				Required:            true,
				MarkdownDescription: "The id of the public cloud project",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.RequiresReplace(),
				},
			},
			"dbaas_id": schema.Int64Attribute{
				Required:            true,
				MarkdownDescription: "The id of the dbaas",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.RequiresReplace(),
				},
			},
			"id": schema.Int64Attribute{
				Computed:            true,
				MarkdownDescription: "BackupSchedule identifier",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.UseStateForUnknown(),
				},
			},
			"name": schema.StringAttribute{
				Computed:            true,
				MarkdownDescription: "Name of the backup",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			// NOTE(review): UseStateForUnknown on the Required attributes
			// below has no effect (it targets unknown Computed values) —
			// confirm whether these modifiers were meant for something else.
			"scheduled_at": schema.StringAttribute{
				Required:            true,
				MarkdownDescription: "Use the given time as the time to create the scheduled backup (24 hour, UTC)",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"retention": schema.Int64Attribute{
				Required:            true,
				MarkdownDescription: "The number of backups to keep for the schedule",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.UseStateForUnknown(),
				},
			},
			"is_pitr_enabled": schema.BoolAttribute{
				Required:            true,
				MarkdownDescription: "Enable/Disable point in time recovery",
				PlanModifiers: []planmodifier.Bool{
					boolplanmodifier.UseStateForUnknown(),
				},
			},
		},
	}
}
package dbaas
import (
"context"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/provider"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource              = &dbaasConstsDataSource{}
	_ datasource.DataSourceWithConfigure = &dbaasConstsDataSource{}
)

// dbaasConstsDataSource exposes DBaaS regions and engine types/versions.
type dbaasConstsDataSource struct {
	client *apis.Client
}

// NewDBaasConstsDataSource is a helper function to simplify the provider implementation.
func NewDBaasConstsDataSource() datasource.DataSource {
	return &dbaasConstsDataSource{}
}
// Configure adds the provider configured client to the data source.
func (d *dbaasConstsDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	// Add a nil check when handling ProviderData because Terraform
	// sets that data after it calls the ConfigureProvider RPC.
	if req.ProviderData == nil {
		return
	}
	client, err := provider.GetApiClient(req.ProviderData)
	if err != nil {
		resp.Diagnostics.AddError(
			"Unexpected Data Source Configure Type",
			err.Error(),
		)
		return
	}
	d.client = client
}
// DBaasConstsDataModel is the data source output: available regions plus
// the engine types with their supported versions.
type DBaasConstsDataModel struct {
	Regions types.List   `tfsdk:"regions"`
	Types   []DBaasTypes `tfsdk:"types"`
}

// DBaasTypes pairs an engine name with its supported versions.
type DBaasTypes struct {
	Name     types.String `tfsdk:"name"`
	Versions types.List   `tfsdk:"versions"`
}
// Schema defines the schema for the data source.
func (d *dbaasConstsDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = getDbaasConstantsDataSourceSchema()
}
// Read refreshes the Terraform state with the latest data.
func (d *dbaasConstsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	var data DBaasConstsDataModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
	// Bail out before hitting the API if the config could not be read
	// (previously missing; sibling data sources and resources all guard here).
	if resp.Diagnostics.HasError() {
		return
	}
	regions, err := d.client.DBaas.GetDbaasRegions()
	if err != nil {
		resp.Diagnostics.AddError(
			"Unable to find DBaaS regions",
			err.Error(),
		)
		return
	}
	tfregions, diags := types.ListValueFrom(ctx, types.StringType, regions)
	resp.Diagnostics.Append(diags...)
	data.Regions = tfregions
	dbaasTypes, err := d.client.DBaas.GetDbaasTypes()
	if err != nil {
		resp.Diagnostics.AddError(
			"Unable to find DBaaS types",
			err.Error(),
		)
		return
	}
	var dbaasPacks []DBaasTypes
	for _, dbType := range dbaasTypes {
		versioned, diags := types.ListValueFrom(ctx, types.StringType, dbType.Versions)
		resp.Diagnostics.Append(diags...)
		dbaasPacks = append(dbaasPacks, DBaasTypes{
			Name:     types.StringValue(dbType.Name),
			Versions: versioned,
		})
	}
	data.Types = dbaasPacks
	// Set state
	diags = resp.State.Set(ctx, &data)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
}
// Metadata returns the data source type name ("infomaniak_dbaas_constants").
func (d *dbaasConstsDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_dbaas_constants"
}
package dbaas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// getDbaasConstantsDataSourceSchema builds the schema for the
// dbaas_constants data source: all attributes are computed.
func getDbaasConstantsDataSourceSchema() schema.Schema {
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			"types": schema.ListNestedAttribute{
				NestedObject: schema.NestedAttributeObject{
					Attributes: map[string]schema.Attribute{
						"name": schema.StringAttribute{
							Computed: true,
						},
						"versions": schema.ListAttribute{
							Computed:    true,
							ElementType: types.StringType,
						},
					},
				},
				Computed: true,
			},
			"regions": schema.ListAttribute{
				ElementType: types.StringType,
				Computed:    true,
			},
		},
	}
}
package dbaas
import (
"context"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/apis/dbaas"
"terraform-provider-infomaniak/internal/provider"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource              = &dbaasDataSource{}
	_ datasource.DataSourceWithConfigure = &dbaasDataSource{}
)

// dbaasDataSource reads a single DBaaS instance by its ids.
type dbaasDataSource struct {
	client *apis.Client
}

// NewDBaasDataSource is a helper function to simplify the provider implementation.
func NewDBaasDataSource() datasource.DataSource {
	return &dbaasDataSource{}
}
// Configure adds the provider configured client to the data source.
func (d *dbaasDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	// Add a nil check when handling ProviderData because Terraform
	// sets that data after it calls the ConfigureProvider RPC.
	if req.ProviderData == nil {
		return
	}
	client, err := provider.GetApiClient(req.ProviderData)
	if err != nil {
		resp.Diagnostics.AddError(
			"Unexpected Data Source Configure Type",
			err.Error(),
		)
		return
	}
	d.client = client
}
// DBaasDataModel is the Terraform model backing the dbaas data source.
// The three id fields are user-supplied lookup keys; the remaining fields
// are populated from the API response (see fill).
type DBaasDataModel struct {
// Lookup keys supplied in the configuration.
PublicCloudId types.Int64 `tfsdk:"public_cloud_id"`
PublicCloudProjectId types.Int64 `tfsdk:"public_cloud_project_id"`
Id types.Int64 `tfsdk:"id"`
// Computed attributes filled from the API response.
KubernetesIdentifier types.String `tfsdk:"kube_identifier"`
Name types.String `tfsdk:"name"`
PackName types.String `tfsdk:"pack_name"`
Region types.String `tfsdk:"region"`
Type types.String `tfsdk:"type"`
Version types.String `tfsdk:"version"`
// Connection details; left empty when the API omits the connection object.
Host types.String `tfsdk:"host"`
Port types.String `tfsdk:"port"`
User types.String `tfsdk:"user"`
Ca types.String `tfsdk:"ca"`
// IP whitelist, read separately via GetIpFilters.
AllowedCIDRs types.List `tfsdk:"allowed_cidrs"`
}
// fill copies the API representation of a DBaaS instance into the data
// source model. Connection details are only populated when the API returned
// them (they may be absent while the instance is still provisioning).
func (data *DBaasDataModel) fill(obj *dbaas.DBaaS) {
	// Note: the original code assigned Region twice; the duplicate is removed.
	data.Region = types.StringValue(obj.Region)
	data.Name = types.StringValue(obj.Name)
	data.PackName = types.StringValue(obj.Pack.Name)
	data.Type = types.StringValue(obj.Type)
	data.Version = types.StringValue(obj.Version)
	if obj.Connection != nil {
		data.Host = types.StringValue(obj.Connection.Host)
		data.Port = types.StringValue(obj.Connection.Port)
		data.User = types.StringValue(obj.Connection.User)
		data.Ca = types.StringValue(obj.Connection.Ca)
	}
	data.KubernetesIdentifier = types.StringValue(obj.KubernetesIdentifier)
}
// Schema defines the schema for the data source.
func (d *dbaasDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	s := getDbaasDataSourceSchema()
	resp.Schema = s
}
// Read refreshes the Terraform state with the latest data: it fetches the
// DBaaS instance and its IP filter whitelist, then stores both in state.
func (d *dbaasDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	var data DBaasDataModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
	// Abort before calling the API when the config could not be decoded
	// (the original proceeded with zero-valued lookup keys).
	if resp.Diagnostics.HasError() {
		return
	}
	obj, err := d.client.DBaas.GetDBaaS(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Unable to find DBaaS",
			err.Error(),
		)
		return
	}
	data.fill(obj)
	filteredIps, err := d.client.DBaas.GetIpFilters(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when reading DBaaS filtered IPs",
			err.Error(),
		)
		return
	}
	listFilteredIps, diags := types.ListValueFrom(ctx, types.StringType, filteredIps)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	data.AllowedCIDRs = listFilteredIps
	// Set state
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
// Metadata returns the data source type name.
func (d *dbaasDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	// Provider name plus this data source's suffix.
	const suffix = "_dbaas"
	resp.TypeName = req.ProviderTypeName + suffix
}
package dbaas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// getDbaasDataSourceSchema builds the schema for the dbaas data source:
// three required lookup ids, plus computed attributes describing the
// instance, its connection details, and its IP whitelist.
func getDbaasDataSourceSchema() schema.Schema {
	// Small constructors to keep the attribute map readable.
	computedString := func(desc string) schema.StringAttribute {
		return schema.StringAttribute{
			Computed:            true,
			MarkdownDescription: desc,
		}
	}
	requiredInt := func(desc string) schema.Int64Attribute {
		return schema.Int64Attribute{
			Required:            true,
			MarkdownDescription: desc,
		}
	}
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			// Lookup keys.
			"public_cloud_id":         requiredInt("The id of the public cloud where DBaaS is installed"),
			"public_cloud_project_id": requiredInt("The id of the public cloud project where DBaaS is installed"),
			"id":                      requiredInt("The id of this DBaaS"),
			// Instance description.
			"name":      computedString("The name of the DBaaS project"),
			"pack_name": computedString("The name of the pack associated to the DBaaS project"),
			"region":    computedString("The region where the DBaaS project resides in."),
			"type":      computedString("The type of the database associated with the DBaaS project"),
			"version":   computedString("The version of the database associated with the DBaaS project"),
			// Connection details.
			"host": computedString("The host to access this database."),
			"port": computedString("The port to access this database."),
			"user": computedString("The username to access this database."),
			"ca":   computedString("The Database CA Certificate"),
			"allowed_cidrs": schema.ListAttribute{
				Computed:            true,
				ElementType:         types.StringType,
				MarkdownDescription: "Allowed to query Database IP whitelist",
			},
			"kube_identifier": computedString("DbaaS kubernetes name"),
		},
		MarkdownDescription: "The dbaas data source allows the user to manage a dbaas project",
	}
}
package dbaas
import (
"context"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/apis/dbaas"
"terraform-provider-infomaniak/internal/provider"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Ensure the implementation satisfies the expected interfaces.
// Compile-time checks: *dbaasPackDataSource must implement both the base
// DataSource interface and DataSourceWithConfigure (for client injection).
var (
_ datasource.DataSource = &dbaasPackDataSource{}
_ datasource.DataSourceWithConfigure = &dbaasPackDataSource{}
)

// dbaasPackDataSource looks up a DBaaS pricing/sizing pack via the API.
type dbaasPackDataSource struct {
// client is the provider-wide API client; assigned in Configure.
client *apis.Client
}

// NewDBaasPackDataSource is a helper function to simplify the provider implementation.
func NewDBaasPackDataSource() datasource.DataSource {
return &dbaasPackDataSource{}
}
// Configure injects the provider-configured API client into the data source.
func (d *dbaasPackDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	// ProviderData is set only after the ConfigureProvider RPC; a nil value
	// here is expected on earlier RPCs and must not be treated as an error.
	if req.ProviderData == nil {
		return
	}
	apiClient, err := provider.GetApiClient(req.ProviderData)
	if err != nil {
		resp.Diagnostics.AddError(
			"Unexpected Data Source Configure Type",
			err.Error(),
		)
		return
	}
	d.client = apiClient
}
// DBaasPackDataModel is the Terraform model for the dbaas pack data source.
// "type" is the only required filter; the other scalar fields act as
// optional filters on input and are overwritten with the matched pack's
// values after Read.
type DBaasPackDataModel struct {
Type types.String `tfsdk:"type"`
ID types.Int64 `tfsdk:"id"`
Group types.String `tfsdk:"group"`
Name types.String `tfsdk:"name"`
Instances types.Int64 `tfsdk:"instances"`
CPU types.Int64 `tfsdk:"cpu"`
RAM types.Int64 `tfsdk:"ram"`
Storage types.Int64 `tfsdk:"storage"`
// NOTE(review): Rates is declared here and exposed in the schema but never
// populated in Read — confirm whether pricing should be wired through.
Rates *RatesModel `tfsdk:"rates"`
}

// RatesModel groups hourly pricing per currency.
type RatesModel struct {
CHF *PricingModel `tfsdk:"chf"`
EUR *PricingModel `tfsdk:"eur"`
}

// PricingModel holds hourly prices excluding and including taxes.
type PricingModel struct {
HourlyExcludingTaxes types.Float64 `tfsdk:"hour_excl_tax"`
HourlyIncludingTaxes types.Float64 `tfsdk:"hour_incl_tax"`
}
// Schema defines the schema for the data source.
func (d *dbaasPackDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	s := getDbaasPacksDataSourceSchema()
	resp.Schema = s
}
// Read resolves a single DBaaS pack matching the configured filters and
// refreshes the Terraform state with the matched pack's details.
func (d *dbaasPackDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	var data DBaasPackDataModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
	// Abort before calling the API when the config could not be decoded
	// (the original proceeded with zero-valued filters).
	if resp.Diagnostics.HasError() {
		return
	}
	pack, err := d.client.DBaas.GetDbaasPack(dbaas.PackFilter{
		DbType:    data.Type.ValueString(),
		Group:     data.Group.ValueStringPointer(),
		Name:      data.Name.ValueStringPointer(),
		Instances: data.Instances.ValueInt64Pointer(),
		Cpu:       data.CPU.ValueInt64Pointer(),
		Ram:       data.RAM.ValueInt64Pointer(),
		Storage:   data.Storage.ValueInt64Pointer(),
	})
	if err != nil {
		resp.Diagnostics.AddError(
			"Unable to find DBaaS packs",
			err.Error(),
		)
		return
	}
	data.Type = types.StringValue(pack.Type)
	data.Group = types.StringValue(pack.Group)
	data.ID = types.Int64Value(pack.ID)
	data.Name = types.StringValue(pack.Name)
	data.Instances = types.Int64Value(pack.Instances)
	data.CPU = types.Int64Value(pack.CPU)
	data.RAM = types.Int64Value(pack.RAM)
	data.Storage = types.Int64Value(pack.Storage)
	// NOTE(review): data.Rates is never populated from the API response, so
	// the computed "rates" attribute stays null — confirm whether the pack
	// payload carries pricing and should be mapped here.
	// Set state
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
// Metadata returns the data source type name.
func (d *dbaasPackDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	// Provider name plus this data source's suffix.
	const suffix = "_dbaas_pack"
	resp.TypeName = req.ProviderTypeName + suffix
}
package dbaas
import "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
// getDbaasPacksDataSourceSchema builds the schema for the dbaas pack data
// source: a required "type" filter, optional narrowing filters, and
// computed pack details including per-currency hourly rates.
func getDbaasPacksDataSourceSchema() schema.Schema {
	// Hourly pricing (excluding and including taxes) for one currency.
	pricingObject := schema.SingleNestedAttribute{
		Computed: true,
		Attributes: map[string]schema.Attribute{
			"hour_excl_tax": schema.Float64Attribute{Computed: true},
			"hour_incl_tax": schema.Float64Attribute{Computed: true},
		},
	}
	// Reusable optional filter attributes.
	optionalInt64 := schema.Int64Attribute{Optional: true}
	optionalString := schema.StringAttribute{Optional: true}
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			"type":      schema.StringAttribute{Required: true},
			"id":        schema.Int64Attribute{Computed: true},
			"group":     optionalString,
			"name":      optionalString,
			"instances": optionalInt64,
			"cpu":       optionalInt64,
			"ram":       optionalInt64,
			"storage":   optionalInt64,
			"rates": schema.SingleNestedAttribute{
				Computed: true,
				Attributes: map[string]schema.Attribute{
					"chf": pricingObject,
					"eur": pricingObject,
				},
			},
		},
	}
}
package dbaas
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/apis/dbaas"
"terraform-provider-infomaniak/internal/provider"
"time"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Compile-time checks: *dbaasResource must implement the resource,
// configure and import-state interfaces.
var (
_ resource.Resource = &dbaasResource{}
_ resource.ResourceWithConfigure = &dbaasResource{}
_ resource.ResourceWithImportState = &dbaasResource{}
)

// NewDBaasResource is a helper function to simplify the provider implementation.
func NewDBaasResource() resource.Resource {
return &dbaasResource{}
}

// dbaasResource manages the lifecycle of an Infomaniak DBaaS instance.
type dbaasResource struct {
// client is the provider-wide API client; assigned in Configure.
client *apis.Client
}
// DBaasModel is the Terraform model backing the dbaas resource. It mirrors
// DBaasDataModel but additionally carries the (sensitive) Password returned
// on creation.
type DBaasModel struct {
// Identifiers of the enclosing public cloud project; both force
// replacement when changed (see the resource schema).
PublicCloudId types.Int64 `tfsdk:"public_cloud_id"`
PublicCloudProjectId types.Int64 `tfsdk:"public_cloud_project_id"`
// Id is assigned by the API on creation.
Id types.Int64 `tfsdk:"id"`
KubernetesIdentifier types.String `tfsdk:"kube_identifier"`
Name types.String `tfsdk:"name"`
PackName types.String `tfsdk:"pack_name"`
Region types.String `tfsdk:"region"`
Type types.String `tfsdk:"type"`
Version types.String `tfsdk:"version"`
// Connection details; only set when the API returns a connection object.
Host types.String `tfsdk:"host"`
Port types.String `tfsdk:"port"`
User types.String `tfsdk:"user"`
Password types.String `tfsdk:"password"`
Ca types.String `tfsdk:"ca"`
// IP whitelist managed through PatchIpFilters/GetIpFilters.
AllowedCIDRs types.List `tfsdk:"allowed_cidrs"`
}
// Metadata returns the resource type name.
func (r *dbaasResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	const suffix = "_dbaas"
	resp.TypeName = req.ProviderTypeName + suffix
}
// Configure injects the provider-configured API client into the resource.
func (r *dbaasResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// ProviderData is populated only after the ConfigureProvider RPC;
	// nil is expected on earlier calls and is not an error.
	if req.ProviderData == nil {
		return
	}
	apiClient, err := provider.GetApiClient(req.ProviderData)
	if err != nil {
		resp.Diagnostics.AddError(
			"Unexpected Resource Configure Type",
			err.Error(),
		)
		return
	}
	r.client = apiClient
}
// Schema defines the schema for the resource.
func (r *dbaasResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
	s := getDbaasResourceSchema()
	resp.Schema = s
}
// Create provisions a new DBaaS instance, waits for it to become active,
// then applies the IP filter whitelist and stores the final state.
func (r *dbaasResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	var data DBaasModel
	// Read Terraform plan data into the model
	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
	if resp.Diagnostics.HasError() {
		return
	}
	chosenPack, err := r.getPackId(data, &resp.Diagnostics)
	if err != nil {
		// getPackId has already recorded the diagnostic.
		return
	}
	input := &dbaas.DBaaS{
		Project: dbaas.DBaaSProject{
			PublicCloudId: data.PublicCloudId.ValueInt64(),
			ProjectId:     data.PublicCloudProjectId.ValueInt64(),
		},
		Region:  data.Region.ValueString(),
		Version: data.Version.ValueString(),
		Type:    data.Type.ValueString(),
		Name:    data.Name.ValueString(),
		PackId:  chosenPack.Id,
	}
	// CreateDBaas API call logic
	createInfos, err := r.client.DBaas.CreateDBaaS(input)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when creating DBaaS",
			err.Error(),
		)
		return
	}
	// Persist the id immediately so a later failure does not orphan the
	// freshly created instance in Terraform state.
	data.Id = types.Int64Value(createInfos.Id)
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
	dbaasObject, err := r.waitUntilActive(ctx, input, createInfos.Id)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when waiting for DBaaS to be Active",
			err.Error(),
		)
		return
	}
	// nil object without an error means the context was cancelled.
	if dbaasObject == nil {
		return
	}
	cidrs := make([]string, 0, len(data.AllowedCIDRs.Elements()))
	resp.Diagnostics.Append(data.AllowedCIDRs.ElementsAs(ctx, &cidrs, false)...)
	// Fix: bail out if the CIDR list could not be converted instead of
	// patching the filters with a partial/empty list.
	if resp.Diagnostics.HasError() {
		return
	}
	allowedCIDRs := dbaas.AllowedCIDRs{
		IpFilters: cidrs,
	}
	ok, err := r.client.DBaas.PatchIpFilters(
		input.Project.PublicCloudId,
		input.Project.ProjectId,
		dbaasObject.Id,
		allowedCIDRs,
	)
	// Fix: check err before ok — the original reported the generic
	// "Unknown IP filter error" even when a concrete API error was available.
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when updating IP Filters",
			err.Error(),
		)
		return
	}
	if !ok {
		resp.Diagnostics.AddError("Unknown IP filter error", "")
		return
	}
	data.fill(dbaasObject)
	// Save data into Terraform state
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
// Read refreshes the Terraform state with the latest data from the API:
// the DBaaS instance itself plus its IP filter whitelist.
func (r *dbaasResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	var state DBaasModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	cloudID := state.PublicCloudId.ValueInt64()
	projectID := state.PublicCloudProjectId.ValueInt64()
	dbaasID := state.Id.ValueInt64()
	// Read API call logic
	dbaasObject, err := r.client.DBaas.GetDBaaS(cloudID, projectID, dbaasID)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when reading DBaaS",
			err.Error(),
		)
		return
	}
	filteredIps, err := r.client.DBaas.GetIpFilters(cloudID, projectID, dbaasID)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when reading DBaaS filtered IPs",
			err.Error(),
		)
		return
	}
	listFilteredIps, diags := types.ListValueFrom(ctx, types.StringType, filteredIps)
	state.AllowedCIDRs = listFilteredIps
	resp.Diagnostics.Append(diags...)
	state.fill(dbaasObject)
	// Save the refreshed model back into Terraform state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
}
// Update applies name/pack changes to an existing DBaaS instance, waits for
// it to become active again, then reconciles the IP filter whitelist.
func (r *dbaasResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	var state DBaasModel
	var data DBaasModel
	// Read Terraform plan data into the model
	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	// pack_name requires replacement in the schema, so the pack recorded in
	// state is the one still in effect for this update.
	chosenPackState, err := r.getPackId(state, &resp.Diagnostics)
	if err != nil {
		// getPackId has already recorded the diagnostic.
		return
	}
	// Update API call logic
	input := &dbaas.DBaaS{
		Project: dbaas.DBaaSProject{
			PublicCloudId: data.PublicCloudId.ValueInt64(),
			ProjectId:     data.PublicCloudProjectId.ValueInt64(),
		},
		Id:      state.Id.ValueInt64(),
		Name:    data.Name.ValueString(),
		PackId:  chosenPackState.Id,
		Region:  state.Region.ValueString(),
		Version: state.Version.ValueString(),
		Type:    state.Type.ValueString(),
	}
	_, err = r.client.DBaas.UpdateDBaaS(input)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when updating DBaaS",
			err.Error(),
		)
		return
	}
	dbaasObject, err := r.waitUntilActive(ctx, input, input.Id)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when getting DBaaS",
			err.Error(),
		)
		return
	}
	// nil object without an error means the context was cancelled.
	if dbaasObject == nil {
		return
	}
	cidrs := make([]string, 0, len(data.AllowedCIDRs.Elements()))
	resp.Diagnostics.Append(data.AllowedCIDRs.ElementsAs(ctx, &cidrs, false)...)
	// Fix: abort if the CIDR list could not be converted instead of patching
	// the filters with a partial/empty list.
	if resp.Diagnostics.HasError() {
		return
	}
	allowedCIDRs := dbaas.AllowedCIDRs{
		IpFilters: cidrs,
	}
	ok, err := r.client.DBaas.PatchIpFilters(
		state.PublicCloudId.ValueInt64(),
		state.PublicCloudProjectId.ValueInt64(),
		state.Id.ValueInt64(),
		allowedCIDRs,
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when updating IP Filters",
			err.Error(),
		)
		return
	}
	if !ok {
		// Fix: the original fell through after adding this diagnostic and
		// still persisted the new CIDRs into state; return instead.
		resp.Diagnostics.AddError("Unknown IP filter error", "")
		return
	}
	state.AllowedCIDRs = data.AllowedCIDRs
	state.fill(dbaasObject)
	// Save updated data into Terraform state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
}
// Delete removes the DBaaS instance referenced by the current state.
func (r *dbaasResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	var state DBaasModel
	// Decode the prior state to learn which instance to delete.
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	// Only the error matters here; the response payload is discarded.
	if _, err := r.client.DBaas.DeleteDBaaS(
		state.PublicCloudId.ValueInt64(),
		state.PublicCloudProjectId.ValueInt64(),
		state.Id.ValueInt64(),
	); err != nil {
		resp.Diagnostics.AddError(
			"Error when deleting DBaaS",
			err.Error(),
		)
	}
}
// ImportState parses an import identifier of the form
// "public_cloud_id,public_cloud_project_id,id" and seeds the state with the
// three parsed integer attributes.
func (r *dbaasResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	parts := strings.Split(req.ID, ",")
	badFormat := func() {
		resp.Diagnostics.AddError(
			"Unexpected Import Identifier",
			fmt.Sprintf("Expected import identifier with format: public_cloud_id,public_cloud_project_id,id. Got: %q", req.ID),
		)
	}
	if len(parts) != 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" {
		badFormat()
		return
	}
	ids := make([]int64, len(parts))
	var parseErrs error
	for i, part := range parts {
		v, err := strconv.ParseInt(part, 10, 64)
		parseErrs = errors.Join(parseErrs, err)
		ids[i] = v
	}
	if parseErrs != nil {
		badFormat()
		return
	}
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("public_cloud_id"), ids[0])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("public_cloud_project_id"), ids[1])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), ids[2])...)
}
// getPackId resolves the DBaaS pack matching the model's type and pack name.
// On failure the error is both recorded in the given diagnostics and
// returned, so callers can simply abort.
func (r *dbaasResource) getPackId(data DBaasModel, diagnostic *diag.Diagnostics) (*dbaas.DBaaSPack, error) {
	pack, err := r.client.DBaas.FindPack(data.Type.ValueString(), data.PackName.ValueString())
	if err == nil {
		return pack, nil
	}
	diagnostic.AddError(
		"Could not find DBaaS Pack",
		err.Error(),
	)
	return nil, err
}
// fill copies the API representation of a DBaaS instance into the resource
// model. Connection details (including the password) are only set when the
// API returned a connection object.
// Fix: the parameter was named "dbaas", shadowing the imported dbaas
// package inside the body; renamed to src.
func (model *DBaasModel) fill(src *dbaas.DBaaS) {
	model.Id = types.Int64Value(src.Id)
	model.KubernetesIdentifier = types.StringValue(src.KubernetesIdentifier)
	model.Region = types.StringValue(src.Region)
	model.Type = types.StringValue(src.Type)
	model.Version = types.StringValue(src.Version)
	model.Name = types.StringValue(src.Name)
	model.PackName = types.StringValue(src.Pack.Name)
	if src.Connection != nil {
		model.Host = types.StringValue(src.Connection.Host)
		model.Port = types.StringValue(src.Connection.Port)
		model.User = types.StringValue(src.Connection.User)
		model.Password = types.StringValue(src.Connection.Password)
		model.Ca = types.StringValue(src.Connection.Ca)
	}
}
// waitUntilActive polls the API every 5 seconds until the DBaaS instance
// reports a "ready" status. It returns (nil, nil) when ctx is cancelled so
// callers can distinguish cancellation from an API failure.
// Fixes: the ticker was never stopped (resource leak), and the parameter
// named "dbaas" shadowed the imported dbaas package.
func (r *dbaasResource) waitUntilActive(ctx context.Context, instance *dbaas.DBaaS, id int64) (*dbaas.DBaaS, error) {
	t := time.NewTicker(5 * time.Second)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return nil, nil
		case <-t.C:
			found, err := r.client.DBaas.GetDBaaS(instance.Project.PublicCloudId, instance.Project.ProjectId, id)
			if err != nil {
				return nil, err
			}
			// The GetDBaaS call may have raced with cancellation; re-check.
			if ctx.Err() != nil {
				return nil, nil
			}
			if found.Status == "ready" {
				return found, nil
			}
		}
	}
}
package dbaas
import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// getDbaasResourceSchema builds the schema for the dbaas resource.
// Attributes that cannot be changed in place (cloud ids, pack, type,
// version, region) carry RequiresReplace plan modifiers; connection
// details are computed outputs, with the password marked sensitive.
func getDbaasResourceSchema() schema.Schema {
return schema.Schema{
Attributes: map[string]schema.Attribute{
"public_cloud_id": schema.Int64Attribute{
Required: true,
MarkdownDescription: "The id of the public cloud",
PlanModifiers: []planmodifier.Int64{
int64planmodifier.RequiresReplace(),
},
},
"public_cloud_project_id": schema.Int64Attribute{
Required: true,
MarkdownDescription: "The id of the public cloud project",
PlanModifiers: []planmodifier.Int64{
int64planmodifier.RequiresReplace(),
},
},
// API-assigned identifier; kept stable across plans.
"id": schema.Int64Attribute{
Computed: true,
MarkdownDescription: "The unique identifier for the DBaaS instance.",
PlanModifiers: []planmodifier.Int64{
int64planmodifier.UseStateForUnknown(),
},
},
"pack_name": schema.StringAttribute{
Required: true,
MarkdownDescription: "The name of the pack associated to the DBaaS project",
PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(),
},
},
"type": schema.StringAttribute{
Required: true,
MarkdownDescription: "The type of database associated with the DBaaS being installed",
PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(),
},
},
"version": schema.StringAttribute{
Required: true,
MarkdownDescription: "The version of database associated with the DBaaS being installed",
PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(),
},
},
// NOTE(review): UseStateForUnknown has no effect on a Required
// attribute (its planned value is never unknown) — confirm intent.
"name": schema.StringAttribute{
Required: true,
MarkdownDescription: "The name of the DBaaS project",
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
},
},
// NOTE(review): same as "name" — UseStateForUnknown is a no-op here;
// RequiresReplace alone should be sufficient for a Required attribute.
"region": schema.StringAttribute{
Required: true,
MarkdownDescription: "The region where the DBaaS will reside.",
PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(),
stringplanmodifier.UseStateForUnknown(),
},
},
// Computed connection details, filled once the instance is active.
"host": schema.StringAttribute{
Computed: true,
MarkdownDescription: "The host to access this database.",
},
"port": schema.StringAttribute{
Computed: true,
MarkdownDescription: "The port to access this database.",
},
"user": schema.StringAttribute{
Computed: true,
MarkdownDescription: "The username to access this database.",
},
"password": schema.StringAttribute{
Computed: true,
Sensitive: true,
MarkdownDescription: "The password to access this database.",
},
"ca": schema.StringAttribute{
Computed: true,
MarkdownDescription: "The Database CA Certificate",
},
// NOTE(review): UseStateForUnknown on a Required list is likewise a
// no-op — confirm whether this was meant to be Optional+Computed.
"allowed_cidrs": schema.ListAttribute{
Required: true,
ElementType: types.StringType,
MarkdownDescription: "Allowed to query Database IP whitelist",
PlanModifiers: []planmodifier.List{
listplanmodifier.UseStateForUnknown(),
},
},
"kube_identifier": schema.StringAttribute{
Computed: true,
MarkdownDescription: "DbaaS kubernetes name",
},
},
MarkdownDescription: "The dbaas resource allows the user to manage a dbaas project",
}
}
package dbaas
import "terraform-provider-infomaniak/internal/provider/registry"
// Register wires the DBaaS resources and data sources into the provider's
// registry. It is called once from main before the provider is served.
func Register() {
registry.RegisterResource(NewDBaasResource)
registry.RegisterResource(NewDBaasBackupScheduleResource)
registry.RegisterDataSource(NewDBaasDataSource)
registry.RegisterDataSource(NewDBaasPackDataSource)
registry.RegisterDataSource(NewDBaasConstsDataSource)
}
package dbaas
import (
"errors"
"fmt"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-framework/resource"
)
// ImportIds holds the parsed components of a backup/restore import
// identifier ("public_cloud_id,public_cloud_project_id,dbaas_id,id").
type ImportIds struct {
PublicCloudId int64
PublicCloudProjectId int64
DbaasId int64
// Id is the final segment, kept as an opaque string.
Id string
}
// parseBackupRestoreImport splits an import identifier of the form
// "public_cloud_id,public_cloud_project_id,dbaas_id,id" into its typed
// parts. The first three segments must be integers; the last is kept
// verbatim.
func parseBackupRestoreImport(req resource.ImportStateRequest) (*ImportIds, error) {
	idParts := strings.Split(req.ID, ",")
	if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" {
		return nil, fmt.Errorf("expected import identifier with format: public_cloud_id,public_cloud_project_id,dbaas_id,id. got: %q", req.ID)
	}
	var errorList error
	publicCloudId, err := strconv.ParseInt(idParts[0], 10, 64)
	errorList = errors.Join(errorList, err)
	publicCloudProjectId, err := strconv.ParseInt(idParts[1], 10, 64)
	errorList = errors.Join(errorList, err)
	dbaasId, err := strconv.ParseInt(idParts[2], 10, 64)
	errorList = errors.Join(errorList, err)
	if errorList != nil {
		// Fix: wrap the parse failures so the caller's diagnostic shows why
		// the identifier was rejected, not just its expected shape.
		return nil, fmt.Errorf("expected import identifier with format: public_cloud_id,public_cloud_project_id,dbaas_id,id. got: %q: %w", req.ID, errorList)
	}
	return &ImportIds{
		PublicCloudId:        publicCloudId,
		PublicCloudProjectId: publicCloudProjectId,
		DbaasId:              dbaasId,
		Id:                   idParts[3],
	}, nil
}
package domain
import (
"fmt"
"net"
"strings"
"terraform-provider-infomaniak/internal/apis/domain"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/miekg/dns"
)
// ComputeRawTarget renders the structured `data` block into the raw DNS
// RDATA string expected by the API. When the user supplied `target`
// directly, that value is returned unchanged.
// Fixes: guard against a nil data block and against record types with no
// rendering case below — both previously caused a nil-pointer panic.
func (model *RecordModel) ComputeRawTarget() string {
	// don't do anything if it's already set
	if !model.Target.IsUnknown() && !model.Target.IsNull() {
		return model.Target.ValueString()
	}
	// Without a data block there is nothing to render.
	if model.Data == nil {
		return ""
	}
	var record dns.RR
	switch model.Type.ValueString() {
	case domain.RecordA:
		record = &dns.A{
			A: net.ParseIP(model.Data.IP.ValueString()),
		}
	case domain.RecordAAAA:
		record = &dns.AAAA{
			AAAA: net.ParseIP(model.Data.IP.ValueString()),
		}
	case domain.RecordCAA:
		record = &dns.CAA{
			Flag:  uint8(model.Data.Flags.ValueInt64()),
			Tag:   model.Data.Tag.ValueString(),
			Value: model.Data.Value.ValueString(),
		}
	case domain.RecordCNAME:
		record = &dns.CNAME{
			Target: dns.Fqdn(model.Data.Target.ValueString()),
		}
	case domain.RecordDNAME:
		record = &dns.DNAME{
			Target: dns.Fqdn(model.Data.Target.ValueString()),
		}
	case domain.RecordDS:
		record = &dns.DS{
			KeyTag:     uint16(model.Data.KeyTag.ValueInt64()),
			Algorithm:  uint8(model.Data.Algorithm.ValueInt64()),
			DigestType: uint8(model.Data.DigestType.ValueInt64()),
			Digest:     model.Data.Digest.ValueString(),
		}
	case domain.RecordMX:
		record = &dns.MX{
			Preference: uint16(model.Data.Priority.ValueInt64()),
			Mx:         dns.Fqdn(model.Data.Target.ValueString()),
		}
	case domain.RecordNS:
		record = &dns.NS{
			Ns: dns.Fqdn(model.Data.Target.ValueString()),
		}
	case domain.RecordSMIMEA:
		record = &dns.SMIMEA{
			Usage:        uint8(model.Data.Priority.ValueInt64()),
			Selector:     uint8(model.Data.Selector.ValueInt64()),
			MatchingType: uint8(model.Data.MatchingType.ValueInt64()),
			Certificate:  model.Data.CertAssocData.ValueString(),
		}
	case domain.RecordSRV:
		record = &dns.SRV{
			Priority: uint16(model.Data.Priority.ValueInt64()),
			Weight:   uint16(model.Data.Weight.ValueInt64()),
			Port:     uint16(model.Data.Port.ValueInt64()),
			Target:   dns.Fqdn(model.Data.Target.ValueString()),
		}
	case domain.RecordSSHFP:
		record = &dns.SSHFP{
			Algorithm:   uint8(model.Data.FingerprintAlgorithm.ValueInt64()),
			Type:        uint8(model.Data.FingerprintType.ValueInt64()),
			FingerPrint: model.Data.Fingerprint.ValueString(),
		}
	case domain.RecordTLSA:
		record = &dns.TLSA{
			Usage:        uint8(model.Data.Priority.ValueInt64()),
			Selector:     uint8(model.Data.Selector.ValueInt64()),
			MatchingType: uint8(model.Data.MatchingType.ValueInt64()),
			Certificate:  model.Data.CertAssocData.ValueString(),
		}
	case domain.RecordTXT:
		record = &dns.TXT{
			Txt: []string{model.Data.Value.ValueString()},
		}
	}
	// Fix: unhandled record types (e.g. PTR, which ParseRawTarget accepts
	// but is not built here) previously dereferenced a nil dns.RR.
	if record == nil {
		return ""
	}
	// dns.RR.String() prints "<header><rdata>"; strip the header to keep
	// only the raw record data.
	return strings.TrimPrefix(record.String(), record.Header().String())
}
// ParseRawTarget decodes a raw DNS RDATA string (as returned by the API)
// into the structured Data block, dispatching on the parsed record type.
// It returns an error when the raw value cannot be parsed or the resulting
// record type is not supported.
func (model *RecordModel) ParseRawTarget(raw string) error {
	// We need to prepend a fake name to make dns.NewRR happy
	full := fmt.Sprintf("example.com. 3600 IN %s %s", model.Type.ValueString(), raw)
	rr, err := dns.NewRR(full)
	if err != nil {
		return fmt.Errorf("failed to parse DNS record: %w", err)
	}
	// Fix: allocate the data block when absent so the assignments below
	// cannot dereference a nil pointer (e.g. right after an import).
	if model.Data == nil {
		model.Data = &RecordDataModel{}
	}
	switch v := rr.(type) {
	case *dns.A:
		model.Data.IP = types.StringValue(v.A.String())
	case *dns.AAAA:
		model.Data.IP = types.StringValue(v.AAAA.String())
	case *dns.CAA:
		model.Data.Flags = types.Int64Value(int64(v.Flag))
		model.Data.Tag = types.StringValue(v.Tag)
		model.Data.Value = types.StringValue(v.Value)
	case *dns.CNAME:
		model.Data.Target = types.StringValue(strings.TrimSuffix(v.Target, "."))
	case *dns.DNAME:
		model.Data.Target = types.StringValue(strings.TrimSuffix(v.Target, "."))
	case *dns.DS:
		model.Data.KeyTag = types.Int64Value(int64(v.KeyTag))
		model.Data.Algorithm = types.Int64Value(int64(v.Algorithm))
		model.Data.DigestType = types.Int64Value(int64(v.DigestType))
		model.Data.Digest = types.StringValue(v.Digest)
	case *dns.MX:
		model.Data.Priority = types.Int64Value(int64(v.Preference))
		model.Data.Target = types.StringValue(strings.TrimSuffix(v.Mx, "."))
	case *dns.NS:
		model.Data.Target = types.StringValue(strings.TrimSuffix(v.Ns, "."))
	case *dns.PTR:
		model.Data.Target = types.StringValue(strings.TrimSuffix(v.Ptr, "."))
	case *dns.SMIMEA:
		model.Data.Priority = types.Int64Value(int64(v.Usage))
		model.Data.Selector = types.Int64Value(int64(v.Selector))
		model.Data.MatchingType = types.Int64Value(int64(v.MatchingType))
		model.Data.CertAssocData = types.StringValue(v.Certificate)
	case *dns.SRV:
		model.Data.Priority = types.Int64Value(int64(v.Priority))
		model.Data.Weight = types.Int64Value(int64(v.Weight))
		model.Data.Port = types.Int64Value(int64(v.Port))
		model.Data.Target = types.StringValue(strings.TrimSuffix(v.Target, "."))
	case *dns.SSHFP:
		model.Data.FingerprintAlgorithm = types.Int64Value(int64(v.Algorithm))
		model.Data.FingerprintType = types.Int64Value(int64(v.Type))
		model.Data.Fingerprint = types.StringValue(v.FingerPrint)
	case *dns.TLSA:
		model.Data.Priority = types.Int64Value(int64(v.Usage))
		model.Data.Selector = types.Int64Value(int64(v.Selector))
		model.Data.MatchingType = types.Int64Value(int64(v.MatchingType))
		model.Data.CertAssocData = types.StringValue(v.Certificate)
	case *dns.TXT:
		// Only the first TXT string is modelled; multi-string TXT records
		// keep their first segment.
		if len(v.Txt) > 0 {
			model.Data.Value = types.StringValue(v.Txt[0])
		}
	default:
		return fmt.Errorf("unsupported record type: %T", rr)
	}
	return nil
}
package domain
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/provider"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Compile-time checks: *recordResource must implement the resource,
// configure, import-state and plan-modification interfaces.
var (
_ resource.Resource = &recordResource{}
_ resource.ResourceWithConfigure = &recordResource{}
_ resource.ResourceWithImportState = &recordResource{}
_ resource.ResourceWithModifyPlan = &recordResource{}
)

// NewRecordResource is a helper function to simplify the provider implementation.
func NewRecordResource() resource.Resource {
return &recordResource{}
}

// recordResource manages a DNS record inside an Infomaniak zone.
type recordResource struct {
// client is the provider-wide API client; assigned in Configure.
client *apis.Client
}
// RecordModel is the Terraform model backing the record resource. A record
// is either described structurally via Data (rendered to RDATA by
// ComputeRawTarget) or directly via Target; ComputedTarget always holds the
// rendered raw value (set in ModifyPlan).
type RecordModel struct {
ZoneFqdn types.String `tfsdk:"zone_fqdn"`
Id types.Int64 `tfsdk:"id"`
Type types.String `tfsdk:"type"`
Source types.String `tfsdk:"source"`
ComputedTarget types.String `tfsdk:"computed_target"`
Target types.String `tfsdk:"target"`
TTL types.Int64 `tfsdk:"ttl"`
Description types.String `tfsdk:"description"`
Data *RecordDataModel `tfsdk:"data"`
}

// RecordDataModel is the typed view of a record's RDATA; which fields are
// meaningful depends on the record type (see the per-field comments).
type RecordDataModel struct {
IP types.String `tfsdk:"ip"` // A, AAAA
Priority types.Int64 `tfsdk:"priority"` // MX, SRV
Target types.String `tfsdk:"target"` // MX, SRV, CNAME, NS, PTR
Weight types.Int64 `tfsdk:"weight"` // SRV
Port types.Int64 `tfsdk:"port"` // SRV
KeyTag types.Int64 `tfsdk:"key_tag"` // DS
Algorithm types.Int64 `tfsdk:"algorithm"` // DNSKEY, DS, SSHFP, TLSA
DigestType types.Int64 `tfsdk:"digest_type"` // DS, TLSA
Digest types.String `tfsdk:"digest"` // DS, TLSA, SSHFP
Selector types.Int64 `tfsdk:"selector"` // SMIMEA, TLSA
MatchingType types.Int64 `tfsdk:"matching_type"` // SMIMEA, TLSA
CertAssocData types.String `tfsdk:"cert_assoc_data"` // SMIMEA, TLSA
Flags types.Int64 `tfsdk:"flags"` // CAA, DNSKEY
Tag types.String `tfsdk:"tag"` // CAA
Value types.String `tfsdk:"value"` // CAA, TXT
Fingerprint types.String `tfsdk:"fingerprint"` // SSHFP
FingerprintType types.Int64 `tfsdk:"fingerprint_type"` // SSHFP
FingerprintAlgorithm types.Int64 `tfsdk:"fingerprint_algorithm"` // SSHFP
}
// Metadata returns the resource type name.
func (r *recordResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	const suffix = "_record"
	resp.TypeName = req.ProviderTypeName + suffix
}
// Configure injects the provider-configured API client into the resource.
func (r *recordResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// ProviderData is populated only after the ConfigureProvider RPC;
	// nil is expected on earlier calls and is not an error.
	if req.ProviderData == nil {
		return
	}
	apiClient, err := provider.GetApiClient(req.ProviderData)
	if err == nil {
		r.client = apiClient
		return
	}
	resp.Diagnostics.AddError(
		"Unexpected Record Resource Configure Type",
		err.Error(),
	)
}
// Schema exposes the record resource schema (defined in its own file).
func (r *recordResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
	schema := getRecordResourceSchema()
	resp.Schema = schema
}
// ModifyPlan derives computed_target from either `target` or `data` so the
// planned value matches what Create/Update will send to the API.
func (r *recordResource) ModifyPlan(ctx context.Context, req resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) {
	// A null raw plan means the resource is being destroyed; nothing to compute.
	if req.Plan.Raw.IsNull() {
		return
	}
	var plan RecordModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
	plan.ComputedTarget = types.StringValue(plan.ComputeRawTarget())
	// Write the amended plan back.
	resp.Diagnostics.Append(resp.Plan.Set(ctx, &plan)...)
}
// Create provisions the record in its zone and stores the API-assigned id.
func (r *recordResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Decode the planned configuration.
	var plan RecordModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
	// The API takes a single raw target string, derived from either
	// `target` or the structured `data` attribute.
	record, err := r.client.Domain.CreateRecord(
		plan.ZoneFqdn.ValueString(),
		plan.Type.ValueString(),
		plan.Source.ValueString(),
		plan.ComputeRawTarget(),
		plan.TTL.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when creating Record",
			err.Error(),
		)
		return
	}
	plan.Id = types.Int64Value(int64(record.ID))
	resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
}
// Read refreshes the record from the API into Terraform state.
func (r *recordResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Decode the prior state.
	var state RecordModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	record, err := r.client.Domain.GetRecord(state.ZoneFqdn.ValueString(), state.Id.ValueInt64())
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when reading Record",
			err.Error(),
		)
		return
	}
	// Refresh the attributes the API is authoritative for.
	state.Id = types.Int64Value(int64(record.ID))
	state.TTL = types.Int64Value(int64(record.TTL))
	state.Source = types.StringValue(record.Source)
	state.Type = types.StringValue(record.Type)
	// After an import neither `target` nor `data` is known. We cannot tell
	// which form the user plans to configure, so both are reconstructed
	// from the raw target returned by the API.
	if state.Target.IsNull() && state.Data == nil {
		state.Target = types.StringValue(record.Target)
		state.Data = &RecordDataModel{}
		state.ParseRawTarget(record.Target)
	}
	// Save updated data into Terraform state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
}
// Update pushes the planned record attributes to the API, keeping the id
// from prior state.
func (r *recordResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	var plan RecordModel
	var prior RecordModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	resp.Diagnostics.Append(req.State.Get(ctx, &prior)...)
	if resp.Diagnostics.HasError() {
		return
	}
	record, err := r.client.Domain.UpdateRecord(
		plan.ZoneFqdn.ValueString(),
		prior.Id.ValueInt64(),
		plan.Type.ValueString(),
		plan.Source.ValueString(),
		plan.ComputeRawTarget(),
		plan.TTL.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when updating Record",
			err.Error(),
		)
		return
	}
	plan.Id = types.Int64Value(int64(record.ID))
	// Persist the updated plan into Terraform state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
}
// Delete removes the record from its zone via the Infomaniak API.
func (r *recordResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	var state RecordModel
	// Read Terraform prior state data into the model.
	// (The previous implementation decoded the state twice into two
	// identical models; a single decode is sufficient.)
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	_, err := r.client.Domain.DeleteRecord(
		state.ZoneFqdn.ValueString(),
		state.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when deleting Record",
			err.Error(),
		)
		return
	}
}
// ImportState supports `terraform import` with a composite identifier of
// the form "<zone_fqdn>,<record_id>" (e.g. "example.com,123"). Only the
// two key attributes are seeded here; Read reconstructs the rest.
func (r *recordResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	idParts := strings.Split(req.ID, ",")
	// Both parts must be present and non-empty.
	if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
		resp.Diagnostics.AddError(
			"Unexpected Import Identifier",
			fmt.Sprintf("Expected import identifier with format: zone_fqdn,id. Got: %q", req.ID),
		)
		return
	}
	var errorList error
	zoneFQDN := idParts[0]
	// The record id must be a base-10 integer.
	recordId, err := strconv.ParseInt(idParts[1], 10, 64)
	// NOTE(review): errors.Join over a single error is redundant — a plain
	// `err != nil` check would suffice. Kept as-is because the file's import
	// block is managed elsewhere.
	errorList = errors.Join(errorList, err)
	if errorList != nil {
		resp.Diagnostics.AddError(
			"Unexpected Import Identifier",
			fmt.Sprintf("Expected import identifier with format: zone_fqdn,id. Got: %q", req.ID),
		)
		return
	}
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("zone_fqdn"), zoneFQDN)...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), recordId)...)
}
package domain
import (
"terraform-provider-infomaniak/internal/apis/domain"
"github.com/hashicorp/terraform-plugin-framework-validators/objectvalidator"
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
)
// getRecordResourceSchema builds the Terraform schema for the record
// resource. `target` (raw string) and `data` (structured components) are
// mutually exclusive ways to express the record value; `computed_target`
// always reflects the raw value sent to the API.
func getRecordResourceSchema() schema.Schema {
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			"zone_fqdn": schema.StringAttribute{
				Required:            true,
				MarkdownDescription: "The FQDN of the zone where the record should be put in.",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},
			"source": schema.StringAttribute{
				Required:            true,
				MarkdownDescription: "The source of the Record.",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"type": schema.StringAttribute{
				Required:            true,
				MarkdownDescription: "The type of the Record.",
				Validators: []validator.String{
					stringvalidator.OneOf(domain.RecordTypes...),
				},
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"description": schema.StringAttribute{
				Optional:            true,
				MarkdownDescription: "The description of the Record.",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"ttl": schema.Int64Attribute{
				Optional:            true,
				Computed:            true,
				MarkdownDescription: "The TTL of the Record.",
				Default:             int64default.StaticInt64(3600),
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.UseStateForUnknown(),
				},
			},
			"computed_target": schema.StringAttribute{
				Computed:            true,
				MarkdownDescription: "The computed target of the Record.",
			},
			"target": schema.StringAttribute{
				Optional:            true,
				MarkdownDescription: "The target of the Record.",
				Validators: []validator.String{
					stringvalidator.ConflictsWith(path.MatchRoot("data")),
				},
			},
			"data": schema.SingleNestedAttribute{
				Description: "Components of a DNS record.",
				Optional:    true,
				Validators: []validator.Object{
					objectvalidator.All(
						objectvalidator.ConflictsWith(path.MatchRoot("target")),
					),
				},
				Attributes: map[string]schema.Attribute{
					// For A, AAAA
					"ip": schema.StringAttribute{
						Optional:            true,
						MarkdownDescription: "IP for the record",
					},
					// For MX, SRV, TLSA, SMIMEA, SSHFP
					"priority": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The priority/usage/weight of the Record (MX, SRV, TLSA, SMIMEA).",
					},
					// For SRV
					"weight": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The weight of the Record (SRV).",
					},
					"port": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The port of the Record (SRV).",
					},
					// For CAA
					"flags": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The flags of the Record (CAA).",
					},
					"tag": schema.StringAttribute{
						Optional:            true,
						MarkdownDescription: "The tag of the Record (CAA).",
					},
					// For DNSKEY
					"algorithm": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The algorithm of the Record (DNSKEY, DS, SSHFP).",
					},
					// For DS
					"key_tag": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The Key Tag of the Record (DS).",
					},
					"digest_type": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The digest type of the Record (DS).",
					},
					"digest": schema.StringAttribute{
						Optional:            true,
						MarkdownDescription: "The digest of the Record (DS).",
					},
					// For TLSA / SMIMEA
					"selector": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The selector of the Record (TLSA, SMIMEA).",
					},
					"matching_type": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The matching type of the Record (TLSA, SMIMEA).",
					},
					"cert_assoc_data": schema.StringAttribute{
						Optional:            true,
						MarkdownDescription: "The certificate association data (TLSA, SMIMEA).",
					},
					// For SSHFP
					// Fixed copy-pasted description: this attribute is SSHFP-specific,
					// unlike the generic "algorithm" above.
					"fingerprint_algorithm": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The fingerprint algorithm of the Record (SSHFP).",
					},
					"fingerprint_type": schema.Int64Attribute{
						Optional:            true,
						MarkdownDescription: "The fingerprint type of the Record (SSHFP).",
					},
					"fingerprint": schema.StringAttribute{
						Optional:            true,
						MarkdownDescription: "The fingerprint of the Record (SSHFP).",
					},
					"target": schema.StringAttribute{
						Optional:            true,
						MarkdownDescription: "The target of the Record (MX, CNAME, DNAME, NS, PTR, etc).",
					},
					// For generic text value (e.g. TXT, CAA value, etc)
					"value": schema.StringAttribute{
						Optional:            true,
						MarkdownDescription: "The value of the Record (TXT, CAA, etc).",
					},
				},
			},
			"id": schema.Int64Attribute{
				Computed:            true,
				MarkdownDescription: "The id of the Record.",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.UseStateForUnknown(),
				},
			},
		},
		MarkdownDescription: "The record resource allows the user to manage a record inside a zone of a domain",
	}
}
package domain
import "terraform-provider-infomaniak/internal/provider/registry"
// Register wires the domain package's resources (zone and record) into the
// provider's resource registry.
func Register() {
	registry.RegisterResource(NewZoneResource)
	registry.RegisterResource(NewRecordResource)
}
package domain
import (
"context"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/provider"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Compile-time checks that zoneResource satisfies the framework interfaces
// it is expected to implement.
var (
	_ resource.Resource                = &zoneResource{}
	_ resource.ResourceWithConfigure   = &zoneResource{}
	_ resource.ResourceWithImportState = &zoneResource{}
)
// NewZoneResource returns a fresh, unconfigured zone resource
// for registration with the provider.
func NewZoneResource() resource.Resource {
	var r zoneResource
	return &r
}
// zoneResource implements the zone Terraform resource for a domain.
type zoneResource struct {
	// client is the Infomaniak API client, injected by Configure.
	client *apis.Client
}
// ZoneModel is the Terraform plan/state representation of a DNS zone.
type ZoneModel struct {
	Fqdn types.String `tfsdk:"fqdn"` // zone FQDN, forces replacement on change
	Id   types.Int64  `tfsdk:"id"`   // API-assigned zone id
}
// Metadata sets the resource type name, e.g. "infomaniak_zone".
func (r *zoneResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	const suffix = "_zone"
	resp.TypeName = req.ProviderTypeName + suffix
}
// Configure stores the provider-configured API client on the resource.
func (r *zoneResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// ProviderData is only populated after Terraform calls the
	// ConfigureProvider RPC, so it may legitimately be nil here.
	if req.ProviderData == nil {
		return
	}
	client, err := provider.GetApiClient(req.ProviderData)
	if err == nil {
		r.client = client
		return
	}
	resp.Diagnostics.AddError(
		"Unexpected Zone Resource Configure Type",
		err.Error(),
	)
}
// Schema exposes the zone resource schema (defined in its own file).
func (r *zoneResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
	schema := getZoneResourceSchema()
	resp.Schema = schema
}
// Create provisions the zone and stores its API-assigned id in state.
func (r *zoneResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Decode the planned configuration.
	var plan ZoneModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
	zone, err := r.client.Domain.CreateZone(plan.Fqdn.ValueString())
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when creating Zone",
			err.Error(),
		)
		return
	}
	plan.Id = types.Int64Value(int64(zone.ID))
	resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
}
// Read refreshes the zone attributes from the API into Terraform state.
func (r *zoneResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Decode the prior state.
	var state ZoneModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	zone, err := r.client.Domain.GetZone(state.Fqdn.ValueString())
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when reading Zone",
			err.Error(),
		)
		return
	}
	// Refresh the attributes the API is authoritative for.
	state.Id = types.Int64Value(int64(zone.ID))
	state.Fqdn = types.StringValue(zone.FQDN)
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
}
// Update performs no API call: `fqdn` forces replacement, so the only work
// is to persist the planned values back into state.
func (r *zoneResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	var plan ZoneModel
	var prior ZoneModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	resp.Diagnostics.Append(req.State.Get(ctx, &prior)...)
	if resp.Diagnostics.HasError() {
		return
	}
	resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
}
// Delete removes the zone via the Infomaniak API.
func (r *zoneResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// Decode the prior state.
	var state ZoneModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	if _, err := r.client.Domain.DeleteZone(state.Fqdn.ValueString()); err != nil {
		resp.Diagnostics.AddError(
			"Error when deleting Zone",
			err.Error(),
		)
	}
}
// ImportState supports `terraform import` using the zone FQDN as the import
// ID; Read fills in the remaining attributes afterwards.
func (r *zoneResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("fqdn"), req.ID)...)
}
package domain
import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
)
// getZoneResourceSchema builds the Terraform schema for the zone resource:
// a required, replacement-forcing `fqdn` and a computed `id`.
func getZoneResourceSchema() schema.Schema {
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			"fqdn": schema.StringAttribute{
				Required:            true,
				MarkdownDescription: "The fqdn of the zone",
				// A zone cannot be renamed in place; changing the FQDN replaces it.
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},
			"id": schema.Int64Attribute{
				Computed:            true,
				MarkdownDescription: "The unique identifier for the zone. Mandatory for acceptance testing.",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.UseStateForUnknown(),
				},
			},
		},
		MarkdownDescription: "The Zone resource allows the user to manage a zone for a domain project",
	}
}
package kaas
import (
"context"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/provider"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource              = &kaasDataSource{}
	_ datasource.DataSourceWithConfigure = &kaasDataSource{}
)
// kaasDataSource implements the kaas Terraform data source.
type kaasDataSource struct {
	// client is the Infomaniak API client, injected by Configure.
	client *apis.Client
}
// NewKaasDataSource is a helper function to simplify the provider implementation.
func NewKaasDataSource() datasource.DataSource {
	var d kaasDataSource
	return &d
}
// Configure stores the provider-configured API client on the data source.
func (d *kaasDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	// ProviderData is only populated after Terraform calls the
	// ConfigureProvider RPC, so it may legitimately be nil here.
	if req.ProviderData == nil {
		return
	}
	client, err := provider.GetApiClient(req.ProviderData)
	if err == nil {
		d.client = client
		return
	}
	resp.Diagnostics.AddError(
		"Unexpected Data Source Configure Type",
		err.Error(),
	)
}
// Schema defines the schema for the data source.
func (d *kaasDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	schema := getKaasDataSourceSchema()
	resp.Schema = schema
}
// Read refreshes the Terraform state with the latest data: the KaaS object
// itself, its kubeconfig, and the apiserver parameters.
func (d *kaasDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	var data KaasModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
	// Bail out on decode errors instead of querying the API with
	// zero-valued identifiers (the resource implementations do the same).
	if resp.Diagnostics.HasError() {
		return
	}
	obj, err := d.client.Kaas.GetKaas(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Unable to find KaaS",
			err.Error(),
		)
		return
	}
	kubeconfig, err := d.client.Kaas.GetKubeconfig(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Unable to get kubeconfig from KaaS",
			err.Error(),
		)
		return
	}
	data.Kubeconfig = types.StringValue(kubeconfig)
	data.Region = types.StringValue(obj.Region)
	data.KubernetesVersion = types.StringValue(obj.KubernetesVersion)
	apiserverParams, err := d.client.Kaas.GetApiserverParams(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.Id.ValueInt64(),
	)
	if err != nil {
		// Fixed copy-pasted title: this call fetches apiserver params, not only OIDC.
		resp.Diagnostics.AddError(
			"Unable to get Apiserver params from KaaS",
			err.Error(),
		)
		return
	}
	if apiserverParams != nil {
		data.fillApiserverState(ctx, apiserverParams)
	}
	// Set state
	diags := resp.State.Set(ctx, &data)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
}
// Metadata returns the data source type name, e.g. "infomaniak_kaas".
func (d *kaasDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	const suffix = "_kaas"
	resp.TypeName = req.ProviderTypeName + suffix
}
package kaas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// getKaasDataSourceSchema builds the Terraform schema for the kaas data
// source: required identifiers plus computed cluster attributes, including
// the sensitive kubeconfig and the nested apiserver parameters.
func getKaasDataSourceSchema() schema.Schema {
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			"public_cloud_id": schema.Int64Attribute{
				Required:            true,
				Description:         "The id of the public cloud where KaaS is installed",
				MarkdownDescription: "The id of the public cloud where KaaS is installed",
			},
			"public_cloud_project_id": schema.Int64Attribute{
				Required:            true,
				Description:         "The id of the public cloud project where KaaS is installed",
				MarkdownDescription: "The id of the public cloud project where KaaS is installed",
			},
			"id": schema.Int64Attribute{
				Required:            true,
				Description:         "The id of this KaaS",
				MarkdownDescription: "The id of this KaaS",
			},
			"name": schema.StringAttribute{
				Computed:            true,
				Description:         "The name of the KaaS project",
				MarkdownDescription: "The name of the KaaS project",
			},
			"pack_name": schema.StringAttribute{
				Computed:            true,
				Description:         "The name of the pack associated to the KaaS project",
				MarkdownDescription: "The name of the pack associated to the KaaS project",
			},
			"region": schema.StringAttribute{
				Computed:            true,
				Description:         "The region where the KaaS project resides in.",
				MarkdownDescription: "The region where the KaaS project resides in.",
			},
			// Sensitive: the kubeconfig grants cluster access.
			"kubeconfig": schema.StringAttribute{
				Computed:            true,
				Sensitive:           true,
				Description:         "The kubeconfig generated to access the KaaS project",
				MarkdownDescription: "The kubeconfig generated to access the KaaS project",
			},
			"kubernetes_version": schema.StringAttribute{
				Computed:            true,
				Description:         "The version of Kubernetes associated with the KaaS project",
				MarkdownDescription: "The version of Kubernetes associated with the KaaS project",
			},
			"apiserver": schema.SingleNestedAttribute{
				Description:         "Kubernetes Apiserver editable params",
				MarkdownDescription: "Kubernetes Apiserver editable params",
				Attributes: map[string]schema.Attribute{
					// Fixed grammar in the user-facing description ("abstracts" -> "abstract").
					"params": schema.MapAttribute{
						ElementType:         types.StringType,
						Optional:            true,
						Description:         "Map of Kubernetes Apiserver params in case the terraform provider does not already abstract them",
						MarkdownDescription: "Map of Kubernetes Apiserver params in case the terraform provider does not already abstract them",
					},
					"audit": schema.SingleNestedAttribute{
						MarkdownDescription: "Kubernetes audit logs specification files",
						Computed:            true,
						Attributes: map[string]schema.Attribute{
							"webhook_config": schema.StringAttribute{
								MarkdownDescription: "YAML manifest for audit webhook config",
								Computed:            true,
							},
							"policy": schema.StringAttribute{
								MarkdownDescription: "YAML manifest for audit policy",
								Computed:            true,
							},
						},
					},
					"oidc": schema.SingleNestedAttribute{
						Description:         "OIDC specific Apiserver params",
						MarkdownDescription: "OIDC specific Apiserver params",
						Computed:            true,
						Attributes: map[string]schema.Attribute{
							"ca": schema.StringAttribute{
								Computed:            true,
								Description:         "OIDC Ca Certificate",
								MarkdownDescription: "OIDC Ca Certificate",
							},
							"groups_claim": schema.StringAttribute{
								Computed:            true,
								MarkdownDescription: "OIDC groups claim",
							},
							"groups_prefix": schema.StringAttribute{
								Computed:            true,
								MarkdownDescription: "OIDC groups prefix",
							},
							"issuer_url": schema.StringAttribute{
								Computed:            true,
								Description:         "OIDC issuer URL",
								MarkdownDescription: "OIDC issuer URL",
							},
							"client_id": schema.StringAttribute{
								Computed:            true,
								Description:         "OIDC client identifier",
								MarkdownDescription: "OIDC client identifier",
							},
							// NOTE(review): username_claim is Optional while sibling
							// attributes are Computed — confirm this asymmetry is intended.
							"username_claim": schema.StringAttribute{
								Optional:            true,
								Description:         "OIDC username claim",
								MarkdownDescription: "OIDC username claim",
							},
							"username_prefix": schema.StringAttribute{
								Computed:            true,
								Description:         "OIDC username prefix",
								MarkdownDescription: "OIDC username prefix",
							},
							"required_claim": schema.StringAttribute{
								Computed:            true,
								MarkdownDescription: "A key=value pair that describes a required claim in the ID Token.",
							},
							"signing_algs": schema.StringAttribute{
								Computed:            true,
								Description:         "OIDC signing algorithm. Kubernetes will default it to RS256",
								MarkdownDescription: "OIDC signing algorithm. Kubernetes will default it to RS256",
							},
						},
					},
				},
				Optional: true,
			},
		},
		MarkdownDescription: "The kaas data source allows the user to manage a kaas project",
	}
}
package kaas
import (
"context"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/provider"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource              = &kaasInstancePoolDataSource{}
	_ datasource.DataSourceWithConfigure = &kaasInstancePoolDataSource{}
)
// kaasInstancePoolDataSource implements the kaas_instance_pool data source.
type kaasInstancePoolDataSource struct {
	// client is the Infomaniak API client, injected by Configure.
	client *apis.Client
}
// NewKaasInstancePoolDataSource is a helper function to simplify the provider implementation.
func NewKaasInstancePoolDataSource() datasource.DataSource {
	var d kaasInstancePoolDataSource
	return &d
}
// Configure stores the provider-configured API client on the data source.
func (d *kaasInstancePoolDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	// ProviderData is only populated after Terraform calls the
	// ConfigureProvider RPC, so it may legitimately be nil here.
	if req.ProviderData == nil {
		return
	}
	client, err := provider.GetApiClient(req.ProviderData)
	if err == nil {
		d.client = client
		return
	}
	resp.Diagnostics.AddError(
		"Unexpected Data Source Configure Type",
		err.Error(),
	)
}
// Schema defines the schema for the data source.
func (d *kaasInstancePoolDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	schema := getKaasInstancePoolDataSourceSchema()
	resp.Schema = schema
}
// Read refreshes the Terraform state with the latest instance pool data.
func (d *kaasInstancePoolDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	var data KaasInstancePoolModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
	// Bail out on decode errors instead of querying the API with
	// zero-valued identifiers (the resource implementations do the same).
	if resp.Diagnostics.HasError() {
		return
	}
	obj, err := d.client.Kaas.GetInstancePool(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.KaasId.ValueInt64(),
		data.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Unable to find KaaS instance pool",
			err.Error(),
		)
		return
	}
	data.Id = types.Int64Value(obj.Id)
	data.Name = types.StringValue(obj.Name)
	// Fix: availability_zone is declared Computed in the schema but was
	// never populated from the API object.
	data.AvailabilityZone = types.StringValue(obj.AvailabilityZone)
	data.FlavorName = types.StringValue(obj.FlavorName)
	data.MinInstances = types.Int64Value(obj.MinInstances)
	data.MaxInstances = types.Int64Value(obj.MaxInstances)
	labels, diags := types.MapValueFrom(ctx, types.StringType, obj.Labels)
	resp.Diagnostics.Append(diags...)
	data.Labels = labels
	// Set state
	diags = resp.State.Set(ctx, &data)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
}
// Metadata returns the data source type name, e.g. "infomaniak_kaas_instance_pool".
func (d *kaasInstancePoolDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	const suffix = "_kaas_instance_pool"
	resp.TypeName = req.ProviderTypeName + suffix
}
package kaas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// getKaasInstancePoolDataSourceSchema builds the Terraform schema for the
// kaas_instance_pool data source: required lookup identifiers plus computed
// pool attributes.
func getKaasInstancePoolDataSourceSchema() schema.Schema {
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			"public_cloud_id": schema.Int64Attribute{
				Required:    true,
				Description: "The id of the public cloud where KaaS is installed",
			},
			"public_cloud_project_id": schema.Int64Attribute{
				Required:    true,
				Description: "The id of the public cloud project where KaaS is installed",
			},
			"kaas_id": schema.Int64Attribute{
				Required:    true,
				Description: "The id of the kaas project.",
			},
			"id": schema.Int64Attribute{
				Required:    true,
				Description: "The unique identifier for the instance pool.",
			},
			"name": schema.StringAttribute{
				Computed:    true,
				Description: "The name of this instance pool",
			},
			"availability_zone": schema.StringAttribute{
				Computed:            true,
				Description:         "The availability zone for the instances in the pool",
				MarkdownDescription: "The availability zone for the instances in the pool",
			},
			"flavor_name": schema.StringAttribute{
				Computed:    true,
				Description: "The flavor name of the instance in this instance pool",
			},
			"min_instances": schema.Int64Attribute{
				Computed:    true,
				Description: "The minimum amount of instances in the instance pool",
			},
			"max_instances": schema.Int64Attribute{
				Computed:    true,
				Description: "The maximum amount of instances in the instance pool",
			},
			// Kubernetes node labels applied to instances of the pool.
			"labels": schema.MapAttribute{
				ElementType: types.StringType,
				Computed:    true,
				Description: "Kubernetes node labels",
			},
		},
		MarkdownDescription: "The KaaS Instance Pool data source retrieves information about a KaaS instance pool.",
	}
}
package kaas
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/apis/kaas"
"terraform-provider-infomaniak/internal/provider"
"time"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Compile-time checks that kaasInstancePoolResource satisfies the framework
// interfaces it is expected to implement.
var (
	_ resource.Resource              = &kaasInstancePoolResource{}
	_ resource.ResourceWithConfigure = &kaasInstancePoolResource{}
)
// NewKaasInstancePoolResource returns a fresh, unconfigured instance pool
// resource for registration with the provider.
func NewKaasInstancePoolResource() resource.Resource {
	var r kaasInstancePoolResource
	return &r
}
// kaasInstancePoolResource implements the kaas_instance_pool Terraform resource.
type kaasInstancePoolResource struct {
	// client is the Infomaniak API client, injected by Configure.
	client *apis.Client
}
// KaasInstancePoolModel is the Terraform plan/state representation of a
// KaaS instance pool.
type KaasInstancePoolModel struct {
	PublicCloudId        types.Int64  `tfsdk:"public_cloud_id"`         // owning public cloud
	PublicCloudProjectId types.Int64  `tfsdk:"public_cloud_project_id"` // owning project
	KaasId               types.Int64  `tfsdk:"kaas_id"`                 // owning KaaS cluster
	Id                   types.Int64  `tfsdk:"id"`                      // API-assigned pool id
	Name                 types.String `tfsdk:"name"`
	AvailabilityZone     types.String `tfsdk:"availability_zone"`
	FlavorName           types.String `tfsdk:"flavor_name"`
	MinInstances         types.Int64  `tfsdk:"min_instances"` // autoscaling lower bound
	MaxInstances         types.Int64  `tfsdk:"max_instances"` // autoscaling upper bound
	Labels               types.Map    `tfsdk:"labels"`        // Kubernetes node labels
}
// Metadata sets the resource type name, e.g. "infomaniak_kaas_instance_pool".
func (r *kaasInstancePoolResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	const suffix = "_kaas_instance_pool"
	resp.TypeName = req.ProviderTypeName + suffix
}
// Configure stores the provider-configured API client on the resource.
func (r *kaasInstancePoolResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// ProviderData is only populated after Terraform calls the
	// ConfigureProvider RPC, so it may legitimately be nil here.
	if req.ProviderData == nil {
		return
	}
	client, err := provider.GetApiClient(req.ProviderData)
	if err == nil {
		r.client = client
		return
	}
	resp.Diagnostics.AddError(
		"Unexpected Resource Configure Type",
		err.Error(),
	)
}
// Schema exposes the instance pool resource schema (defined in its own file).
func (r *kaasInstancePoolResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
	schema := getKaasInstancePoolResourceSchema()
	resp.Schema = schema
}
// Create provisions the instance pool, persists a partial state with the
// new id immediately (so a failed wait does not orphan the resource), then
// blocks until the pool is Active and saves the fully-populated state.
func (r *kaasInstancePoolResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	var data KaasInstancePoolModel
	// Read Terraform plan data into the model.
	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
	if resp.Diagnostics.HasError() {
		return
	}
	// Translate the planned model into the API's input object.
	input := &kaas.InstancePool{
		KaasId:           data.KaasId.ValueInt64(),
		Name:             data.Name.ValueString(),
		AvailabilityZone: data.AvailabilityZone.ValueString(),
		FlavorName:       data.FlavorName.ValueString(),
		MinInstances:     data.MinInstances.ValueInt64(),
		MaxInstances:     data.MaxInstances.ValueInt64(),
		Labels:           r.getLabelsValues(data),
	}
	// CreateKaas API call logic
	instancePoolId, err := r.client.Kaas.CreateInstancePool(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		input,
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when creating KaaS instance pool",
			err.Error(),
		)
		return
	}
	data.Id = types.Int64Value(instancePoolId)
	// Save an intermediate state now so Terraform tracks the pool id even
	// if the wait below errors out or is cancelled.
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
	isScalingDown := false
	instancePoolObject, err := r.waitUntilActive(ctx, data, instancePoolId, isScalingDown)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when waiting for KaaS Instance Pool to be Active",
			err.Error(),
		)
		return
	}
	// A nil object (without error) means the context was cancelled; keep
	// the intermediate state as-is.
	if instancePoolObject == nil {
		return
	}
	data.fill(instancePoolObject)
	// Save the fully-populated data into Terraform state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
// getLabelsValues converts the model's Labels map attribute into a plain
// map[string]string, skipping null or unknown entries.
func (r *kaasInstancePoolResource) getLabelsValues(data KaasInstancePoolModel) map[string]string {
	out := make(map[string]string)
	if data.Labels.IsNull() || data.Labels.IsUnknown() {
		return out
	}
	for key, elem := range data.Labels.Elements() {
		str, ok := elem.(types.String)
		if !ok || str.IsNull() || str.IsUnknown() {
			continue
		}
		out[key] = str.ValueString()
	}
	return out
}
// waitUntilActive polls the instance pool every 5 seconds until it is
// Active, matches the planned minimum, is fully scaled, and sits within its
// autoscaling bounds — or until the context is cancelled or the API reports
// a fatal error.
//
// Returns (nil, nil) on context cancellation: callers treat a nil pool with
// no error as "stop silently".
func (r *kaasInstancePoolResource) waitUntilActive(ctx context.Context, data KaasInstancePoolModel, id int64, scalingDown bool) (*kaas.InstancePool, error) {
	scaleDownFailedQuotaCount := 0
	scaleDownFailedQuotaAllowedRetries := 5
	ticker := time.NewTicker(5 * time.Second)
	// Fix: the ticker was never stopped, leaking its resources on every call.
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return nil, nil
		case <-ticker.C:
			found, err := r.client.Kaas.GetInstancePool(
				data.PublicCloudId.ValueInt64(),
				data.PublicCloudProjectId.ValueInt64(),
				data.KaasId.ValueInt64(),
				id,
			)
			if err != nil {
				return nil, err
			}
			if len(found.ErrorMessages) > 0 {
				// Special case when we hit quota failure but we are scaling down.
				// OpenStack can take some time to update so we let it do its work.
				if (found.Status == "ScalingDown" || scalingDown) && scaleDownFailedQuotaCount <= scaleDownFailedQuotaAllowedRetries {
					scaleDownFailedQuotaCount++
					continue
				}
				return nil, errors.New(strings.Join(found.ErrorMessages, ","))
			}
			// We need the instance pool to be active, have the same state as us,
			// be scaled properly and be in bound of the autoscaling.
			isActive := found.Status == "Active"
			isEquivalent := found.MinInstances == data.MinInstances.ValueInt64()
			isScaledProperly := found.AvailableInstances == found.TargetInstances
			isInBound := found.MinInstances <= found.TargetInstances && found.TargetInstances <= found.MaxInstances
			if isActive && isEquivalent && isScaledProperly && isInBound {
				return found, nil
			}
		}
	}
}
// Read refreshes the instance pool attributes from the API into state.
func (r *kaasInstancePoolResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Decode the prior state.
	var state KaasInstancePoolModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	obj, err := r.client.Kaas.GetInstancePool(
		state.PublicCloudId.ValueInt64(),
		state.PublicCloudProjectId.ValueInt64(),
		state.KaasId.ValueInt64(),
		state.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when reading KaaS Instance Pool",
			err.Error(),
		)
		return
	}
	// Surface — but do not fail on — error messages reported by the API.
	if len(obj.ErrorMessages) > 0 {
		resp.Diagnostics.AddWarning(
			"KaaS was in error state:",
			strings.Join(obj.ErrorMessages, ","),
		)
	}
	state.fill(obj)
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
}
// Update pushes the planned instance pool changes to the API and waits for
// the pool to settle back into an Active, fully-scaled state.
func (r *kaasInstancePoolResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	var prior KaasInstancePoolModel
	var plan KaasInstancePoolModel

	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	resp.Diagnostics.Append(req.State.Get(ctx, &prior)...)
	if resp.Diagnostics.HasError() {
		return
	}

	update := &kaas.InstancePool{
		KaasId:       plan.KaasId.ValueInt64(),
		Id:           prior.Id.ValueInt64(),
		Name:         plan.Name.ValueString(),
		FlavorName:   plan.FlavorName.ValueString(),
		MinInstances: plan.MinInstances.ValueInt64(),
		MaxInstances: plan.MaxInstances.ValueInt64(),
		Labels:       r.getLabelsValues(plan),
	}
	if _, err := r.client.Kaas.UpdateInstancePool(
		plan.PublicCloudId.ValueInt64(),
		plan.PublicCloudProjectId.ValueInt64(),
		update,
	); err != nil {
		resp.Diagnostics.AddError(
			"Error when updating KaaS Instance Pool",
			err.Error(),
		)
		return
	}

	// A shrinking max_instances means the pool may be scaling down, which
	// relaxes quota-error handling while waiting.
	scalingDown := plan.MaxInstances.ValueInt64() < prior.MaxInstances.ValueInt64()
	pool, err := r.waitUntilActive(ctx, plan, prior.Id.ValueInt64(), scalingDown)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when waiting for KaaS Instance Pool to be Active",
			err.Error(),
		)
		return
	}
	if pool == nil {
		// Context was cancelled while waiting; nothing more to persist.
		return
	}

	plan.fill(pool)
	resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
}
// Delete removes the instance pool from the KaaS cluster.
func (r *kaasInstancePoolResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	var data KaasInstancePoolModel

	// Read Terraform prior state data into the model
	resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// DeleteInstancePool API call logic
	_, err := r.client.Kaas.DeleteInstancePool(
		data.PublicCloudId.ValueInt64(),
		data.PublicCloudProjectId.ValueInt64(),
		data.KaasId.ValueInt64(),
		data.Id.ValueInt64(),
	)
	if err != nil {
		// Fix: the diagnostic previously said "Error when deleting KaaS",
		// copied from the kaas resource — this deletes an instance pool.
		resp.Diagnostics.AddError(
			"Error when deleting KaaS Instance Pool",
			err.Error(),
		)
		return
	}
}
// ImportState parses an import ID of the form
// "public_cloud_id,public_cloud_project_id,kaas_id,id" and seeds the state.
func (r *kaasInstancePoolResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	parts := strings.Split(req.ID, ",")

	wellFormed := len(parts) == 4
	for _, part := range parts {
		if part == "" {
			wellFormed = false
		}
	}
	if !wellFormed {
		resp.Diagnostics.AddError(
			"Unexpected Import Identifier",
			fmt.Sprintf("Expected import identifier with format: public_cloud_id,public_cloud_project_id,kaas_id,id. Got: %q", req.ID),
		)
		return
	}

	// Parse every segment, collecting all failures before reporting.
	ids := make([]int64, len(parts))
	var parseErrs error
	for i, part := range parts {
		value, err := strconv.ParseInt(part, 10, 64)
		parseErrs = errors.Join(parseErrs, err)
		ids[i] = value
	}
	if parseErrs != nil {
		resp.Diagnostics.AddError(
			"Unexpected Import Identifier",
			fmt.Sprintf("Expected import identifier with format: public_cloud_id,public_cloud_project_id,kaas_id,id. Got: %q", req.ID),
		)
		return
	}

	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("public_cloud_id"), ids[0])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("public_cloud_project_id"), ids[1])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("kaas_id"), ids[2])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), ids[3])...)
}
// fill copies the API representation of an instance pool into the
// Terraform model. KaasId, PublicCloudId, PublicCloudProjectId and Labels
// are intentionally left untouched here — this method only mirrors the
// fields present on the returned kaas.InstancePool.
func (model *KaasInstancePoolModel) fill(instancePool *kaas.InstancePool) {
	model.Id = types.Int64Value(instancePool.Id)
	model.Name = types.StringValue(instancePool.Name)
	model.FlavorName = types.StringValue(instancePool.FlavorName)
	model.MinInstances = types.Int64Value(instancePool.MinInstances)
	model.MaxInstances = types.Int64Value(instancePool.MaxInstances)
	model.AvailabilityZone = types.StringValue(instancePool.AvailabilityZone)
}
package kaas
import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/mapplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// getKaasInstancePoolResourceSchema returns the Terraform schema for the
// kaas instance pool resource: the identifying project/cluster ids, the
// pool sizing and placement attributes, and optional Kubernetes labels.
func getKaasInstancePoolResourceSchema() schema.Schema {
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			// Identifiers: changing any of these means the pool lives
			// elsewhere, so they all force a replacement.
			"public_cloud_id": schema.Int64Attribute{
				Required:            true,
				Description:         "The id of the public cloud where KaaS is installed",
				MarkdownDescription: "The id of the public cloud where KaaS is installed",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.RequiresReplace(),
				},
			},
			"public_cloud_project_id": schema.Int64Attribute{
				Required:            true,
				Description:         "The id of the public cloud project where KaaS is installed",
				MarkdownDescription: "The id of the public cloud project where KaaS is installed",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.RequiresReplace(),
				},
			},
			"kaas_id": schema.Int64Attribute{
				Required:            true,
				Description:         "The id of the kaas project.",
				MarkdownDescription: "The id of the kaas project.",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.RequiresReplace(),
				},
			},
			"id": schema.Int64Attribute{
				Computed:            true,
				Description:         "A computed value representing the unique identifier for the instance pool. Mandatory for acceptance testing.",
				MarkdownDescription: "A computed value representing the unique identifier for the instance pool. Mandatory for acceptance testing.",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.UseStateForUnknown(),
				},
			},
			"name": schema.StringAttribute{
				Required:            true,
				Description:         "The name of the instance pool",
				MarkdownDescription: "The name of the instance pool",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"availability_zone": schema.StringAttribute{
				Required:            true,
				Description:         "The availability zone for the instances in the pool",
				MarkdownDescription: "The availability zone for the instances in the pool",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},
			"flavor_name": schema.StringAttribute{
				Required:            true,
				Description:         "The flavor name for the instances in the pool",
				MarkdownDescription: "The flavor name for the instances in the pool",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},
			// NOTE(review): min/max_instances are Required, so
			// UseStateForUnknown is presumably a no-op here — confirm intent.
			"min_instances": schema.Int64Attribute{
				Required:            true,
				Description:         "The minimum amount of instances in this instance pool",
				MarkdownDescription: "The minimum amount of instances in this instance pool",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.UseStateForUnknown(),
				},
			},
			"max_instances": schema.Int64Attribute{
				Required:            true,
				Description:         "The maximum amount of instances in this instance pool",
				MarkdownDescription: "The maximum amount of instances in this instance pool",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.UseStateForUnknown(),
				},
			},
			// Changing labels replaces the pool (RequiresReplace below).
			"labels": schema.MapAttribute{
				ElementType: types.StringType,
				Optional:    true,
				PlanModifiers: []planmodifier.Map{
					mapplanmodifier.UseStateForUnknown(),
					mapplanmodifier.RequiresReplace(),
				},
				Description:         "Kubernetes labels to apply to the instances. The label must have a prefix of node-role.kubernetes.io or belong to the domains node-restriction.kubernetes.io or custom.kaas.infomaniak.cloud.",
				MarkdownDescription: "Kubernetes labels to apply to the instances. The label must have a prefix of node-role.kubernetes.io or belong to the domains node-restriction.kubernetes.io or custom.kaas.infomaniak.cloud.",
			},
		},
		MarkdownDescription: "The kaas instance pool resource is used to manage instance pools inside a kaas project",
	}
}
package kaas
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"terraform-provider-infomaniak/internal/apis"
"terraform-provider-infomaniak/internal/apis/kaas"
"terraform-provider-infomaniak/internal/provider"
"time"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/mapplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// Compile-time assertions that kaasResource implements the plugin-framework
// resource interfaces, including Configure and ImportState support.
var (
	_ resource.Resource                = &kaasResource{}
	_ resource.ResourceWithConfigure   = &kaasResource{}
	_ resource.ResourceWithImportState = &kaasResource{}
)
// NewKaasResource returns a new, unconfigured kaas resource instance.
// The API client is injected later through Configure.
func NewKaasResource() resource.Resource {
	var r kaasResource
	return &r
}
// kaasResource implements the infomaniak_kaas Terraform resource.
type kaasResource struct {
	// client is the Infomaniak API client, set in Configure.
	client *apis.Client
}
// KaasModel is the Terraform state/plan model for the kaas resource.
// Field tags mirror the attribute names declared in Schema.
type KaasModel struct {
	PublicCloudId        types.Int64     `tfsdk:"public_cloud_id"`
	PublicCloudProjectId types.Int64     `tfsdk:"public_cloud_project_id"`
	Id                   types.Int64     `tfsdk:"id"`
	Name                 types.String    `tfsdk:"name"`
	PackName             types.String    `tfsdk:"pack_name"`
	Region               types.String    `tfsdk:"region"`
	Kubeconfig           types.String    `tfsdk:"kubeconfig"`
	KubernetesVersion    types.String    `tfsdk:"kubernetes_version"`
	Apiserver            *ApiserverModel `tfsdk:"apiserver"` // optional nested apiserver config; nil when absent
}
// SetDefaultValues ensures the nested Apiserver section and its Audit and
// Oidc sub-sections are non-nil, so later code can assign into them safely.
func (m *KaasModel) SetDefaultValues(ctx context.Context) {
	if m.Apiserver == nil {
		// Error deliberately ignored: building an empty string map cannot fail.
		emptyParams, _ := types.MapValueFrom(ctx, types.StringType, map[string]string{})
		m.Apiserver = &ApiserverModel{Params: emptyParams}
	}
	if m.Apiserver.Audit == nil {
		m.Apiserver.Audit = &Audit{}
	}
	if m.Apiserver.Oidc == nil {
		m.Apiserver.Oidc = &OidcModel{}
	}
}
// ApiserverModel models the optional "apiserver" nested attribute:
// free-form params plus structured OIDC and audit-log configuration.
type ApiserverModel struct {
	Params types.Map  `tfsdk:"params"` // raw apiserver params not otherwise abstracted
	Oidc   *OidcModel `tfsdk:"oidc"`
	Audit  *Audit     `tfsdk:"audit"`
}
// OidcModel models the OIDC-specific apiserver settings of the
// "apiserver.oidc" nested attribute.
type OidcModel struct {
	IssuerUrl      types.String `tfsdk:"issuer_url"`
	ClientId       types.String `tfsdk:"client_id"`
	UsernameClaim  types.String `tfsdk:"username_claim"`
	UsernamePrefix types.String `tfsdk:"username_prefix"`
	SigningAlgs    types.String `tfsdk:"signing_algs"`
	GroupsClaim    types.String `tfsdk:"groups_claim"`
	GroupsPrefix   types.String `tfsdk:"groups_prefix"`
	RequiredClaim  types.String `tfsdk:"required_claim"`
	Ca             types.String `tfsdk:"ca"` // OIDC CA certificate
}
// Audit models the "apiserver.audit" nested attribute holding the
// Kubernetes audit-log YAML manifests.
type Audit struct {
	WebhookConfig types.String `tfsdk:"webhook_config"`
	Policy        types.String `tfsdk:"policy"`
}
// Metadata sets the resource type name by suffixing the provider type
// name, yielding e.g. "<provider>_kaas".
func (r *kaasResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kaas"
}
// Configure stores the provider-configured API client on the resource.
func (r *kaasResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// ProviderData is nil until Terraform has called ConfigureProvider;
	// in that case there is nothing to do yet.
	if req.ProviderData == nil {
		return
	}

	apiClient, err := provider.GetApiClient(req.ProviderData)
	if err != nil {
		resp.Diagnostics.AddError(
			"Unexpected Resource Configure Type",
			err.Error(),
		)
		return
	}
	r.client = apiClient
}
// Schema declares the attributes of the kaas resource: the public cloud
// coordinates, the pack/name/region/version settings, the computed id and
// (sensitive) kubeconfig, and the optional nested "apiserver" block.
func (r *kaasResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: map[string]schema.Attribute{
			// Moving the KaaS to another cloud/project is not supported in
			// place, so both identifiers force a replacement.
			"public_cloud_id": schema.Int64Attribute{
				Required:            true,
				Description:         "The id of the public cloud where KaaS is installed",
				MarkdownDescription: "The id of the public cloud where KaaS is installed",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.RequiresReplace(),
				},
			},
			"public_cloud_project_id": schema.Int64Attribute{
				Required:            true,
				Description:         "The id of the public cloud project where KaaS is installed",
				MarkdownDescription: "The id of the public cloud project where KaaS is installed",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.RequiresReplace(),
				},
			},
			"pack_name": schema.StringAttribute{
				Required:            true,
				Description:         "The name of the pack associated to the KaaS project",
				MarkdownDescription: "The name of the pack associated to the KaaS project",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},
			"kubernetes_version": schema.StringAttribute{
				Required:            true,
				Description:         "The version of Kubernetes associated with the KaaS being installed",
				MarkdownDescription: "The version of Kubernetes associated with the KaaS being installed",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"name": schema.StringAttribute{
				Required:            true,
				Description:         "The name of the KaaS project",
				MarkdownDescription: "The name of the KaaS project",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"id": schema.Int64Attribute{
				Computed:            true,
				Description:         "A computed value representing the unique identifier for the architecture. Mandatory for acceptance testing.",
				MarkdownDescription: "A computed value representing the unique identifier for the architecture. Mandatory for acceptance testing.",
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.UseStateForUnknown(),
				},
			},
			"region": schema.StringAttribute{
				Required:            true,
				Description:         "The region where the KaaS will reside.",
				MarkdownDescription: "The region where the KaaS will reside.",
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			// Sensitive: the kubeconfig contains cluster credentials.
			"kubeconfig": schema.StringAttribute{
				Computed:            true,
				Sensitive:           true,
				Description:         "The kubeconfig generated to access to KaaS project",
				MarkdownDescription: "The kubeconfig generated to access to KaaS project",
			},
			"apiserver": schema.SingleNestedAttribute{
				MarkdownDescription: "Kubernetes Apiserver editable params",
				Attributes: map[string]schema.Attribute{
					"params": schema.MapAttribute{
						Optional:            true,
						ElementType:         types.StringType,
						MarkdownDescription: "Map of Kubernetes Apiserver params in case the terraform provider does not already abstracts them",
						PlanModifiers: []planmodifier.Map{
							mapplanmodifier.UseStateForUnknown(),
						},
					},
					"audit": schema.SingleNestedAttribute{
						MarkdownDescription: "Kubernetes audit logs specification files",
						Optional:            true,
						PlanModifiers: []planmodifier.Object{
							objectplanmodifier.UseStateForUnknown(),
						},
						Attributes: map[string]schema.Attribute{
							"webhook_config": schema.StringAttribute{
								MarkdownDescription: "YAML manifest for audit webhook config",
								Optional:            true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
							},
							"policy": schema.StringAttribute{
								MarkdownDescription: "YAML manifest for audit policy",
								Optional:            true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
							},
						},
					},
					"oidc": schema.SingleNestedAttribute{
						MarkdownDescription: "OIDC specific Apiserver params",
						Optional:            true,
						Attributes: map[string]schema.Attribute{
							"ca": schema.StringAttribute{
								Optional: true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
								MarkdownDescription: "OIDC Ca Certificate",
							},
							"groups_claim": schema.StringAttribute{
								Optional: true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
								MarkdownDescription: "OIDC groups claim",
							},
							"groups_prefix": schema.StringAttribute{
								Optional: true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
								MarkdownDescription: "OIDC groups prefix",
							},
							"issuer_url": schema.StringAttribute{
								Optional: true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
								MarkdownDescription: "OIDC issuer URL",
							},
							"client_id": schema.StringAttribute{
								Optional: true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
								MarkdownDescription: "OIDC client identifier",
							},
							"username_claim": schema.StringAttribute{
								Optional: true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
								MarkdownDescription: "OIDC username claim",
							},
							"username_prefix": schema.StringAttribute{
								Optional: true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
								MarkdownDescription: "OIDC username prefix",
							},
							"required_claim": schema.StringAttribute{
								Optional: true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
								MarkdownDescription: "A key=value pair that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. Repeat this flag to specify multiple claims.",
							},
							"signing_algs": schema.StringAttribute{
								Optional: true,
								PlanModifiers: []planmodifier.String{
									stringplanmodifier.UseStateForUnknown(),
								},
								MarkdownDescription: "OIDC signing algorithm. Kubernetes will default it to RS256",
							},
						},
					},
				},
				Optional: true,
			},
		},
		MarkdownDescription: "The kaas resource allows the user to manage a kaas project",
	}
}
// Create provisions a new KaaS project, waits for it to become Active,
// fetches its kubeconfig and optionally applies the apiserver params.
func (r *kaasResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	var data KaasModel

	// Read Terraform plan data into the model
	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
	if resp.Diagnostics.HasError() {
		return
	}

	chosenPack, err := r.getPackId(data, &resp.Diagnostics)
	if err != nil {
		// getPackId already recorded a diagnostic.
		return
	}

	input := &kaas.Kaas{
		Project: kaas.KaasProject{
			PublicCloudId: data.PublicCloudId.ValueInt64(),
			ProjectId:     data.PublicCloudProjectId.ValueInt64(),
		},
		Region:            data.Region.ValueString(),
		KubernetesVersion: data.KubernetesVersion.ValueString(),
		Name:              data.Name.ValueString(),
		PackId:            chosenPack.Id,
	}

	// CreateKaas API call logic
	kaasId, err := r.client.Kaas.CreateKaas(input)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when creating KaaS",
			err.Error(),
		)
		return
	}

	// Persist the id early so a partially-created KaaS is tracked in state.
	data.Id = types.Int64Value(kaasId)
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)

	kaasObject, err := r.waitUntilActive(ctx, input, kaasId)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when waiting for KaaS to be Active",
			err.Error(),
		)
		return
	}
	if kaasObject == nil {
		// Context was cancelled while waiting.
		return
	}

	err = r.fetchAndSetKubeconfig(&data, kaasObject)
	if err != nil {
		resp.Diagnostics.AddWarning("could not fetch and set kubeconfig", err.Error())
	}

	data.fill(kaasObject)
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)

	if data.Apiserver != nil {
		apiserverParamsInput := r.buildApiserverParamsInput(data)
		created, err := r.client.Kaas.PatchApiserverParams(apiserverParamsInput, input.Project.PublicCloudId, input.Project.ProjectId, kaasId)
		if err != nil || !created {
			// Fix: the previous code called err.Error() whenever !created,
			// panicking when the API returned (false, nil).
			detail := "the API reported that the apiserver params were not applied"
			if err != nil {
				detail = err.Error()
			}
			resp.Diagnostics.AddError(
				"Error when creating Oidc",
				detail,
			)
			return
		}
		data.fillApiserverState(ctx, apiserverParamsInput)
	}

	// Save data into Terraform state
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
// fillApiserverState synchronizes the model's Apiserver section with the
// params sent to the API, collapsing the section back to nil when empty.
func (state *KaasModel) fillApiserverState(ctx context.Context, apiserverParams *kaas.Apiserver) {
	if !state.shouldUpdateApiserver() {
		return
	}
	state.SetDefaultValues(ctx)
	state.updateAuditConfig(apiserverParams)
	state.updateOIDCConfig(apiserverParams)
	if state.canSetApiserverToNil() {
		state.Apiserver = nil
	}
}
// shouldUpdateApiserver reports whether the model carries any apiserver
// configuration worth synchronizing (audit, OIDC or raw params).
func (state *KaasModel) shouldUpdateApiserver() bool {
	a := state.Apiserver
	if a == nil {
		return false
	}
	return a.Audit != nil || a.Oidc != nil || !a.Params.IsNull()
}
// updateAuditConfig mirrors the audit-log params into the model, clearing
// the Audit section when neither a policy nor a webhook config is set.
func (state *KaasModel) updateAuditConfig(apiserverParams *kaas.Apiserver) {
	policy, webhook := apiserverParams.AuditLogPolicy, apiserverParams.AuditLogWebhook
	if policy == nil && webhook == nil {
		state.Apiserver.Audit = nil
		return
	}
	state.Apiserver.Audit.Policy = types.StringPointerValue(policy)
	state.Apiserver.Audit.WebhookConfig = types.StringPointerValue(webhook)
}
// updateOIDCConfig mirrors the OIDC params into the model; when the API
// payload carries no OIDC params, both the Oidc section and the raw
// params map are cleared.
func (state *KaasModel) updateOIDCConfig(apiserverParams *kaas.Apiserver) {
	p := apiserverParams.Params
	if p == nil {
		state.Apiserver.Oidc = nil
		state.Apiserver.Params = types.MapNull(types.StringType)
		return
	}
	state.Apiserver.Oidc = &OidcModel{
		ClientId:       types.StringPointerValue(p.ClientId),
		IssuerUrl:      types.StringPointerValue(p.IssuerUrl),
		UsernameClaim:  types.StringPointerValue(p.UsernameClaim),
		UsernamePrefix: types.StringPointerValue(p.UsernamePrefix),
		SigningAlgs:    types.StringPointerValue(p.SigningAlgs),
		GroupsClaim:    types.StringPointerValue(p.GroupsClaim),
		GroupsPrefix:   types.StringPointerValue(p.GroupsPrefix),
		RequiredClaim:  types.StringPointerValue(p.RequiredClaim),
		Ca:             types.StringPointerValue(apiserverParams.OidcCa),
	}
}
// canSetApiserverToNil reports whether the Apiserver section is entirely
// empty and can therefore be dropped from the model. Assumes Apiserver is
// non-nil (callers invoke it after SetDefaultValues).
func (state *KaasModel) canSetApiserverToNil() bool {
	a := state.Apiserver
	if a.Audit != nil || a.Oidc != nil {
		return false
	}
	return a.Params.IsNull()
}
// waitUntilActive polls the KaaS every 5 seconds until its status is
// "Active". It returns (nil, nil) when ctx is cancelled — callers treat a
// nil result as "stop silently".
func (r *kaasResource) waitUntilActive(ctx context.Context, k *kaas.Kaas, id int64) (*kaas.Kaas, error) {
	// Fix: the previous implementation slept unconditionally with
	// time.Sleep, ignoring ctx cancellation for up to 5 seconds per loop;
	// the parameter was also named "kaas", shadowing the kaas package.
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		found, err := r.client.Kaas.GetKaas(k.Project.PublicCloudId, k.Project.ProjectId, id)
		if err != nil {
			return nil, err
		}
		if found.Status == "Active" {
			return found, nil
		}
		select {
		case <-ctx.Done():
			return nil, nil
		case <-ticker.C:
		}
	}
}
// getApiserverParamsValues flattens the apiserver params map into a plain
// map[string]string, skipping null or unknown entries. Assumes
// data.Apiserver is non-nil (callers check before invoking).
func (r *kaasResource) getApiserverParamsValues(data KaasModel) map[string]string {
	out := make(map[string]string)
	paramsMap := data.Apiserver.Params
	if paramsMap.IsNull() || paramsMap.IsUnknown() {
		return out
	}
	for key, element := range paramsMap.Elements() {
		s, ok := element.(types.String)
		if !ok || s.IsNull() || s.IsUnknown() {
			continue
		}
		out[key] = s.ValueString()
	}
	return out
}
// Read refreshes the KaaS state, kubeconfig and apiserver params from the API.
func (r *kaasResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	var state KaasModel

	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}

	kaasObject, err := r.client.Kaas.GetKaas(
		state.PublicCloudId.ValueInt64(),
		state.PublicCloudProjectId.ValueInt64(),
		state.Id.ValueInt64(),
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error when reading KaaS",
			err.Error(),
		)
		return
	}
	state.fill(kaasObject)

	// A missing kubeconfig degrades the state but does not fail the refresh.
	if err := r.fetchAndSetKubeconfig(&state, kaasObject); err != nil {
		resp.Diagnostics.AddWarning("could not fetch and set kubeconfig", err.Error())
	}

	apiserverParams, err := r.client.Kaas.GetApiserverParams(state.PublicCloudId.ValueInt64(), state.PublicCloudProjectId.ValueInt64(), kaasObject.Id)
	if err != nil {
		resp.Diagnostics.AddWarning(
			"Could not get Oidc",
			err.Error(),
		)
	}
	if apiserverParams != nil {
		state.fillApiserverState(ctx, apiserverParams)
	}

	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
}
// Update applies the planned KaaS changes, waits for the cluster to become
// Active again, then refreshes kubeconfig and apiserver configuration.
func (r *kaasResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	var state KaasModel
	var data KaasModel

	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}

	chosenPackState, err := r.getPackId(state, &resp.Diagnostics)
	if err != nil {
		// getPackId already recorded a diagnostic.
		return
	}

	input := r.prepareUpdateInput(state, data, chosenPackState.Id)
	if _, err := r.client.Kaas.UpdateKaas(input); err != nil {
		resp.Diagnostics.AddError("Error when updating KaaS", err.Error())
		return
	}

	kaasObject, err := r.waitUntilActive(ctx, input, input.Id)
	if err != nil {
		resp.Diagnostics.AddError("Error waiting for KaaS activation", err.Error())
		return
	}
	if kaasObject == nil {
		// Fix: a nil kaasObject with a nil err (context cancelled while
		// waiting) previously reached err.Error() and panicked.
		return
	}

	err = r.fetchAndSetKubeconfig(&data, kaasObject)
	if err != nil {
		resp.Diagnostics.AddWarning("could not fetch and set kubeconfig", err.Error())
	}

	data.fill(kaasObject)

	if data.Apiserver != nil {
		r.handleApiserverConfig(ctx, &data, input, resp)
	}

	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
// prepareUpdateInput builds the UpdateKaas payload from prior state and
// plan. The Kubernetes version is blanked when unchanged so the API does
// not treat it as an upgrade request.
func (r *kaasResource) prepareUpdateInput(state, data KaasModel, packID int64) *kaas.Kaas {
	version := data.KubernetesVersion.ValueString()
	if version == state.KubernetesVersion.ValueString() {
		version = ""
	}
	return &kaas.Kaas{
		Project: kaas.KaasProject{
			PublicCloudId: data.PublicCloudId.ValueInt64(),
			ProjectId:     data.PublicCloudProjectId.ValueInt64(),
		},
		Id:                state.Id.ValueInt64(),
		Name:              data.Name.ValueString(),
		PackId:            packID,
		Region:            state.Region.ValueString(),
		KubernetesVersion: version,
	}
}
// fetchAndSetKubeconfig retrieves the cluster's kubeconfig and stores it
// on the model, wrapping any retrieval error with context.
func (r *kaasResource) fetchAndSetKubeconfig(data *KaasModel, input *kaas.Kaas) error {
	cfg, err := r.client.Kaas.GetKubeconfig(
		input.Project.PublicCloudId,
		input.Project.ProjectId,
		input.Id,
	)
	if err != nil {
		return fmt.Errorf("could not get kubeconfig: %w", err)
	}
	data.Kubeconfig = types.StringValue(cfg)
	return nil
}
// handleApiserverConfig patches the apiserver params on the API and mirrors
// the applied configuration back into the model.
func (r *kaasResource) handleApiserverConfig(ctx context.Context, data *KaasModel, input *kaas.Kaas, resp *resource.UpdateResponse) {
	apiserverParamsInput := r.buildApiserverParamsInput(*data)
	patched, err := r.client.Kaas.PatchApiserverParams(apiserverParamsInput, input.Project.PublicCloudId, input.Project.ProjectId, input.Id)
	if err != nil || !patched {
		// Fix: err.Error() was called even when the API returned
		// (false, nil), which panicked on a nil error.
		detail := "the API reported that the apiserver params were not applied"
		if err != nil {
			detail = err.Error()
		}
		resp.Diagnostics.AddError("Error when patching Apiserver params", detail)
		return
	}
	data.fillApiserverState(ctx, apiserverParamsInput)
}
// buildApiserverParamsInput assembles the API payload for apiserver
// configuration from the model: the free-form params map, plus the audit
// and OIDC sections when present. Assumes data.Apiserver is non-nil
// (callers check before invoking).
func (r *kaasResource) buildApiserverParamsInput(data KaasModel) *kaas.Apiserver {
	apiserverParamsInput := &kaas.Apiserver{
		NonSpecificApiServerParams: r.getApiserverParamsValues(data),
	}
	if data.Apiserver.Audit != nil {
		apiserverParamsInput.AuditLogPolicy = data.Apiserver.Audit.Policy.ValueStringPointer()
		apiserverParamsInput.AuditLogWebhook = data.Apiserver.Audit.WebhookConfig.ValueStringPointer()
	}
	if data.Apiserver.Oidc != nil {
		apiserverParamsInput.OidcCa = data.Apiserver.Oidc.Ca.ValueStringPointer()
		apiserverParamsInput.Params = &kaas.ApiServerParams{
			IssuerUrl:      data.Apiserver.Oidc.IssuerUrl.ValueStringPointer(),
			ClientId:       data.Apiserver.Oidc.ClientId.ValueStringPointer(),
			UsernameClaim:  data.Apiserver.Oidc.UsernameClaim.ValueStringPointer(),
			UsernamePrefix: data.Apiserver.Oidc.UsernamePrefix.ValueStringPointer(),
			SigningAlgs:    data.Apiserver.Oidc.SigningAlgs.ValueStringPointer(),
			GroupsClaim:    data.Apiserver.Oidc.GroupsClaim.ValueStringPointer(),
			GroupsPrefix:   data.Apiserver.Oidc.GroupsPrefix.ValueStringPointer(),
			RequiredClaim:  data.Apiserver.Oidc.RequiredClaim.ValueStringPointer(),
		}
	}
	return apiserverParamsInput
}
// Delete removes the KaaS project through the API.
func (r *kaasResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	var state KaasModel

	// Load the prior state into the model.
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// DeleteKaas API call logic
	if _, err := r.client.Kaas.DeleteKaas(
		state.PublicCloudId.ValueInt64(),
		state.PublicCloudProjectId.ValueInt64(),
		state.Id.ValueInt64(),
	); err != nil {
		resp.Diagnostics.AddError(
			"Error when deleting KaaS",
			err.Error(),
		)
		return
	}
}
// ImportState parses an import ID of the form
// "public_cloud_id,public_cloud_project_id,id" and seeds the state.
func (r *kaasResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	parts := strings.Split(req.ID, ",")

	wellFormed := len(parts) == 3
	for _, part := range parts {
		if part == "" {
			wellFormed = false
		}
	}
	if !wellFormed {
		resp.Diagnostics.AddError(
			"Unexpected Import Identifier",
			fmt.Sprintf("Expected import identifier with format: public_cloud_id,public_cloud_project_id,id. Got: %q", req.ID),
		)
		return
	}

	// Parse every segment, collecting all failures before reporting.
	ids := make([]int64, len(parts))
	var parseErrs error
	for i, part := range parts {
		value, err := strconv.ParseInt(part, 10, 64)
		parseErrs = errors.Join(parseErrs, err)
		ids[i] = value
	}
	if parseErrs != nil {
		resp.Diagnostics.AddError(
			"Unexpected Import Identifier",
			fmt.Sprintf("Expected import identifier with format: public_cloud_id,public_cloud_project_id,id. Got: %q", req.ID),
		)
		return
	}

	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("public_cloud_id"), ids[0])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("public_cloud_project_id"), ids[1])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), ids[2])...)
}
// getPackId resolves the configured pack_name to a KaasPack. On failure it
// records a diagnostic (listing the valid pack names when the name is
// unknown) and returns a non-nil error so callers can bail out.
func (r *kaasResource) getPackId(data KaasModel, diagnostic *diag.Diagnostics) (*kaas.KaasPack, error) {
	packs, err := r.client.Kaas.GetPacks()
	if err != nil {
		diagnostic.AddError(
			"Could not get KaaS Packs",
			err.Error(),
		)
		return nil, err
	}

	wanted := data.PackName.ValueString()
	for _, pack := range packs {
		if pack.Name == wanted {
			return pack, nil
		}
	}

	// Unknown pack: surface the valid names to the practitioner.
	packNames := make([]string, 0, len(packs))
	for _, pack := range packs {
		packNames = append(packNames, pack.Name)
	}
	diagnostic.AddError(
		"Unknown KaaS Pack",
		fmt.Sprintf("pack_name must be one of : %v", packNames),
	)
	// errors.New is the idiomatic form for a constant message (fmt.Errorf
	// with no verbs was flagged by staticcheck).
	return nil, errors.New("pack name has not been found")
}
// fill copies the API representation of a KaaS into the Terraform model.
// Kubeconfig and Apiserver are not touched here: the kubeconfig is set by
// fetchAndSetKubeconfig and the apiserver section by fillApiserverState.
func (model *KaasModel) fill(kaas *kaas.Kaas) {
	model.Id = types.Int64Value(kaas.Id)
	model.Region = types.StringValue(kaas.Region)
	model.KubernetesVersion = types.StringValue(kaas.KubernetesVersion)
	model.Name = types.StringValue(kaas.Name)
	model.PackName = types.StringValue(kaas.Pack.Name)
}
package kaas
import "terraform-provider-infomaniak/internal/provider/registry"
// Register adds the kaas resources and data sources to the provider's
// shared registry.
func Register() {
	registry.RegisterResource(NewKaasResource)
	registry.RegisterResource(NewKaasInstancePoolResource)
	registry.RegisterDataSource(NewKaasDataSource)
	registry.RegisterDataSource(NewKaasInstancePoolDataSource)
}